public inbox for gentoo-commits@lists.gentoo.org
From: "Arisu Tachibana" <alicef@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Thu, 21 Aug 2025 05:20:40 +0000 (UTC)
Message-ID: <1755753625.77fdf4c4bfec11fd0f67b8cac4d8e404333d6c29.alicef@gentoo>

commit:     77fdf4c4bfec11fd0f67b8cac4d8e404333d6c29
Author:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Aug 21 05:20:25 2025 +0000
Commit:     Arisu Tachibana <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Aug 21 05:20:25 2025 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=77fdf4c4

Linux patch 5.4.283

Signed-off-by: Arisu Tachibana <alicef <AT> gentoo.org>

 0000_README              |    4 +
 1282_linux-5.4.283.patch | 5094 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5098 insertions(+)

diff --git a/0000_README b/0000_README
index d866c919..dc1c2615 100644
--- a/0000_README
+++ b/0000_README
@@ -1171,6 +1171,10 @@ Patch:  1281_linux-5.4.282.patch
 From:   https://www.kernel.org
 Desc:   Linux 5.4.282
 
+Patch:  1282_linux-5.4.283.patch
+From:   https://www.kernel.org
+Desc:   Linux 5.4.283
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1282_linux-5.4.283.patch b/1282_linux-5.4.283.patch
new file mode 100644
index 00000000..4c62131a
--- /dev/null
+++ b/1282_linux-5.4.283.patch
@@ -0,0 +1,5094 @@
+diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst
+index 179f2a5625a0e..95a3e7a40414c 100644
+--- a/Documentation/process/deprecated.rst
++++ b/Documentation/process/deprecated.rst
+@@ -48,6 +48,9 @@ Instead, the 2-factor form of the allocator should be used::
+ 
+ 	foo = kmalloc_array(count, size, GFP_KERNEL);
+ 
++Specifically, kmalloc() can be replaced with kmalloc_array(), and
++kzalloc() can be replaced with kcalloc().
++
+ If no 2-factor form is available, the saturate-on-overflow helpers should
+ be used::
+ 
+@@ -63,9 +66,20 @@ Instead, use the helper::
+ 
+ 	header = kzalloc(struct_size(header, item, count), GFP_KERNEL);
+ 
+-See :c:func:`array_size`, :c:func:`array3_size`, and :c:func:`struct_size`,
+-for more details as well as the related :c:func:`check_add_overflow` and
+-:c:func:`check_mul_overflow` family of functions.
++For other calculations, please compose the use of the size_mul(),
++size_add(), and size_sub() helpers. For example, in the case of::
++
++	foo = krealloc(current_size + chunk_size * (count - 3), GFP_KERNEL);
++
++Instead, use the helpers::
++
++	foo = krealloc(size_add(current_size,
++				size_mul(chunk_size,
++					 size_sub(count, 3))), GFP_KERNEL);
++
++For more details, also see array3_size() and flex_array_size(),
++as well as the related check_mul_overflow(), check_add_overflow(),
++check_sub_overflow(), and check_shl_overflow() family of functions.
+ 
+ simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull()
+ ----------------------------------------------------------------------
+diff --git a/Makefile b/Makefile
+index 4e9e7945fef43..362e593d630fe 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 282
++SUBLEVEL = 283
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
+index 7ff8000454346..048b75cadd2fd 100644
+--- a/arch/arm64/kernel/acpi_numa.c
++++ b/arch/arm64/kernel/acpi_numa.c
+@@ -27,7 +27,7 @@
+ 
+ #include <asm/numa.h>
+ 
+-static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE };
++static int acpi_early_node_map[NR_CPUS] __initdata = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };
+ 
+ int __init acpi_numa_get_nid(unsigned int cpu)
+ {
+diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
+index ae104eb4becc7..bd0ba3157d515 100644
+--- a/arch/openrisc/kernel/setup.c
++++ b/arch/openrisc/kernel/setup.c
+@@ -280,6 +280,9 @@ void calibrate_delay(void)
+ 
+ void __init setup_arch(char **cmdline_p)
+ {
++	/* setup memblock allocator */
++	setup_memory();
++
+ 	unflatten_and_copy_device_tree();
+ 
+ 	setup_cpuinfo();
+@@ -304,9 +307,6 @@ void __init setup_arch(char **cmdline_p)
+ 	initrd_below_start_ok = 1;
+ #endif
+ 
+-	/* setup memblock allocator */
+-	setup_memory();
+-
+ 	/* paging_init() sets up the MMU and marks all pages as reserved */
+ 	paging_init();
+ 
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index b4aa5af943ba5..c4c06bcd04835 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -520,7 +520,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ 
+ 	old_regs = set_irq_regs(regs);
+ 	local_irq_disable();
+-	irq_enter();
++	irq_enter_rcu();
+ 
+ 	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
+ 	if (!eirr_val)
+@@ -555,7 +555,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ #endif /* CONFIG_IRQSTACKS */
+ 
+  out:
+-	irq_exit();
++	irq_exit_rcu();
+ 	set_irq_regs(old_regs);
+ 	return;
+ 
+diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
+index 65ec135d01579..bc99f75b8582d 100644
+--- a/arch/powerpc/boot/simple_alloc.c
++++ b/arch/powerpc/boot/simple_alloc.c
+@@ -114,8 +114,11 @@ static void *simple_realloc(void *ptr, unsigned long size)
+ 		return ptr;
+ 
+ 	new = simple_malloc(size);
+-	memcpy(new, ptr, p->size);
+-	simple_free(ptr);
++	if (new) {
++		memcpy(new, ptr, p->size);
++		simple_free(ptr);
++	}
++
+ 	return new;
+ }
+ 
+diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
+index 7d13d2ef5a905..66de291b27d08 100644
+--- a/arch/powerpc/sysdev/xics/icp-native.c
++++ b/arch/powerpc/sysdev/xics/icp-native.c
+@@ -235,6 +235,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
+ 	rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
+ 			  cpu, hw_id);
+ 
++	if (!rname)
++		return -ENOMEM;
+ 	if (!request_mem_region(addr, size, rname)) {
+ 		pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
+ 			cpu, hw_id);
+diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
+index ef3c00b049ab4..67f63b76dc18b 100644
+--- a/arch/s390/include/asm/uv.h
++++ b/arch/s390/include/asm/uv.h
+@@ -97,7 +97,10 @@ static inline int share(unsigned long addr, u16 cmd)
+ 
+ 	if (!uv_call(0, (u64)&uvcb))
+ 		return 0;
+-	return -EINVAL;
++	pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
++	       uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
++	       uvcb.header.rc, uvcb.header.rrc);
++	panic("System security cannot be guaranteed unless the system panics now.\n");
+ }
+ 
+ /*
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index b8de27bb6e09c..c402b079b74e8 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -793,7 +793,10 @@ unsigned long arch_align_stack(unsigned long sp)
+ 
+ unsigned long arch_randomize_brk(struct mm_struct *mm)
+ {
+-	return randomize_page(mm->brk, 0x02000000);
++	if (mmap_is_ia32())
++		return randomize_page(mm->brk, SZ_32M);
++
++	return randomize_page(mm->brk, SZ_1G);
+ }
+ 
+ /*
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d1d279107a725..2ee0ee2b4752a 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -6141,6 +6141,9 @@ static void ata_host_release(struct kref *kref)
+ 	for (i = 0; i < host->n_ports; i++) {
+ 		struct ata_port *ap = host->ports[i];
+ 
++		if (!ap)
++			continue;
++
+ 		kfree(ap->pmp_link);
+ 		kfree(ap->slave_link);
+ 		kfree(ap);
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index 605e992d25df5..06e2fea1ffa92 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -1117,8 +1117,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ 	rpp->len += skb->len;
+ 
+ 	if (stat & SAR_RSQE_EPDU) {
++		unsigned int len, truesize;
+ 		unsigned char *l1l2;
+-		unsigned int len;
+ 
+ 		l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
+ 
+@@ -1188,14 +1188,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
+ 		ATM_SKB(skb)->vcc = vcc;
+ 		__net_timestamp(skb);
+ 
++		truesize = skb->truesize;
+ 		vcc->push(vcc, skb);
+ 		atomic_inc(&vcc->stats->rx);
+ 
+-		if (skb->truesize > SAR_FB_SIZE_3)
++		if (truesize > SAR_FB_SIZE_3)
+ 			add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
+-		else if (skb->truesize > SAR_FB_SIZE_2)
++		else if (truesize > SAR_FB_SIZE_2)
+ 			add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
+-		else if (skb->truesize > SAR_FB_SIZE_1)
++		else if (truesize > SAR_FB_SIZE_1)
+ 			add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
+ 		else
+ 			add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index 8be4d807d1370..600c88fc3145f 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -765,7 +765,8 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 		break;
+ 
+ 	case HCIUARTGETPROTO:
+-		if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
++		if (test_bit(HCI_UART_PROTO_SET, &hu->flags) &&
++		    test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ 			err = hu->proto->id;
+ 		else
+ 			err = -EUNATCH;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 39ca0718ced0c..863c3a14d3094 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -401,16 +401,24 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+ 
+ 	switch (args->in.op) {
+ 	case AMDGPU_CTX_OP_ALLOC_CTX:
++		if (args->in.flags)
++			return -EINVAL;
+ 		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
+ 		args->out.alloc.ctx_id = id;
+ 		break;
+ 	case AMDGPU_CTX_OP_FREE_CTX:
++		if (args->in.flags)
++			return -EINVAL;
+ 		r = amdgpu_ctx_free(fpriv, id);
+ 		break;
+ 	case AMDGPU_CTX_OP_QUERY_STATE:
++		if (args->in.flags)
++			return -EINVAL;
+ 		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
+ 		break;
+ 	case AMDGPU_CTX_OP_QUERY_STATE2:
++		if (args->in.flags)
++			return -EINVAL;
+ 		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
+ 		break;
+ 	default:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 65044b1b3d4cc..eac1997844e69 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -715,7 +715,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+ 	uint32_t created = 0;
+ 	uint32_t allocated = 0;
+ 	uint32_t tmp, handle = 0;
+-	uint32_t *size = &tmp;
++	uint32_t dummy = 0xffffffff;
++	uint32_t *size = &dummy;
+ 	unsigned idx;
+ 	int i, r = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 990ffc0eeb6b7..b99e6b2e0acac 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1278,7 +1278,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ 		if (args->size != PAGE_SIZE)
+ 			return -EINVAL;
+ 		offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+-		if (!offset)
++		if (!offset || (PAGE_SIZE > 4096))
+ 			return -ENOMEM;
+ 	}
+ 
+@@ -1872,6 +1872,9 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
+ 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ 		return -EINVAL;
+ 
++	if (PAGE_SIZE > 4096)
++		return -EINVAL;
++
+ 	address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+ 
+ 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
+index 3fca560087c97..4bf216cb33030 100644
+--- a/drivers/gpu/drm/lima/lima_gp.c
++++ b/drivers/gpu/drm/lima/lima_gp.c
+@@ -138,6 +138,11 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
+ 	gp_write(LIMA_GP_CMD, cmd);
+ }
+ 
++static int lima_gp_bus_stop_poll(struct lima_ip *ip)
++{
++	return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED);
++}
++
+ static int lima_gp_hard_reset_poll(struct lima_ip *ip)
+ {
+ 	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
+@@ -151,6 +156,13 @@ static int lima_gp_hard_reset(struct lima_ip *ip)
+ 
+ 	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ 	gp_write(LIMA_GP_INT_MASK, 0);
++
++	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS);
++	ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100);
++	if (ret) {
++		dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
++		return ret;
++	}
+ 	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
+ 	ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
+ 	if (ret) {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+index 6a4813505c33c..c1b9a0500fa22 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+@@ -30,24 +30,14 @@
+  * @fmt: Pointer to format string
+  */
+ #define DPU_DEBUG(fmt, ...)                                                \
+-	do {                                                               \
+-		if (unlikely(drm_debug & DRM_UT_KMS))                      \
+-			DRM_DEBUG(fmt, ##__VA_ARGS__); \
+-		else                                                       \
+-			pr_debug(fmt, ##__VA_ARGS__);                      \
+-	} while (0)
++	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+ 
+ /**
+  * DPU_DEBUG_DRIVER - macro for hardware driver logging
+  * @fmt: Pointer to format string
+  */
+ #define DPU_DEBUG_DRIVER(fmt, ...)                                         \
+-	do {                                                               \
+-		if (unlikely(drm_debug & DRM_UT_DRIVER))                   \
+-			DRM_ERROR(fmt, ##__VA_ARGS__); \
+-		else                                                       \
+-			pr_debug(fmt, ##__VA_ARGS__);                      \
+-	} while (0)
++	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+ 
+ #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+ #define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 839430b487553..6fbb2a3874606 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -866,7 +866,15 @@
+ #define USB_DEVICE_ID_MS_TYPE_COVER_2    0x07a9
+ #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
+ #define USB_DEVICE_ID_MS_SURFACE3_COVER		0x07de
+-#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER	0x02fd
++/*
++ * For a description of the Xbox controller models, refer to:
++ * https://en.wikipedia.org/wiki/Xbox_Wireless_Controller#Summary
++ */
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1708	0x02fd
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1708_BLE	0x0b20
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1914	0x0b13
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1797	0x0b05
++#define USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1797_BLE	0x0b22
+ #define USB_DEVICE_ID_MS_PIXART_MOUSE    0x00cb
+ #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS      0x02e0
+ #define USB_DEVICE_ID_MS_MOUSE_0783      0x0783
+diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
+index 8cb1ca1936e42..c0e93f7f33071 100644
+--- a/drivers/hid/hid-microsoft.c
++++ b/drivers/hid/hid-microsoft.c
+@@ -449,7 +449,16 @@ static const struct hid_device_id ms_devices[] = {
+ 		.driver_data = MS_PRESENTER },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, 0x091B),
+ 		.driver_data = MS_SURFACE_DIAL },
+-	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER),
++
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1708),
++		.driver_data = MS_QUIRK_FF },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1708_BLE),
++		.driver_data = MS_QUIRK_FF },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1914),
++		.driver_data = MS_QUIRK_FF },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1797),
++		.driver_data = MS_QUIRK_FF },
++	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_CONTROLLER_MODEL_1797_BLE),
+ 		.driver_data = MS_QUIRK_FF },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS),
+ 		.driver_data = MS_QUIRK_FF },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 62a7a1c9a02ff..e083e21fe7429 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1920,12 +1920,14 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ 	int fmax = field->logical_maximum;
+ 	unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ 	int resolution_code = code;
+-	int resolution = hidinput_calc_abs_res(field, resolution_code);
++	int resolution;
+ 
+ 	if (equivalent_usage == HID_DG_TWIST) {
+ 		resolution_code = ABS_RZ;
+ 	}
+ 
++	resolution = hidinput_calc_abs_res(field, resolution_code);
++
+ 	if (equivalent_usage == HID_GD_X) {
+ 		fmin += features->offset_left;
+ 		fmax -= features->offset_right;
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index 800414886f6b5..588a3efb09c26 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -312,7 +312,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
+ 	 * frequency with only 62 clock ticks max (31 high, 31 low).
+ 	 * Aim for a duty of 60% LOW, 40% HIGH.
+ 	 */
+-	total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
++	total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
+ 
+ 	for (cks = 0; cks < 7; cks++) {
+ 		/*
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 65d6bf34614c8..6cf87fcfc4eb5 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -13067,15 +13067,16 @@ static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
+ {
+ 	u64 reg;
+ 	u16 idx = src / BITS_PER_REGISTER;
++	unsigned long flags;
+ 
+-	spin_lock(&dd->irq_src_lock);
++	spin_lock_irqsave(&dd->irq_src_lock, flags);
+ 	reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
+ 	if (set)
+ 		reg |= bits;
+ 	else
+ 		reg &= ~bits;
+ 	write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
+-	spin_unlock(&dd->irq_src_lock);
++	spin_unlock_irqrestore(&dd->irq_src_lock, flags);
+ }
+ 
+ /**
+diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
+index a81e141484077..dd457403a1a03 100644
+--- a/drivers/input/input-mt.c
++++ b/drivers/input/input-mt.c
+@@ -45,6 +45,9 @@ int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
+ 		return 0;
+ 	if (mt)
+ 		return mt->num_slots != num_slots ? -EINVAL : 0;
++	/* Arbitrary limit for avoiding too large memory allocation. */
++	if (num_slots > 1024)
++		return -EINVAL;
+ 
+ 	mt = kzalloc(struct_size(mt, slots, num_slots), GFP_KERNEL);
+ 	if (!mt)
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index d16776c6dee7b..ae3378ef469b0 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3085,8 +3085,6 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
+ 	struct page *vprop_page;
+ 	int base, nr_ids, i, err = 0;
+ 
+-	BUG_ON(!vm);
+-
+ 	bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
+ 	if (!bitmap)
+ 		return -ENOMEM;
+diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c
+index 17712456fa634..383258eae750d 100644
+--- a/drivers/md/dm-clone-metadata.c
++++ b/drivers/md/dm-clone-metadata.c
+@@ -471,11 +471,6 @@ static void __destroy_persistent_data_structures(struct dm_clone_metadata *cmd)
+ 
+ /*---------------------------------------------------------------------------*/
+ 
+-static size_t bitmap_size(unsigned long nr_bits)
+-{
+-	return BITS_TO_LONGS(nr_bits) * sizeof(long);
+-}
+-
+ static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words,
+ 			    unsigned long nr_regions)
+ {
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index e89e710dd2920..83727fa05c168 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1064,8 +1064,26 @@ static int do_resume(struct dm_ioctl *param)
+ 			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+ 		if (param->flags & DM_NOFLUSH_FLAG)
+ 			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+-		if (!dm_suspended_md(md))
+-			dm_suspend(md, suspend_flags);
++		if (!dm_suspended_md(md)) {
++			r = dm_suspend(md, suspend_flags);
++			if (r) {
++				down_write(&_hash_lock);
++				hc = dm_get_mdptr(md);
++				if (hc && !hc->new_map) {
++					hc->new_map = new_map;
++					new_map = NULL;
++				} else {
++					r = -ENXIO;
++				}
++				up_write(&_hash_lock);
++				if (new_map) {
++					dm_sync_table(md);
++					dm_table_destroy(new_map);
++				}
++				dm_put(md);
++				return r;
++			}
++		}
+ 
+ 		old_map = dm_swap_table(md, new_map);
+ 		if (IS_ERR(old_map)) {
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 54ecfea2cf47b..4f6a54452b1ca 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -558,7 +558,8 @@ static void multipath_release_clone(struct request *clone,
+ 		if (pgpath && pgpath->pg->ps.type->end_io)
+ 			pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
+ 						    &pgpath->path,
+-						    mpio->nr_bytes);
++						    mpio->nr_bytes,
++						    clone->io_start_time_ns);
+ 	}
+ 
+ 	blk_put_request(clone);
+@@ -1568,7 +1569,8 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
+ 		struct path_selector *ps = &pgpath->pg->ps;
+ 
+ 		if (ps->type->end_io)
+-			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
++			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
++					 clone->io_start_time_ns);
+ 	}
+ 
+ 	return r;
+@@ -1612,7 +1614,8 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
+ 		struct path_selector *ps = &pgpath->pg->ps;
+ 
+ 		if (ps->type->end_io)
+-			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
++			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
++					 dm_start_time_ns_from_clone(clone));
+ 	}
+ 
+ 	return r;
+diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
+index b6eb5365b1a46..c47bc0e20275b 100644
+--- a/drivers/md/dm-path-selector.h
++++ b/drivers/md/dm-path-selector.h
+@@ -74,7 +74,7 @@ struct path_selector_type {
+ 	int (*start_io) (struct path_selector *ps, struct dm_path *path,
+ 			 size_t nr_bytes);
+ 	int (*end_io) (struct path_selector *ps, struct dm_path *path,
+-		       size_t nr_bytes);
++		       size_t nr_bytes, u64 start_time);
+ };
+ 
+ /* Register a path selector */
+diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
+index 969c4f1a36336..5fd018d184187 100644
+--- a/drivers/md/dm-queue-length.c
++++ b/drivers/md/dm-queue-length.c
+@@ -227,7 +227,7 @@ static int ql_start_io(struct path_selector *ps, struct dm_path *path,
+ }
+ 
+ static int ql_end_io(struct path_selector *ps, struct dm_path *path,
+-		     size_t nr_bytes)
++		     size_t nr_bytes, u64 start_time)
+ {
+ 	struct path_info *pi = path->pscontext;
+ 
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 6bc61927d3205..3d6ac63ee85ae 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -143,10 +143,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
+  */
+ static void rq_completed(struct mapped_device *md)
+ {
+-	/* nudge anyone waiting on suspend queue */
+-	if (unlikely(wq_has_sleeper(&md->wait)))
+-		wake_up(&md->wait);
+-
+ 	/*
+ 	 * dm_put() must be at the end of this function. See the comment above
+ 	 */
+diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
+index f006a9005593b..9cfda665e9ebd 100644
+--- a/drivers/md/dm-service-time.c
++++ b/drivers/md/dm-service-time.c
+@@ -309,7 +309,7 @@ static int st_start_io(struct path_selector *ps, struct dm_path *path,
+ }
+ 
+ static int st_end_io(struct path_selector *ps, struct dm_path *path,
+-		     size_t nr_bytes)
++		     size_t nr_bytes, u64 start_time)
+ {
+ 	struct path_info *pi = path->pscontext;
+ 
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index a7724ba45b437..3d00bb98d702b 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -637,27 +637,14 @@ static void free_tio(struct dm_target_io *tio)
+ 	bio_put(&tio->clone);
+ }
+ 
+-static bool md_in_flight_bios(struct mapped_device *md)
++u64 dm_start_time_ns_from_clone(struct bio *bio)
+ {
+-	int cpu;
+-	struct hd_struct *part = &dm_disk(md)->part0;
+-	long sum = 0;
+-
+-	for_each_possible_cpu(cpu) {
+-		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+-		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+-	}
+-
+-	return sum != 0;
+-}
++	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
++	struct dm_io *io = tio->io;
+ 
+-static bool md_in_flight(struct mapped_device *md)
+-{
+-	if (queue_is_mq(md->queue))
+-		return blk_mq_queue_inflight(md->queue);
+-	else
+-		return md_in_flight_bios(md);
++	return jiffies_to_nsecs(io->start_time);
+ }
++EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
+ 
+ static void start_io_acct(struct dm_io *io)
+ {
+@@ -2419,19 +2406,33 @@ void dm_put(struct mapped_device *md)
+ }
+ EXPORT_SYMBOL_GPL(dm_put);
+ 
+-static int dm_wait_for_completion(struct mapped_device *md, long task_state)
++static bool md_in_flight_bios(struct mapped_device *md)
++{
++	int cpu;
++	struct hd_struct *part = &dm_disk(md)->part0;
++	long sum = 0;
++
++	for_each_possible_cpu(cpu) {
++		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
++		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
++	}
++
++	return sum != 0;
++}
++
++static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
+ {
+ 	int r = 0;
+ 	DEFINE_WAIT(wait);
+ 
+-	while (1) {
++	while (true) {
+ 		prepare_to_wait(&md->wait, &wait, task_state);
+ 
+-		if (!md_in_flight(md))
++		if (!md_in_flight_bios(md))
+ 			break;
+ 
+ 		if (signal_pending_state(task_state, current)) {
+-			r = -EINTR;
++			r = -ERESTARTSYS;
+ 			break;
+ 		}
+ 
+@@ -2444,6 +2445,28 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+ 	return r;
+ }
+ 
++static int dm_wait_for_completion(struct mapped_device *md, long task_state)
++{
++	int r = 0;
++
++	if (!queue_is_mq(md->queue))
++		return dm_wait_for_bios_completion(md, task_state);
++
++	while (true) {
++		if (!blk_mq_queue_inflight(md->queue))
++			break;
++
++		if (signal_pending_state(task_state, current)) {
++			r = -ERESTARTSYS;
++			break;
++		}
++
++		msleep(5);
++	}
++
++	return r;
++}
++
+ /*
+  * Process the deferred bios
+  */
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 61c3e8df1b55b..e5f3010debb85 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7396,11 +7396,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
+ 
+ 	mddev = bdev->bd_disk->private_data;
+ 
+-	if (!mddev) {
+-		BUG();
+-		goto out;
+-	}
+-
+ 	/* Some actions do not requires the mutex */
+ 	switch (cmd) {
+ 	case GET_ARRAY_INFO:
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index da439ac857963..25ce7fb7fd9d0 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -275,7 +275,7 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
+ {
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 
+-	kfree(smm);
++	kvfree(smm);
+ }
+ 
+ static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
+@@ -759,7 +759,7 @@ struct dm_space_map *dm_sm_metadata_init(void)
+ {
+ 	struct sm_metadata *smm;
+ 
+-	smm = kmalloc(sizeof(*smm), GFP_KERNEL);
++	smm = kvmalloc(sizeof(*smm), GFP_KERNEL);
+ 	if (!smm)
+ 		return ERR_PTR(-ENOMEM);
+ 
+diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
+index 90224a9947022..e4e295067a7ee 100644
+--- a/drivers/media/pci/cx23885/cx23885-video.c
++++ b/drivers/media/pci/cx23885/cx23885-video.c
+@@ -1299,6 +1299,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
+ 	/* register Video device */
+ 	dev->video_dev = cx23885_vdev_init(dev, dev->pci,
+ 		&cx23885_video_template, "video");
++	if (!dev->video_dev) {
++		err = -ENOMEM;
++		goto fail_unreg;
++	}
+ 	dev->video_dev->queue = &dev->vb2_vidq;
+ 	dev->video_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ 				      V4L2_CAP_AUDIO | V4L2_CAP_VIDEO_CAPTURE;
+@@ -1317,6 +1321,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
+ 	/* register VBI device */
+ 	dev->vbi_dev = cx23885_vdev_init(dev, dev->pci,
+ 		&cx23885_vbi_template, "vbi");
++	if (!dev->vbi_dev) {
++		err = -ENOMEM;
++		goto fail_unreg;
++	}
+ 	dev->vbi_dev->queue = &dev->vb2_vbiq;
+ 	dev->vbi_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ 				    V4L2_CAP_AUDIO | V4L2_CAP_VBI_CAPTURE;
+diff --git a/drivers/media/pci/solo6x10/solo6x10-offsets.h b/drivers/media/pci/solo6x10/solo6x10-offsets.h
+index e3ae6a02dbb97..244837395f02d 100644
+--- a/drivers/media/pci/solo6x10/solo6x10-offsets.h
++++ b/drivers/media/pci/solo6x10/solo6x10-offsets.h
+@@ -57,16 +57,16 @@
+ #define SOLO_MP4E_EXT_ADDR(__solo) \
+ 	(SOLO_EREF_EXT_ADDR(__solo) + SOLO_EREF_EXT_AREA(__solo))
+ #define SOLO_MP4E_EXT_SIZE(__solo) \
+-	max((__solo->nr_chans * 0x00080000),				\
+-	    min(((__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo)) -	\
+-		 __SOLO_JPEG_MIN_SIZE(__solo)), 0x00ff0000))
++	clamp(__solo->sdram_size - SOLO_MP4E_EXT_ADDR(__solo) -	\
++	      __SOLO_JPEG_MIN_SIZE(__solo),			\
++	      __solo->nr_chans * 0x00080000, 0x00ff0000)
+ 
+ #define __SOLO_JPEG_MIN_SIZE(__solo)		(__solo->nr_chans * 0x00080000)
+ #define SOLO_JPEG_EXT_ADDR(__solo) \
+ 		(SOLO_MP4E_EXT_ADDR(__solo) + SOLO_MP4E_EXT_SIZE(__solo))
+ #define SOLO_JPEG_EXT_SIZE(__solo) \
+-	max(__SOLO_JPEG_MIN_SIZE(__solo),				\
+-	    min((__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo)), 0x00ff0000))
++	clamp(__solo->sdram_size - SOLO_JPEG_EXT_ADDR(__solo),	\
++	      __SOLO_JPEG_MIN_SIZE(__solo), 0x00ff0000)
+ 
+ #define SOLO_SDRAM_END(__solo) \
+ 	(SOLO_JPEG_EXT_ADDR(__solo) + SOLO_JPEG_EXT_SIZE(__solo))
+diff --git a/drivers/media/radio/radio-isa.c b/drivers/media/radio/radio-isa.c
+index ad2ac16ff12dd..610d3e3269518 100644
+--- a/drivers/media/radio/radio-isa.c
++++ b/drivers/media/radio/radio-isa.c
+@@ -36,7 +36,7 @@ static int radio_isa_querycap(struct file *file, void  *priv,
+ 
+ 	strscpy(v->driver, isa->drv->driver.driver.name, sizeof(v->driver));
+ 	strscpy(v->card, isa->drv->card, sizeof(v->card));
+-	snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", isa->v4l2_dev.name);
++	snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev_name(isa->v4l2_dev.dev));
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index 8a8271e23c63f..e62afdee20db1 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -723,11 +723,11 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ 	unsigned long flags;
+ 	u64 timestamp;
+ 	u32 delta_stc;
+-	u32 y1, y2;
++	u32 y1;
+ 	u32 x1, x2;
+ 	u32 mean;
+ 	u32 sof;
+-	u64 y;
++	u64 y, y2;
+ 
+ 	if (!uvc_hw_timestamps_param)
+ 		return;
+@@ -767,7 +767,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ 	sof = y;
+ 
+ 	uvc_trace(UVC_TRACE_CLOCK, "%s: PTS %u y %llu.%06llu SOF %u.%06llu "
+-		  "(x1 %u x2 %u y1 %u y2 %u SOF offset %u)\n",
++		  "(x1 %u x2 %u y1 %u y2 %llu SOF offset %u)\n",
+ 		  stream->dev->name, buf->pts,
+ 		  y >> 16, div_u64((y & 0xffff) * 1000000, 65536),
+ 		  sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+@@ -782,7 +782,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ 		goto done;
+ 
+ 	y1 = NSEC_PER_SEC;
+-	y2 = (u32)ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1;
++	y2 = ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1;
+ 
+ 	/* Interpolated and host SOF timestamps can wrap around at slightly
+ 	 * different times. Handle this by adding or removing 2048 to or from
+@@ -802,7 +802,7 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
+ 	timestamp = ktime_to_ns(first->host_time) + y - y1;
+ 
+ 	uvc_trace(UVC_TRACE_CLOCK, "%s: SOF %u.%06llu y %llu ts %llu "
+-		  "buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n",
++		  "buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %llu)\n",
+ 		  stream->dev->name,
+ 		  sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
+ 		  y, timestamp, vbuf->vb2_buf.timestamp,
+diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
+index cd64e0f23ae54..8fcaec5544ff4 100644
+--- a/drivers/mmc/core/mmc_test.c
++++ b/drivers/mmc/core/mmc_test.c
+@@ -3097,13 +3097,13 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+ 	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
+ #ifdef CONFIG_HIGHMEM
+ 	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
++	if (!test->highmem) {
++		count = -ENOMEM;
++		goto free_test_buffer;
++	}
+ #endif
+ 
+-#ifdef CONFIG_HIGHMEM
+-	if (test->buffer && test->highmem) {
+-#else
+ 	if (test->buffer) {
+-#endif
+ 		mutex_lock(&mmc_test_lock);
+ 		mmc_test_run(test, testcase);
+ 		mutex_unlock(&mmc_test_lock);
+@@ -3111,6 +3111,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
+ 
+ #ifdef CONFIG_HIGHMEM
+ 	__free_pages(test->highmem, BUFFER_ORDER);
++free_test_buffer:
+ #endif
+ 	kfree(test->buffer);
+ 	kfree(test);
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index ba37be8e423c2..78e20327828ad 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -3179,6 +3179,10 @@ int dw_mci_probe(struct dw_mci *host)
+ 	host->biu_clk = devm_clk_get(host->dev, "biu");
+ 	if (IS_ERR(host->biu_clk)) {
+ 		dev_dbg(host->dev, "biu clock not available\n");
++		ret = PTR_ERR(host->biu_clk);
++		if (ret == -EPROBE_DEFER)
++			return ret;
++
+ 	} else {
+ 		ret = clk_prepare_enable(host->biu_clk);
+ 		if (ret) {
+@@ -3190,6 +3194,10 @@ int dw_mci_probe(struct dw_mci *host)
+ 	host->ciu_clk = devm_clk_get(host->dev, "ciu");
+ 	if (IS_ERR(host->ciu_clk)) {
+ 		dev_dbg(host->dev, "ciu clock not available\n");
++		ret = PTR_ERR(host->ciu_clk);
++		if (ret == -EPROBE_DEFER)
++			goto err_clk_biu;
++
+ 		host->bus_hz = host->pdata->bus_hz;
+ 	} else {
+ 		ret = clk_prepare_enable(host->ciu_clk);
+diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
+index aa645ff86f641..2347d56c7155d 100644
+--- a/drivers/net/dsa/mv88e6xxx/Makefile
++++ b/drivers/net/dsa/mv88e6xxx/Makefile
+@@ -14,3 +14,7 @@ mv88e6xxx-objs += port_hidden.o
+ mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
+ mv88e6xxx-objs += serdes.o
+ mv88e6xxx-objs += smi.o
++mv88e6xxx-objs += trace.o
++
++# for tracing framework to find trace.h
++CFLAGS_trace.o := -I$(src)
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
+index 0ae96a1e919b6..dc44e197817a8 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.h
++++ b/drivers/net/dsa/mv88e6xxx/global1.h
+@@ -339,5 +339,6 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
+ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+ int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
+ void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
++int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
+ 
+ #endif /* _MV88E6XXX_GLOBAL1_H */
+diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+index 792a96ef418ff..f2e07d903aa73 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
++++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
+@@ -12,6 +12,7 @@
+ 
+ #include "chip.h"
+ #include "global1.h"
++#include "trace.h"
+ 
+ /* Offset 0x01: ATU FID Register */
+ 
+@@ -82,6 +83,19 @@ static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
+ 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
+ }
+ 
++static int mv88e6xxx_g1_read_atu_violation(struct mv88e6xxx_chip *chip)
++{
++	int err;
++
++	err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
++				 MV88E6XXX_G1_ATU_OP_BUSY |
++				 MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
++	if (err)
++		return err;
++
++	return mv88e6xxx_g1_atu_op_wait(chip);
++}
++
+ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
+ {
+ 	u16 val;
+@@ -122,6 +136,46 @@ static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
+ 	return mv88e6xxx_g1_atu_op_wait(chip);
+ }
+ 
++int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
++{
++	return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
++}
++
++static int mv88e6xxx_g1_atu_fid_read(struct mv88e6xxx_chip *chip, u16 *fid)
++{
++	u16 val = 0, upper = 0, op = 0;
++	int err = -EOPNOTSUPP;
++
++	if (mv88e6xxx_num_databases(chip) > 256) {
++		err = mv88e6xxx_g1_read(chip, MV88E6352_G1_ATU_FID, &val);
++		val &= 0xfff;
++		if (err)
++			return err;
++	} else {
++		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &op);
++		if (err)
++			return err;
++		if (mv88e6xxx_num_databases(chip) > 64) {
++			/* ATU DBNum[7:4] are located in ATU Control 15:12 */
++			err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
++						&upper);
++			if (err)
++				return err;
++
++			upper = (upper >> 8) & 0x00f0;
++		} else if (mv88e6xxx_num_databases(chip) > 16) {
++			/* ATU DBNum[5:4] are located in ATU Operation 9:8 */
++			upper = (op >> 4) & 0x30;
++		}
++
++		/* ATU DBNum[3:0] are located in ATU Operation 3:0 */
++		val = (op & 0xf) | upper;
++	}
++	*fid = val;
++
++	return err;
++}
++
+ /* Offset 0x0C: ATU Data Register */
+ 
+ static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
+@@ -316,14 +370,12 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ {
+ 	struct mv88e6xxx_chip *chip = dev_id;
+ 	struct mv88e6xxx_atu_entry entry;
+-	int spid;
+-	int err;
+-	u16 val;
++	int err, spid;
++	u16 val, fid;
+ 
+ 	mv88e6xxx_reg_lock(chip);
+ 
+-	err = mv88e6xxx_g1_atu_op(chip, 0,
+-				  MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
++	err = mv88e6xxx_g1_read_atu_violation(chip);
+ 	if (err)
+ 		goto out;
+ 
+@@ -331,6 +383,10 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ 	if (err)
+ 		goto out;
+ 
++	err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
++	if (err)
++		goto out;
++
+ 	err = mv88e6xxx_g1_atu_data_read(chip, &entry);
+ 	if (err)
+ 		goto out;
+@@ -348,24 +404,25 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
+ 	}
+ 
+ 	if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+-		dev_err_ratelimited(chip->dev,
+-				    "ATU member violation for %pM portvec %x spid %d\n",
+-				    entry.mac, entry.portvec, spid);
++		trace_mv88e6xxx_atu_member_violation(chip->dev, spid,
++						     entry.portvec, entry.mac,
++						     fid);
+ 		chip->ports[spid].atu_member_violation++;
+ 	}
+ 
+ 	if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
+-		dev_err_ratelimited(chip->dev,
+-				    "ATU miss violation for %pM portvec %x spid %d\n",
+-				    entry.mac, entry.portvec, spid);
++		trace_mv88e6xxx_atu_miss_violation(chip->dev, spid,
++						   entry.portvec, entry.mac,
++						   fid);
+ 		chip->ports[spid].atu_miss_violation++;
+ 	}
+ 
+ 	if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
+-		dev_err_ratelimited(chip->dev,
+-				    "ATU full violation for %pM portvec %x spid %d\n",
+-				    entry.mac, entry.portvec, spid);
+-		chip->ports[spid].atu_full_violation++;
++		trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
++						   entry.portvec, entry.mac,
++						   fid);
++		if (spid < ARRAY_SIZE(chip->ports))
++			chip->ports[spid].atu_full_violation++;
+ 	}
+ 	mv88e6xxx_reg_unlock(chip);
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
+index 7674b0b8cc707..4d4bd4a162815 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.c
++++ b/drivers/net/dsa/mv88e6xxx/global2.c
+@@ -280,6 +280,19 @@ int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
+ 	return err;
+ }
+ 
++/* Offset 0x0E: ATU Statistics */
++
++int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin)
++{
++	return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_ATU_STATS,
++				  kind | bin);
++}
++
++int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats)
++{
++	return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_ATU_STATS, stats);
++}
++
+ /* Offset 0x0F: Priority Override Table */
+ 
+ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
+index 12807e52ecea1..de63e3f08e5cd 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.h
++++ b/drivers/net/dsa/mv88e6xxx/global2.h
+@@ -113,7 +113,16 @@
+ #define MV88E6XXX_G2_SWITCH_MAC_DATA_MASK	0x00ff
+ 
+ /* Offset 0x0E: ATU Stats Register */
+-#define MV88E6XXX_G2_ATU_STATS		0x0e
++#define MV88E6XXX_G2_ATU_STATS				0x0e
++#define MV88E6XXX_G2_ATU_STATS_BIN_0			(0x0 << 14)
++#define MV88E6XXX_G2_ATU_STATS_BIN_1			(0x1 << 14)
++#define MV88E6XXX_G2_ATU_STATS_BIN_2			(0x2 << 14)
++#define MV88E6XXX_G2_ATU_STATS_BIN_3			(0x3 << 14)
++#define MV88E6XXX_G2_ATU_STATS_MODE_ALL			(0x0 << 12)
++#define MV88E6XXX_G2_ATU_STATS_MODE_ALL_DYNAMIC		(0x1 << 12)
++#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL		(0x2 << 12)
++#define MV88E6XXX_G2_ATU_STATS_MODE_FID_ALL_DYNAMIC	(0x3 << 12)
++#define MV88E6XXX_G2_ATU_STATS_MASK			0x0fff
+ 
+ /* Offset 0x0F: Priority Override Table */
+ #define MV88E6XXX_G2_PRIO_OVERRIDE		0x0f
+@@ -354,6 +363,8 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
+ 
+ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ 				      bool external);
++int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
++int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
+ 
+ #else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
+ 
+@@ -516,6 +527,18 @@ static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip,
+ 	return -EOPNOTSUPP;
+ }
+ 
++static inline int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip,
++					     u16 kind, u16 bin)
++{
++	return -EOPNOTSUPP;
++}
++
++static inline int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip,
++					     u16 *stats)
++{
++	return -EOPNOTSUPP;
++}
++
+ #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
+ 
+ #endif /* _MV88E6XXX_GLOBAL2_H */
+diff --git a/drivers/net/dsa/mv88e6xxx/trace.c b/drivers/net/dsa/mv88e6xxx/trace.c
+new file mode 100644
+index 0000000000000..7833cb50ca5d7
+--- /dev/null
++++ b/drivers/net/dsa/mv88e6xxx/trace.c
+@@ -0,0 +1,6 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/* Copyright 2022 NXP
++ */
++
++#define CREATE_TRACE_POINTS
++#include "trace.h"
+diff --git a/drivers/net/dsa/mv88e6xxx/trace.h b/drivers/net/dsa/mv88e6xxx/trace.h
+new file mode 100644
+index 0000000000000..d9ab5c8dee55d
+--- /dev/null
++++ b/drivers/net/dsa/mv88e6xxx/trace.h
+@@ -0,0 +1,66 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/* Copyright 2022 NXP
++ */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM	mv88e6xxx
++
++#if !defined(_MV88E6XXX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _MV88E6XXX_TRACE_H
++
++#include <linux/device.h>
++#include <linux/if_ether.h>
++#include <linux/tracepoint.h>
++
++DECLARE_EVENT_CLASS(mv88e6xxx_atu_violation,
++
++	TP_PROTO(const struct device *dev, int spid, u16 portvec,
++		 const unsigned char *addr, u16 fid),
++
++	TP_ARGS(dev, spid, portvec, addr, fid),
++
++	TP_STRUCT__entry(
++		__string(name, dev_name(dev))
++		__field(int, spid)
++		__field(u16, portvec)
++		__array(unsigned char, addr, ETH_ALEN)
++		__field(u16, fid)
++	),
++
++	TP_fast_assign(
++		__assign_str(name, dev_name(dev));
++		__entry->spid = spid;
++		__entry->portvec = portvec;
++		memcpy(__entry->addr, addr, ETH_ALEN);
++		__entry->fid = fid;
++	),
++
++	TP_printk("dev %s spid %d portvec 0x%x addr %pM fid %u",
++		  __get_str(name), __entry->spid, __entry->portvec,
++		  __entry->addr, __entry->fid)
++);
++
++DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_member_violation,
++	     TP_PROTO(const struct device *dev, int spid, u16 portvec,
++		      const unsigned char *addr, u16 fid),
++	     TP_ARGS(dev, spid, portvec, addr, fid));
++
++DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_miss_violation,
++	     TP_PROTO(const struct device *dev, int spid, u16 portvec,
++		      const unsigned char *addr, u16 fid),
++	     TP_ARGS(dev, spid, portvec, addr, fid));
++
++DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_full_violation,
++	     TP_PROTO(const struct device *dev, int spid, u16 portvec,
++		      const unsigned char *addr, u16 fid),
++	     TP_ARGS(dev, spid, portvec, addr, fid));
++
++#endif /* _MV88E6XXX_TRACE_H */
++
++/* We don't want to use include/trace/events */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE	trace
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
+index c7ff98c26ee39..a1dd82d25ce3c 100644
+--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
++++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
+@@ -531,7 +531,7 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ 		return 0;
+ 	}
+ 
+-	cmd = (phy << 21) | (regnum << 16);
++	cmd = (phy << 21) | (regnum << 16) | val;
+ 	ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 44f86a33ef624..75ed4723070ff 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -969,7 +969,8 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
+ 	 * in the Compressed Filter Tuple.
+ 	 */
+ 	if (tp->vlan_shift >= 0 && fs->mask.ivlan)
+-		ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
++		ntuple |= (u64)(FT_VLAN_VLD_F |
++				fs->val.ivlan) << tp->vlan_shift;
+ 
+ 	if (tp->port_shift >= 0 && fs->mask.iport)
+ 		ntuple |= (u64)fs->val.iport << tp->port_shift;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index d09cc10b3517f..8736e254f098b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -4388,6 +4388,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+ 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+ 	int ret;
+ 
++	if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
++		hns3_nic_net_stop(netdev);
++
+ 	if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ 		netdev_warn(netdev, "already uninitialized\n");
+ 		return 0;
+diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
+index 1a86184d44c0a..e0c9fee4e1e65 100644
+--- a/drivers/net/ethernet/i825xx/sun3_82586.c
++++ b/drivers/net/ethernet/i825xx/sun3_82586.c
+@@ -990,7 +990,7 @@ static void sun3_82586_timeout(struct net_device *dev)
+ 	{
+ #ifdef DEBUG
+ 		printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
+-		printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
++		printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status));
+ 		printk("%s: check, whether you set the right interrupt number!\n",dev->name);
+ #endif
+ 		sun3_82586_close(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index acd946f2ddbe7..be49a2a53f29d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -676,7 +676,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ 	if (num_tuples <= 0) {
+ 		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
+ 			    __func__, num_tuples);
+-		return num_tuples;
++		return num_tuples < 0 ? num_tuples : -EINVAL;
+ 	}
+ 
+ 	eth_ft = get_flow_table(priv, fs, num_tuples);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index 04e51af32178c..c43d437f22bdf 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -159,22 +159,17 @@
+ #define XAE_RCW1_OFFSET		0x00000404 /* Rx Configuration Word 1 */
+ #define XAE_TC_OFFSET		0x00000408 /* Tx Configuration */
+ #define XAE_FCC_OFFSET		0x0000040C /* Flow Control Configuration */
+-#define XAE_EMMC_OFFSET		0x00000410 /* EMAC mode configuration */
+-#define XAE_PHYC_OFFSET		0x00000414 /* RGMII/SGMII configuration */
+-#define XAE_MDIO_MC_OFFSET	0x00000500 /* MII Management Config */
+-#define XAE_MDIO_MCR_OFFSET	0x00000504 /* MII Management Control */
+-#define XAE_MDIO_MWD_OFFSET	0x00000508 /* MII Management Write Data */
+-#define XAE_MDIO_MRD_OFFSET	0x0000050C /* MII Management Read Data */
+-#define XAE_MDIO_MIS_OFFSET	0x00000600 /* MII Management Interrupt Status */
+-/* MII Mgmt Interrupt Pending register offset */
+-#define XAE_MDIO_MIP_OFFSET	0x00000620
+-/* MII Management Interrupt Enable register offset */
+-#define XAE_MDIO_MIE_OFFSET	0x00000640
+-/* MII Management Interrupt Clear register offset. */
+-#define XAE_MDIO_MIC_OFFSET	0x00000660
++#define XAE_EMMC_OFFSET		0x00000410 /* MAC speed configuration */
++#define XAE_PHYC_OFFSET		0x00000414 /* RX Max Frame Configuration */
++#define XAE_ID_OFFSET		0x000004F8 /* Identification register */
++#define XAE_MDIO_MC_OFFSET	0x00000500 /* MDIO Setup */
++#define XAE_MDIO_MCR_OFFSET	0x00000504 /* MDIO Control */
++#define XAE_MDIO_MWD_OFFSET	0x00000508 /* MDIO Write Data */
++#define XAE_MDIO_MRD_OFFSET	0x0000050C /* MDIO Read Data */
+ #define XAE_UAW0_OFFSET		0x00000700 /* Unicast address word 0 */
+ #define XAE_UAW1_OFFSET		0x00000704 /* Unicast address word 1 */
+-#define XAE_FMI_OFFSET		0x00000708 /* Filter Mask Index */
++#define XAE_FMI_OFFSET		0x00000708 /* Frame Filter Control */
++#define XAE_FFE_OFFSET		0x0000070C /* Frame Filter Enable */
+ #define XAE_AF0_OFFSET		0x00000710 /* Address Filter 0 */
+ #define XAE_AF1_OFFSET		0x00000714 /* Address Filter 1 */
+ 
+@@ -313,7 +308,7 @@
+  */
+ #define XAE_UAW1_UNICASTADDR_MASK	0x0000FFFF
+ 
+-/* Bit masks for Axi Ethernet FMI register */
++/* Bit masks for Axi Ethernet FMC register */
+ #define XAE_FMI_PM_MASK			0x80000000 /* Promis. mode enable */
+ #define XAE_FMI_IND_MASK		0x00000003 /* Index Mask */
+ 
+@@ -335,6 +330,7 @@
+ #define XAE_FEATURE_PARTIAL_TX_CSUM	(1 << 1)
+ #define XAE_FEATURE_FULL_RX_CSUM	(1 << 2)
+ #define XAE_FEATURE_FULL_TX_CSUM	(1 << 3)
++#define XAE_FEATURE_DMA_64BIT		(1 << 4)
+ 
+ #define XAE_NO_CSUM_OFFLOAD		0
+ 
+@@ -347,9 +343,9 @@
+ /**
+  * struct axidma_bd - Axi Dma buffer descriptor layout
+  * @next:         MM2S/S2MM Next Descriptor Pointer
+- * @reserved1:    Reserved and not used
++ * @next_msb:     MM2S/S2MM Next Descriptor Pointer (high 32 bits)
+  * @phys:         MM2S/S2MM Buffer Address
+- * @reserved2:    Reserved and not used
++ * @phys_msb:     MM2S/S2MM Buffer Address (high 32 bits)
+  * @reserved3:    Reserved and not used
+  * @reserved4:    Reserved and not used
+  * @cntrl:        MM2S/S2MM Control value
+@@ -362,9 +358,9 @@
+  */
+ struct axidma_bd {
+ 	u32 next;	/* Physical address of next buffer descriptor */
+-	u32 reserved1;
++	u32 next_msb;	/* high 32 bits for IP >= v7.1, reserved on older IP */
+ 	u32 phys;
+-	u32 reserved2;
++	u32 phys_msb;	/* for IP >= v7.1, reserved for older IP */
+ 	u32 reserved3;
+ 	u32 reserved4;
+ 	u32 cntrl;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index bbc1cf288d25f..2aacc077ee2bc 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -148,6 +148,34 @@ static inline void axienet_dma_out32(struct axienet_local *lp,
+ 	iowrite32(value, lp->dma_regs + reg);
+ }
+ 
++static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
++				 dma_addr_t addr)
++{
++	axienet_dma_out32(lp, reg, lower_32_bits(addr));
++
++	if (lp->features & XAE_FEATURE_DMA_64BIT)
++		axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
++}
++
++static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
++			       struct axidma_bd *desc)
++{
++	desc->phys = lower_32_bits(addr);
++	if (lp->features & XAE_FEATURE_DMA_64BIT)
++		desc->phys_msb = upper_32_bits(addr);
++}
++
++static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
++				     struct axidma_bd *desc)
++{
++	dma_addr_t ret = desc->phys;
++
++	if (lp->features & XAE_FEATURE_DMA_64BIT)
++		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
++
++	return ret;
++}
++
+ /**
+  * axienet_dma_bd_release - Release buffer descriptor rings
+  * @ndev:	Pointer to the net_device structure
+@@ -161,24 +189,41 @@ static void axienet_dma_bd_release(struct net_device *ndev)
+ 	int i;
+ 	struct axienet_local *lp = netdev_priv(ndev);
+ 
++	/* If we end up here, tx_bd_v must have been DMA allocated. */
++	dma_free_coherent(ndev->dev.parent,
++			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
++			  lp->tx_bd_v,
++			  lp->tx_bd_p);
++
++	if (!lp->rx_bd_v)
++		return;
++
+ 	for (i = 0; i < lp->rx_bd_num; i++) {
+-		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+-				 lp->max_frm_size, DMA_FROM_DEVICE);
++		dma_addr_t phys;
++
++		/* A NULL skb means this descriptor has not been initialised
++		 * at all.
++		 */
++		if (!lp->rx_bd_v[i].skb)
++			break;
++
+ 		dev_kfree_skb(lp->rx_bd_v[i].skb);
+-	}
+ 
+-	if (lp->rx_bd_v) {
+-		dma_free_coherent(ndev->dev.parent,
+-				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+-				  lp->rx_bd_v,
+-				  lp->rx_bd_p);
+-	}
+-	if (lp->tx_bd_v) {
+-		dma_free_coherent(ndev->dev.parent,
+-				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+-				  lp->tx_bd_v,
+-				  lp->tx_bd_p);
++		/* For each descriptor, we programmed cntrl with the (non-zero)
++		 * descriptor size, after it had been successfully allocated.
++		 * So a non-zero value in there means we need to unmap it.
++		 */
++		if (lp->rx_bd_v[i].cntrl) {
++			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
++			dma_unmap_single(ndev->dev.parent, phys,
++					 lp->max_frm_size, DMA_FROM_DEVICE);
++		}
+ 	}
++
++	dma_free_coherent(ndev->dev.parent,
++			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
++			  lp->rx_bd_v,
++			  lp->rx_bd_p);
+ }
+ 
+ /**
+@@ -208,7 +253,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
+ 					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
+ 					 &lp->tx_bd_p, GFP_KERNEL);
+ 	if (!lp->tx_bd_v)
+-		goto out;
++		return -ENOMEM;
+ 
+ 	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ 					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
+@@ -217,25 +262,37 @@ static int axienet_dma_bd_init(struct net_device *ndev)
+ 		goto out;
+ 
+ 	for (i = 0; i < lp->tx_bd_num; i++) {
+-		lp->tx_bd_v[i].next = lp->tx_bd_p +
+-				      sizeof(*lp->tx_bd_v) *
+-				      ((i + 1) % lp->tx_bd_num);
++		dma_addr_t addr = lp->tx_bd_p +
++				  sizeof(*lp->tx_bd_v) *
++				  ((i + 1) % lp->tx_bd_num);
++
++		lp->tx_bd_v[i].next = lower_32_bits(addr);
++		if (lp->features & XAE_FEATURE_DMA_64BIT)
++			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
+ 	}
+ 
+ 	for (i = 0; i < lp->rx_bd_num; i++) {
+-		lp->rx_bd_v[i].next = lp->rx_bd_p +
+-				      sizeof(*lp->rx_bd_v) *
+-				      ((i + 1) % lp->rx_bd_num);
++		dma_addr_t addr;
++
++		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
++			((i + 1) % lp->rx_bd_num);
++		lp->rx_bd_v[i].next = lower_32_bits(addr);
++		if (lp->features & XAE_FEATURE_DMA_64BIT)
++			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
+ 
+ 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+ 		if (!skb)
+ 			goto out;
+ 
+ 		lp->rx_bd_v[i].skb = skb;
+-		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
+-						     skb->data,
+-						     lp->max_frm_size,
+-						     DMA_FROM_DEVICE);
++		addr = dma_map_single(ndev->dev.parent, skb->data,
++				      lp->max_frm_size, DMA_FROM_DEVICE);
++		if (dma_mapping_error(ndev->dev.parent, addr)) {
++			netdev_err(ndev, "DMA mapping error\n");
++			goto out;
++		}
++		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
++
+ 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
+ 	}
+ 
+@@ -268,18 +325,18 @@ static int axienet_dma_bd_init(struct net_device *ndev)
+ 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ 	 * halted state. This will make the Rx side ready for reception.
+ 	 */
+-	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
++	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+ 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
+-	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+-			  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
++	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
++			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ 
+ 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
+ 	 * Tx channel is now ready to run. But only after we write to the
+ 	 * tail pointer register that the Tx channel will start transmitting.
+ 	 */
+-	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
++	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+ 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
+@@ -352,7 +409,7 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p)
+  */
+ static void axienet_set_multicast_list(struct net_device *ndev)
+ {
+-	int i;
++	int i = 0;
+ 	u32 reg, af0reg, af1reg;
+ 	struct axienet_local *lp = netdev_priv(ndev);
+ 
+@@ -370,7 +427,10 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ 	} else if (!netdev_mc_empty(ndev)) {
+ 		struct netdev_hw_addr *ha;
+ 
+-		i = 0;
++		reg = axienet_ior(lp, XAE_FMI_OFFSET);
++		reg &= ~XAE_FMI_PM_MASK;
++		axienet_iow(lp, XAE_FMI_OFFSET, reg);
++
+ 		netdev_for_each_mc_addr(ha, ndev) {
+ 			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
+ 				break;
+@@ -389,6 +449,7 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ 			axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ 			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
+ 			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
++			axienet_iow(lp, XAE_FFE_OFFSET, 1);
+ 			i++;
+ 		}
+ 	} else {
+@@ -396,18 +457,15 @@ static void axienet_set_multicast_list(struct net_device *ndev)
+ 		reg &= ~XAE_FMI_PM_MASK;
+ 
+ 		axienet_iow(lp, XAE_FMI_OFFSET, reg);
+-
+-		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
+-			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+-			reg |= i;
+-
+-			axienet_iow(lp, XAE_FMI_OFFSET, reg);
+-			axienet_iow(lp, XAE_AF0_OFFSET, 0);
+-			axienet_iow(lp, XAE_AF1_OFFSET, 0);
+-		}
+-
+ 		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
+ 	}
++
++	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
++		reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
++		reg |= i;
++		axienet_iow(lp, XAE_FMI_OFFSET, reg);
++		axienet_iow(lp, XAE_FFE_OFFSET, 0);
++	}
+ }
+ 
+ /**
+@@ -533,33 +591,49 @@ static int axienet_device_reset(struct net_device *ndev)
+ }
+ 
+ /**
+- * axienet_start_xmit_done - Invoked once a transmit is completed by the
+- * Axi DMA Tx channel.
++ * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
+  * @ndev:	Pointer to the net_device structure
++ * @first_bd:	Index of first descriptor to clean up
++ * @nr_bds:	Number of descriptors to clean up, can be -1 if unknown.
++ * @sizep:	Pointer to a u32 filled with the total sum of all bytes
++ * 		in all cleaned-up descriptors. Ignored if NULL.
+  *
+- * This function is invoked from the Axi DMA Tx isr to notify the completion
+- * of transmit operation. It clears fields in the corresponding Tx BDs and
+- * unmaps the corresponding buffer so that CPU can regain ownership of the
+- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+- * required.
++ * Called either after a successful transmit operation, or after an error
++ * occurred while setting up the chain.
++ * Returns the number of descriptors handled.
+  */
+-static void axienet_start_xmit_done(struct net_device *ndev)
++static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
++				 int nr_bds, u32 *sizep)
+ {
+-	u32 size = 0;
+-	u32 packets = 0;
+ 	struct axienet_local *lp = netdev_priv(ndev);
+ 	struct axidma_bd *cur_p;
+-	unsigned int status = 0;
+-
+-	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+-	status = cur_p->status;
+-	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
+-		dma_unmap_single(ndev->dev.parent, cur_p->phys,
+-				(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
+-				DMA_TO_DEVICE);
+-		if (cur_p->skb)
++	int max_bds = nr_bds;
++	unsigned int status;
++	dma_addr_t phys;
++	int i;
++
++	if (max_bds == -1)
++		max_bds = lp->tx_bd_num;
++
++	for (i = 0; i < max_bds; i++) {
++		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
++		status = cur_p->status;
++
++		/* If no number is given, clean up *all* descriptors that have
++		 * been completed by the MAC.
++		 */
++		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
++			break;
++
++		phys = desc_get_phys_addr(lp, cur_p);
++		dma_unmap_single(ndev->dev.parent, phys,
++				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
++				 DMA_TO_DEVICE);
++
++		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ 			dev_consume_skb_irq(cur_p->skb);
+-		/*cur_p->phys = 0;*/
++
++		cur_p->cntrl = 0;
+ 		cur_p->app0 = 0;
+ 		cur_p->app1 = 0;
+ 		cur_p->app2 = 0;
+@@ -567,15 +641,36 @@ static void axienet_start_xmit_done(struct net_device *ndev)
+ 		cur_p->status = 0;
+ 		cur_p->skb = NULL;
+ 
+-		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+-		packets++;
+-
+-		if (++lp->tx_bd_ci >= lp->tx_bd_num)
+-			lp->tx_bd_ci = 0;
+-		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
+-		status = cur_p->status;
++		if (sizep)
++			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
+ 	}
+ 
++	return i;
++}
++
++/**
++ * axienet_start_xmit_done - Invoked once a transmit is completed by the
++ * Axi DMA Tx channel.
++ * @ndev:	Pointer to the net_device structure
++ *
++ * This function is invoked from the Axi DMA Tx isr to notify the completion
++ * of transmit operation. It clears fields in the corresponding Tx BDs and
++ * unmaps the corresponding buffer so that CPU can regain ownership of the
++ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
++ * required.
++ */
++static void axienet_start_xmit_done(struct net_device *ndev)
++{
++	struct axienet_local *lp = netdev_priv(ndev);
++	u32 packets = 0;
++	u32 size = 0;
++
++	packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
++
++	lp->tx_bd_ci += packets;
++	if (lp->tx_bd_ci >= lp->tx_bd_num)
++		lp->tx_bd_ci -= lp->tx_bd_num;
++
+ 	ndev->stats.tx_packets += packets;
+ 	ndev->stats.tx_bytes += size;
+ 
+@@ -629,9 +724,10 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 	u32 csum_start_off;
+ 	u32 csum_index_off;
+ 	skb_frag_t *frag;
+-	dma_addr_t tail_p;
++	dma_addr_t tail_p, phys;
+ 	struct axienet_local *lp = netdev_priv(ndev);
+ 	struct axidma_bd *cur_p;
++	u32 orig_tail_ptr = lp->tx_bd_tail;
+ 
+ 	num_frag = skb_shinfo(skb)->nr_frags;
+ 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+@@ -667,19 +763,37 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
+ 	}
+ 
++	phys = dma_map_single(ndev->dev.parent, skb->data,
++			      skb_headlen(skb), DMA_TO_DEVICE);
++	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
++		if (net_ratelimit())
++			netdev_err(ndev, "TX DMA mapping error\n");
++		ndev->stats.tx_dropped++;
++		return NETDEV_TX_OK;
++	}
++	desc_set_phys_addr(lp, phys, cur_p);
+ 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
+-	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+-				     skb_headlen(skb), DMA_TO_DEVICE);
+ 
+ 	for (ii = 0; ii < num_frag; ii++) {
+ 		if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ 			lp->tx_bd_tail = 0;
+ 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ 		frag = &skb_shinfo(skb)->frags[ii];
+-		cur_p->phys = dma_map_single(ndev->dev.parent,
+-					     skb_frag_address(frag),
+-					     skb_frag_size(frag),
+-					     DMA_TO_DEVICE);
++		phys = dma_map_single(ndev->dev.parent,
++				      skb_frag_address(frag),
++				      skb_frag_size(frag),
++				      DMA_TO_DEVICE);
++		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
++			if (net_ratelimit())
++				netdev_err(ndev, "TX DMA mapping error\n");
++			ndev->stats.tx_dropped++;
++			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
++					      NULL);
++			lp->tx_bd_tail = orig_tail_ptr;
++
++			return NETDEV_TX_OK;
++		}
++		desc_set_phys_addr(lp, phys, cur_p);
+ 		cur_p->cntrl = skb_frag_size(frag);
+ 	}
+ 
+@@ -688,7 +802,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 
+ 	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ 	/* Start the transfer */
+-	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
++	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
+ 	if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ 		lp->tx_bd_tail = 0;
+ 
+@@ -718,10 +832,12 @@ static void axienet_recv(struct net_device *ndev)
+ 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
+ 
+ 	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
++		dma_addr_t phys;
++
+ 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ 
+-		dma_unmap_single(ndev->dev.parent, cur_p->phys,
+-				 lp->max_frm_size,
++		phys = desc_get_phys_addr(lp, cur_p);
++		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
+ 				 DMA_FROM_DEVICE);
+ 
+ 		skb = cur_p->skb;
+@@ -757,9 +873,17 @@ static void axienet_recv(struct net_device *ndev)
+ 		if (!new_skb)
+ 			return;
+ 
+-		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
+-					     lp->max_frm_size,
+-					     DMA_FROM_DEVICE);
++		phys = dma_map_single(ndev->dev.parent, new_skb->data,
++				      lp->max_frm_size,
++				      DMA_FROM_DEVICE);
++		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
++			if (net_ratelimit())
++				netdev_err(ndev, "RX DMA mapping error\n");
++			dev_kfree_skb(new_skb);
++			return;
++		}
++		desc_set_phys_addr(lp, phys, cur_p);
++
+ 		cur_p->cntrl = lp->max_frm_size;
+ 		cur_p->status = 0;
+ 		cur_p->skb = new_skb;
+@@ -773,7 +897,7 @@ static void axienet_recv(struct net_device *ndev)
+ 	ndev->stats.rx_bytes += size;
+ 
+ 	if (tail_p)
+-		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
++		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+ }
+ 
+ /**
+@@ -803,7 +927,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
+ 		return IRQ_NONE;
+ 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ 		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
+-		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
++		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
++			(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
+ 			(lp->tx_bd_v[lp->tx_bd_ci]).phys);
+ 
+ 		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+@@ -852,7 +977,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
+ 		return IRQ_NONE;
+ 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
+ 		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
+-		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
++		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
++			(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
+ 			(lp->rx_bd_v[lp->rx_bd_ci]).phys);
+ 
+ 		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+@@ -1182,10 +1308,6 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
+ 	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
+ 	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
+ 	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
+-	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
+-	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
+-	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
+-	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
+ 	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
+ 	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
+ 	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
+@@ -1549,14 +1671,18 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ 
+ 	for (i = 0; i < lp->tx_bd_num; i++) {
+ 		cur_p = &lp->tx_bd_v[i];
+-		if (cur_p->phys)
+-			dma_unmap_single(ndev->dev.parent, cur_p->phys,
++		if (cur_p->cntrl) {
++			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
++
++			dma_unmap_single(ndev->dev.parent, addr,
+ 					 (cur_p->cntrl &
+ 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
+ 					 DMA_TO_DEVICE);
++		}
+ 		if (cur_p->skb)
+ 			dev_kfree_skb_irq(cur_p->skb);
+ 		cur_p->phys = 0;
++		cur_p->phys_msb = 0;
+ 		cur_p->cntrl = 0;
+ 		cur_p->status = 0;
+ 		cur_p->app0 = 0;
+@@ -1610,18 +1736,18 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ 	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ 	 * halted state. This will make the Rx side ready for reception.
+ 	 */
+-	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
++	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ 	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ 	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
+ 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
+-	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+-			  (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
++	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
++			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+ 
+ 	/* Write to the RS (Run-stop) bit in the Tx channel control register.
+ 	 * Tx channel is now ready to run. But only after we write to the
+ 	 * tail pointer register that the Tx channel will start transmitting
+ 	 */
+-	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
++	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ 	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ 	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
+ 			  cr | XAXIDMA_CR_RUNSTOP_MASK);
+@@ -1832,6 +1958,29 @@ static int axienet_probe(struct platform_device *pdev)
+ 		goto free_netdev;
+ 	}
+ 
++	/* Autodetect the need for 64-bit DMA pointers.
++	 * When the IP is configured for a bus width bigger than 32 bits,
++	 * writing the MSB registers is mandatory, even if they are all 0.
++	 * We can detect this case by writing all 1's to one such register
++	 * and see if that sticks: when the IP is configured for 32 bits
++	 * only, those registers are RES0.
++	 * Those MSB registers were introduced in IP v7.1, which we check first.
++	 */
++	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
++		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
++
++		iowrite32(0x0, desc);
++		if (ioread32(desc) == 0) {	/* sanity check */
++			iowrite32(0xffffffff, desc);
++			if (ioread32(desc) > 0) {
++				lp->features |= XAE_FEATURE_DMA_64BIT;
++				dev_info(&pdev->dev,
++					 "autodetected 64-bit DMA range\n");
++			}
++			iowrite32(0x0, desc);
++		}
++	}
++
+ 	/* Check for Ethernet core IRQ (optional) */
+ 	if (lp->eth_irq <= 0)
+ 		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 0d6d2fe9eabdb..c868f4ffa240f 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -573,6 +573,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (skb_cow_head(skb, dev->needed_headroom))
+ 		goto tx_err;
+ 
++	if (!pskb_inet_may_pull(skb))
++		goto tx_err;
++
+ 	skb_reset_inner_headers(skb);
+ 
+ 	/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
+@@ -804,7 +807,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
+ 	sock = sockfd_lookup(fd, &err);
+ 	if (!sock) {
+ 		pr_debug("gtp socket fd=%d not found\n", fd);
+-		return NULL;
++		return ERR_PTR(err);
+ 	}
+ 
+ 	sk = sock->sk;
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 472b02bcfcbf4..92b51c4c46f57 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3372,11 +3372,23 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
+ 	set_bit(PHY_RESET, &tp->flags);
+ }
+ 
+-static void r8152b_exit_oob(struct r8152 *tp)
++static void wait_oob_link_list_ready(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+ 	int i;
+ 
++	for (i = 0; i < 1000; i++) {
++		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
++		if (ocp_data & LINK_LIST_READY)
++			break;
++		usleep_range(1000, 2000);
++	}
++}
++
++static void r8152b_exit_oob(struct r8152 *tp)
++{
++	u32 ocp_data;
++
+ 	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ 	ocp_data &= ~RCR_ACPT_ALL;
+ 	ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+@@ -3394,23 +3406,13 @@ static void r8152b_exit_oob(struct r8152 *tp)
+ 	ocp_data &= ~MCU_BORW_EN;
+ 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+ 
+-	for (i = 0; i < 1000; i++) {
+-		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+-		if (ocp_data & LINK_LIST_READY)
+-			break;
+-		usleep_range(1000, 2000);
+-	}
++	wait_oob_link_list_ready(tp);
+ 
+ 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ 	ocp_data |= RE_INIT_LL;
+ 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+ 
+-	for (i = 0; i < 1000; i++) {
+-		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+-		if (ocp_data & LINK_LIST_READY)
+-			break;
+-		usleep_range(1000, 2000);
+-	}
++	wait_oob_link_list_ready(tp);
+ 
+ 	rtl8152_nic_reset(tp);
+ 
+@@ -3452,7 +3454,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
+ static void r8152b_enter_oob(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+-	int i;
+ 
+ 	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ 	ocp_data &= ~NOW_IS_OOB;
+@@ -3464,23 +3465,13 @@ static void r8152b_enter_oob(struct r8152 *tp)
+ 
+ 	rtl_disable(tp);
+ 
+-	for (i = 0; i < 1000; i++) {
+-		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+-		if (ocp_data & LINK_LIST_READY)
+-			break;
+-		usleep_range(1000, 2000);
+-	}
++	wait_oob_link_list_ready(tp);
+ 
+ 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ 	ocp_data |= RE_INIT_LL;
+ 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+ 
+-	for (i = 0; i < 1000; i++) {
+-		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+-		if (ocp_data & LINK_LIST_READY)
+-			break;
+-		usleep_range(1000, 2000);
+-	}
++	wait_oob_link_list_ready(tp);
+ 
+ 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+ 
+@@ -3705,7 +3696,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
+ static void r8153_first_init(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+-	int i;
+ 
+ 	rxdy_gated_en(tp, true);
+ 	r8153_teredo_off(tp);
+@@ -3725,23 +3715,13 @@ static void r8153_first_init(struct r8152 *tp)
+ 	ocp_data &= ~MCU_BORW_EN;
+ 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+ 
+-	for (i = 0; i < 1000; i++) {
+-		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+-		if (ocp_data & LINK_LIST_READY)
+-			break;
+-		usleep_range(1000, 2000);
+-	}
++	wait_oob_link_list_ready(tp);
+ 
+ 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ 	ocp_data |= RE_INIT_LL;
+ 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+ 
+-	for (i = 0; i < 1000; i++) {
+-		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+-		if (ocp_data & LINK_LIST_READY)
+-			break;
+-		usleep_range(1000, 2000);
+-	}
++	wait_oob_link_list_ready(tp);
+ 
+ 	rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX);
+ 
+@@ -3766,7 +3746,6 @@ static void r8153_first_init(struct r8152 *tp)
+ static void r8153_enter_oob(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+-	int i;
+ 
+ 	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+ 	ocp_data &= ~NOW_IS_OOB;
+@@ -3775,23 +3754,13 @@ static void r8153_enter_oob(struct r8152 *tp)
+ 	rtl_disable(tp);
+ 	rtl_reset_bmu(tp);
+ 
+-	for (i = 0; i < 1000; i++) {
+-		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+-		if (ocp_data & LINK_LIST_READY)
+-			break;
+-		usleep_range(1000, 2000);
+-	}
++	wait_oob_link_list_ready(tp);
+ 
+ 	ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+ 	ocp_data |= RE_INIT_LL;
+ 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+ 
+-	for (i = 0; i < 1000; i++) {
+-		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+-		if (ocp_data & LINK_LIST_READY)
+-			break;
+-		usleep_range(1000, 2000);
+-	}
++	wait_oob_link_list_ready(tp);
+ 
+ 	ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ 	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+index 3a58267d3d710..3bce3b59a12b1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+@@ -2140,7 +2140,7 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+ 	if (!(mvm->scan_status & type))
+ 		return 0;
+ 
+-	if (iwl_mvm_is_radio_killed(mvm)) {
++	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+ 		ret = 0;
+ 		goto out;
+ 	}
+diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+index f0f966cecc3a4..e40f727547ec7 100644
+--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+@@ -4319,11 +4319,27 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+ 	if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+ 		wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ 
+-	wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+-	if (adapter->config_bands & BAND_A)
+-		wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+-	else
++	wiphy->bands[NL80211_BAND_2GHZ] = devm_kmemdup(adapter->dev,
++						       &mwifiex_band_2ghz,
++						       sizeof(mwifiex_band_2ghz),
++						       GFP_KERNEL);
++	if (!wiphy->bands[NL80211_BAND_2GHZ]) {
++		ret = -ENOMEM;
++		goto err;
++	}
++
++	if (adapter->config_bands & BAND_A) {
++		wiphy->bands[NL80211_BAND_5GHZ] = devm_kmemdup(adapter->dev,
++							       &mwifiex_band_5ghz,
++							       sizeof(mwifiex_band_5ghz),
++							       GFP_KERNEL);
++		if (!wiphy->bands[NL80211_BAND_5GHZ]) {
++			ret = -ENOMEM;
++			goto err;
++		}
++	} else {
+ 		wiphy->bands[NL80211_BAND_5GHZ] = NULL;
++	}
+ 
+ 	if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+ 		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+@@ -4411,8 +4427,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+ 	if (ret < 0) {
+ 		mwifiex_dbg(adapter, ERROR,
+ 			    "%s: wiphy_register failed: %d\n", __func__, ret);
+-		wiphy_free(wiphy);
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	if (!adapter->regd) {
+@@ -4454,4 +4469,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
+ 
+ 	adapter->wiphy = wiphy;
+ 	return ret;
++
++err:
++	wiphy_free(wiphy);
++
++	return ret;
+ }
+diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
+index 2dfcdb1459441..31f5b2d8f5191 100644
+--- a/drivers/net/wireless/st/cw1200/txrx.c
++++ b/drivers/net/wireless/st/cw1200/txrx.c
+@@ -1170,7 +1170,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
+ 		size_t ies_len = skb->len - (ies - (u8 *)(skb->data));
+ 
+ 		tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len);
+-		if (tim_ie) {
++		if (tim_ie && tim_ie[1] >= sizeof(struct ieee80211_tim_ie)) {
+ 			struct ieee80211_tim_ie *tim =
+ 				(struct ieee80211_tim_ie *)&tim_ie[2];
+ 
+diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
+index 1e90ff17f87db..9610a9b1929f1 100644
+--- a/drivers/nfc/pn533/pn533.c
++++ b/drivers/nfc/pn533/pn533.c
+@@ -185,6 +185,32 @@ struct pn533_cmd_jump_dep_response {
+ 	u8 gt[];
+ } __packed;
+ 
++struct pn532_autopoll_resp {
++	u8 type;
++	u8 ln;
++	u8 tg;
++	u8 tgdata[];
++};
++
++/* PN532_CMD_IN_AUTOPOLL */
++#define PN532_AUTOPOLL_POLLNR_INFINITE	0xff
++#define PN532_AUTOPOLL_PERIOD		0x03 /* in units of 150 ms */
++
++#define PN532_AUTOPOLL_TYPE_GENERIC_106		0x00
++#define PN532_AUTOPOLL_TYPE_GENERIC_212		0x01
++#define PN532_AUTOPOLL_TYPE_GENERIC_424		0x02
++#define PN532_AUTOPOLL_TYPE_JEWEL		0x04
++#define PN532_AUTOPOLL_TYPE_MIFARE		0x10
++#define PN532_AUTOPOLL_TYPE_FELICA212		0x11
++#define PN532_AUTOPOLL_TYPE_FELICA424		0x12
++#define PN532_AUTOPOLL_TYPE_ISOA		0x20
++#define PN532_AUTOPOLL_TYPE_ISOB		0x23
++#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106	0x40
++#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212	0x41
++#define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424	0x42
++#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106	0x80
++#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_212	0x81
++#define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_424	0x82
+ 
+ /* PN533_TG_INIT_AS_TARGET */
+ #define PN533_INIT_TARGET_PASSIVE 0x1
+@@ -1394,6 +1420,101 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev)
+ 	return rc;
+ }
+ 
++static int pn533_autopoll_complete(struct pn533 *dev, void *arg,
++			       struct sk_buff *resp)
++{
++	struct pn532_autopoll_resp *apr;
++	struct nfc_target nfc_tgt;
++	u8 nbtg;
++	int rc;
++
++	if (IS_ERR(resp)) {
++		rc = PTR_ERR(resp);
++
++		nfc_err(dev->dev, "%s  autopoll complete error %d\n",
++			__func__, rc);
++
++		if (rc == -ENOENT) {
++			if (dev->poll_mod_count != 0)
++				return rc;
++			goto stop_poll;
++		} else if (rc < 0) {
++			nfc_err(dev->dev,
++				"Error %d when running autopoll\n", rc);
++			goto stop_poll;
++		}
++	}
++
++	nbtg = resp->data[0];
++	if ((nbtg > 2) || (nbtg <= 0))
++		return -EAGAIN;
++
++	apr = (struct pn532_autopoll_resp *)&resp->data[1];
++	while (nbtg--) {
++		memset(&nfc_tgt, 0, sizeof(struct nfc_target));
++		switch (apr->type) {
++		case PN532_AUTOPOLL_TYPE_ISOA:
++			dev_dbg(dev->dev, "ISOA\n");
++			rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
++						       apr->ln - 1);
++			break;
++		case PN532_AUTOPOLL_TYPE_FELICA212:
++		case PN532_AUTOPOLL_TYPE_FELICA424:
++			dev_dbg(dev->dev, "FELICA\n");
++			rc = pn533_target_found_felica(&nfc_tgt, apr->tgdata,
++						       apr->ln - 1);
++			break;
++		case PN532_AUTOPOLL_TYPE_JEWEL:
++			dev_dbg(dev->dev, "JEWEL\n");
++			rc = pn533_target_found_jewel(&nfc_tgt, apr->tgdata,
++						      apr->ln - 1);
++			break;
++		case PN532_AUTOPOLL_TYPE_ISOB:
++			dev_dbg(dev->dev, "ISOB\n");
++			rc = pn533_target_found_type_b(&nfc_tgt, apr->tgdata,
++						       apr->ln - 1);
++			break;
++		case PN532_AUTOPOLL_TYPE_MIFARE:
++			dev_dbg(dev->dev, "Mifare\n");
++			rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata,
++						       apr->ln - 1);
++			break;
++		default:
++			nfc_err(dev->dev,
++				    "Unknown current poll modulation\n");
++			rc = -EPROTO;
++		}
++
++		if (rc)
++			goto done;
++
++		if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
++			nfc_err(dev->dev,
++				    "The Tg found doesn't have the desired protocol\n");
++			rc = -EAGAIN;
++			goto done;
++		}
++
++		dev->tgt_available_prots = nfc_tgt.supported_protocols;
++		apr = (struct pn532_autopoll_resp *)
++			(apr->tgdata + (apr->ln - 1));
++	}
++
++	pn533_poll_reset_mod_list(dev);
++	nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
++
++done:
++	dev_kfree_skb(resp);
++	return rc;
++
++stop_poll:
++	nfc_err(dev->dev, "autopoll operation has been stopped\n");
++
++	pn533_poll_reset_mod_list(dev);
++	dev->poll_protocols = 0;
++	return rc;
++}
++
+ static int pn533_poll_complete(struct pn533 *dev, void *arg,
+ 			       struct sk_buff *resp)
+ {
+@@ -1537,6 +1658,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
+ {
+ 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ 	struct pn533_poll_modulations *cur_mod;
++	struct sk_buff *skb;
+ 	u8 rand_mod;
+ 	int rc;
+ 
+@@ -1562,9 +1684,78 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
+ 			tm_protocols = 0;
+ 	}
+ 
+-	pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
+ 	dev->poll_protocols = im_protocols;
+ 	dev->listen_protocols = tm_protocols;
++	if (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL) {
++		skb = pn533_alloc_skb(dev, 4 + 6);
++		if (!skb)
++			return -ENOMEM;
++
++		*((u8 *)skb_put(skb, sizeof(u8))) =
++			PN532_AUTOPOLL_POLLNR_INFINITE;
++		*((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_PERIOD;
++
++		if ((im_protocols & NFC_PROTO_MIFARE_MASK) &&
++				(im_protocols & NFC_PROTO_ISO14443_MASK) &&
++				(im_protocols & NFC_PROTO_NFC_DEP_MASK))
++			*((u8 *)skb_put(skb, sizeof(u8))) =
++				PN532_AUTOPOLL_TYPE_GENERIC_106;
++		else {
++			if (im_protocols & NFC_PROTO_MIFARE_MASK)
++				*((u8 *)skb_put(skb, sizeof(u8))) =
++					PN532_AUTOPOLL_TYPE_MIFARE;
++
++			if (im_protocols & NFC_PROTO_ISO14443_MASK)
++				*((u8 *)skb_put(skb, sizeof(u8))) =
++					PN532_AUTOPOLL_TYPE_ISOA;
++
++			if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
++				*((u8 *)skb_put(skb, sizeof(u8))) =
++					PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106;
++				*((u8 *)skb_put(skb, sizeof(u8))) =
++					PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212;
++				*((u8 *)skb_put(skb, sizeof(u8))) =
++					PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424;
++			}
++		}
++
++		if (im_protocols & NFC_PROTO_FELICA_MASK ||
++				im_protocols & NFC_PROTO_NFC_DEP_MASK) {
++			*((u8 *)skb_put(skb, sizeof(u8))) =
++				PN532_AUTOPOLL_TYPE_FELICA212;
++			*((u8 *)skb_put(skb, sizeof(u8))) =
++				PN532_AUTOPOLL_TYPE_FELICA424;
++		}
++
++		if (im_protocols & NFC_PROTO_JEWEL_MASK)
++			*((u8 *)skb_put(skb, sizeof(u8))) =
++				PN532_AUTOPOLL_TYPE_JEWEL;
++
++		if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
++			*((u8 *)skb_put(skb, sizeof(u8))) =
++				PN532_AUTOPOLL_TYPE_ISOB;
++
++		if (tm_protocols)
++			*((u8 *)skb_put(skb, sizeof(u8))) =
++				PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106;
++
++		rc = pn533_send_cmd_async(dev, PN533_CMD_IN_AUTOPOLL, skb,
++				pn533_autopoll_complete, NULL);
++
++		if (rc < 0)
++			dev_kfree_skb(skb);
++		else
++			dev->poll_mod_count++;
++
++		return rc;
++	}
++
++	pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
++	if (!dev->poll_mod_count) {
++		nfc_err(dev->dev,
++			"Poll mod list is empty\n");
++		return -EINVAL;
++	}
+ 
+ 	/* Do not always start polling from the same modulation */
+ 	get_random_bytes(&rand_mod, sizeof(rand_mod));
+@@ -2465,7 +2656,11 @@ static int pn533_dev_up(struct nfc_dev *nfc_dev)
+ {
+ 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ 
+-	if (dev->device_type == PN533_DEVICE_PN532) {
++	if (dev->phy_ops->dev_up)
++		dev->phy_ops->dev_up(dev);
++
++	if ((dev->device_type == PN533_DEVICE_PN532) ||
++		(dev->device_type == PN533_DEVICE_PN532_AUTOPOLL)) {
+ 		int rc = pn532_sam_configuration(nfc_dev);
+ 
+ 		if (rc)
+@@ -2477,7 +2672,14 @@ static int pn533_dev_up(struct nfc_dev *nfc_dev)
+ 
+ static int pn533_dev_down(struct nfc_dev *nfc_dev)
+ {
+-	return pn533_rf_field(nfc_dev, 0);
++	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
++	int ret;
++
++	ret = pn533_rf_field(nfc_dev, 0);
++	if (dev->phy_ops->dev_down && !ret)
++		dev->phy_ops->dev_down(dev);
++
++	return ret;
+ }
+ 
+ static struct nfc_ops pn533_nfc_ops = {
+@@ -2505,6 +2707,7 @@ static int pn533_setup(struct pn533 *dev)
+ 	case PN533_DEVICE_PASORI:
+ 	case PN533_DEVICE_ACR122U:
+ 	case PN533_DEVICE_PN532:
++	case PN533_DEVICE_PN532_AUTOPOLL:
+ 		max_retries.mx_rty_atr = 0x2;
+ 		max_retries.mx_rty_psl = 0x1;
+ 		max_retries.mx_rty_passive_act =
+@@ -2541,6 +2744,7 @@ static int pn533_setup(struct pn533 *dev)
+ 	switch (dev->device_type) {
+ 	case PN533_DEVICE_STD:
+ 	case PN533_DEVICE_PN532:
++	case PN533_DEVICE_PN532_AUTOPOLL:
+ 		break;
+ 
+ 	case PN533_DEVICE_PASORI:
+diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
+index 8bf9d6ece0f50..f9256e5485acc 100644
+--- a/drivers/nfc/pn533/pn533.h
++++ b/drivers/nfc/pn533/pn533.h
+@@ -6,10 +6,11 @@
+  * Copyright (C) 2012-2013 Tieto Poland
+  */
+ 
+-#define PN533_DEVICE_STD     0x1
+-#define PN533_DEVICE_PASORI  0x2
+-#define PN533_DEVICE_ACR122U 0x3
+-#define PN533_DEVICE_PN532   0x4
++#define PN533_DEVICE_STD		0x1
++#define PN533_DEVICE_PASORI		0x2
++#define PN533_DEVICE_ACR122U		0x3
++#define PN533_DEVICE_PN532		0x4
++#define PN533_DEVICE_PN532_AUTOPOLL	0x5
+ 
+ #define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
+ 			     NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
+@@ -70,6 +71,7 @@
+ #define PN533_CMD_IN_ATR 0x50
+ #define PN533_CMD_IN_RELEASE 0x52
+ #define PN533_CMD_IN_JUMP_FOR_DEP 0x56
++#define PN533_CMD_IN_AUTOPOLL 0x60
+ 
+ #define PN533_CMD_TG_INIT_AS_TARGET 0x8c
+ #define PN533_CMD_TG_GET_DATA 0x86
+@@ -207,6 +209,15 @@ struct pn533_phy_ops {
+ 			  struct sk_buff *out);
+ 	int (*send_ack)(struct pn533 *dev, gfp_t flags);
+ 	void (*abort_cmd)(struct pn533 *priv, gfp_t flags);
++	/*
++	 * dev_up and dev_down are optional.
++	 * They are used to inform the phy layer that the nfc chip
++	 * is going to be really used very soon. The phy layer can then
++	 * bring up its interface to the chip, and otherwise keep it
++	 * suspended for power-saving reasons.
++	 */
++	void (*dev_up)(struct pn533 *priv);
++	void (*dev_down)(struct pn533 *priv);
+ };
+ 
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 9144ed14b0741..0676637e1eab6 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1080,8 +1080,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
+ 
+ 	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
+ 			sizeof(struct nvme_id_ctrl));
+-	if (error)
++	if (error) {
+ 		kfree(*id);
++		*id = NULL;
++	}
+ 	return error;
+ }
+ 
+@@ -1193,6 +1195,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
+ 	if (error) {
+ 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
+ 		kfree(*id);
++		*id = NULL;
+ 	}
+ 
+ 	return error;
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 50e2007092bc0..ae41b6001c7e2 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -428,12 +428,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
+ 	return 0;
+ 
+ out_free:
+-	while (--i >= 0) {
+-		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+-
+-		list_del(&rsp->free_list);
+-		nvmet_rdma_free_rsp(ndev, rsp);
+-	}
++	while (--i >= 0)
++		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
+ 	kfree(queue->rsps);
+ out:
+ 	return ret;
+@@ -444,12 +440,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
+ 	struct nvmet_rdma_device *ndev = queue->dev;
+ 	int i, nr_rsps = queue->recv_queue_size * 2;
+ 
+-	for (i = 0; i < nr_rsps; i++) {
+-		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
+-
+-		list_del(&rsp->free_list);
+-		nvmet_rdma_free_rsp(ndev, rsp);
+-	}
++	for (i = 0; i < nr_rsps; i++)
++		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
+ 	kfree(queue->rsps);
+ }
+ 
+diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
+index d40bd57537ba1..fa6e7fbf356e7 100644
+--- a/drivers/nvme/target/tcp.c
++++ b/drivers/nvme/target/tcp.c
+@@ -792,6 +792,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+ 		pr_err("bad nvme-tcp pdu length (%d)\n",
+ 			le32_to_cpu(icreq->hdr.plen));
+ 		nvmet_tcp_fatal_error(queue);
++		return -EPROTO;
+ 	}
+ 
+ 	if (icreq->pfv != NVME_TCP_PFV_1_0) {
+diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
+index 1373a3c67962a..a3564e12927b6 100644
+--- a/drivers/nvme/target/trace.c
++++ b/drivers/nvme/target/trace.c
+@@ -195,7 +195,7 @@ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name)
+ 	return ret;
+ }
+ 
+-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
++const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id)
+ {
+ 	const char *ret = trace_seq_buffer_ptr(p);
+ 
+@@ -208,8 +208,8 @@ const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl)
+ 	 * If we can know the extra data of the connect command in this stage,
+ 	 * we can update this print statement later.
+ 	 */
+-	if (ctrl)
+-		trace_seq_printf(p, "%d", ctrl->cntlid);
++	if (ctrl_id)
++		trace_seq_printf(p, "%d", ctrl_id);
+ 	else
+ 		trace_seq_printf(p, "_");
+ 	trace_seq_putc(p, 0);
+diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h
+index 3f61b6657175e..5b00a338d52ca 100644
+--- a/drivers/nvme/target/trace.h
++++ b/drivers/nvme/target/trace.h
+@@ -32,18 +32,24 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p, u8 fctype,
+ 	 nvmet_trace_parse_nvm_cmd(p, opcode, cdw10) :			\
+ 	 nvmet_trace_parse_admin_cmd(p, opcode, cdw10)))
+ 
+-const char *nvmet_trace_ctrl_name(struct trace_seq *p, struct nvmet_ctrl *ctrl);
+-#define __print_ctrl_name(ctrl)				\
+-	nvmet_trace_ctrl_name(p, ctrl)
++const char *nvmet_trace_ctrl_id(struct trace_seq *p, u16 ctrl_id);
++#define __print_ctrl_id(ctrl_id)			\
++	nvmet_trace_ctrl_id(p, ctrl_id)
+ 
+ const char *nvmet_trace_disk_name(struct trace_seq *p, char *name);
+ #define __print_disk_name(name)				\
+ 	nvmet_trace_disk_name(p, name)
+ 
+ #ifndef TRACE_HEADER_MULTI_READ
+-static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
++static inline u16 nvmet_req_to_ctrl_id(struct nvmet_req *req)
+ {
+-	return req->sq->ctrl;
++	/*
++	 * The queue and controller pointers are not valid until an association
++	 * has been established.
++	 */
++	if (!req->sq || !req->sq->ctrl)
++		return 0;
++	return req->sq->ctrl->cntlid;
+ }
+ 
+ static inline void __assign_req_name(char *name, struct nvmet_req *req)
+@@ -60,7 +66,7 @@ TRACE_EVENT(nvmet_req_init,
+ 	TP_ARGS(req, cmd),
+ 	TP_STRUCT__entry(
+ 		__field(struct nvme_command *, cmd)
+-		__field(struct nvmet_ctrl *, ctrl)
++		__field(u16, ctrl_id)
+ 		__array(char, disk, DISK_NAME_LEN)
+ 		__field(int, qid)
+ 		__field(u16, cid)
+@@ -73,7 +79,7 @@ TRACE_EVENT(nvmet_req_init,
+ 	),
+ 	TP_fast_assign(
+ 		__entry->cmd = cmd;
+-		__entry->ctrl = nvmet_req_to_ctrl(req);
++		__entry->ctrl_id = nvmet_req_to_ctrl_id(req);
+ 		__assign_req_name(__entry->disk, req);
+ 		__entry->qid = req->sq->qid;
+ 		__entry->cid = cmd->common.command_id;
+@@ -87,7 +93,7 @@ TRACE_EVENT(nvmet_req_init,
+ 	),
+ 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, "
+ 		  "meta=%#llx, cmd=(%s, %s)",
+-		__print_ctrl_name(__entry->ctrl),
++		__print_ctrl_id(__entry->ctrl_id),
+ 		__print_disk_name(__entry->disk),
+ 		__entry->qid, __entry->cid, __entry->nsid,
+ 		__entry->flags, __entry->metadata,
+@@ -101,7 +107,7 @@ TRACE_EVENT(nvmet_req_complete,
+ 	TP_PROTO(struct nvmet_req *req),
+ 	TP_ARGS(req),
+ 	TP_STRUCT__entry(
+-		__field(struct nvmet_ctrl *, ctrl)
++		__field(u16, ctrl_id)
+ 		__array(char, disk, DISK_NAME_LEN)
+ 		__field(int, qid)
+ 		__field(int, cid)
+@@ -109,7 +115,7 @@ TRACE_EVENT(nvmet_req_complete,
+ 		__field(u16, status)
+ 	),
+ 	TP_fast_assign(
+-		__entry->ctrl = nvmet_req_to_ctrl(req);
++		__entry->ctrl_id = nvmet_req_to_ctrl_id(req);
+ 		__entry->qid = req->cq->qid;
+ 		__entry->cid = req->cqe->command_id;
+ 		__entry->result = le64_to_cpu(req->cqe->result.u64);
+@@ -117,7 +123,7 @@ TRACE_EVENT(nvmet_req_complete,
+ 		__assign_req_name(__entry->disk, req);
+ 	),
+ 	TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
+-		__print_ctrl_name(__entry->ctrl),
++		__print_ctrl_id(__entry->ctrl_id),
+ 		__print_disk_name(__entry->disk),
+ 		__entry->qid, __entry->cid, __entry->result, __entry->status)
+ 
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index 13517243355ce..43b25119efa2e 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -323,6 +323,8 @@ static int pcs_get_function(struct pinctrl_dev *pctldev, unsigned pin,
+ 		return -ENOTSUPP;
+ 	fselector = setting->func;
+ 	function = pinmux_generic_get_function(pctldev, fselector);
++	if (!function)
++		return -EINVAL;
+ 	*func = function->data;
+ 	if (!(*func)) {
+ 		dev_err(pcs->dev, "%s could not find function%i\n",
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index a5cee56ffe61e..05363aaa7cfed 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -1665,9 +1665,15 @@ static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
+ 	if (!sense)
+ 		return 0;
+ 
+-	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
+-		!!(sense[1] & SNS1_FILE_PROTECTED) ||
+-		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
++	if (sense[1] & SNS1_NO_REC_FOUND)
++		return 1;
++
++	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
++	    scsw_is_tm(&irb->scsw) &&
++	    !(sense[2] & SNS2_ENV_DATA_PRESENT))
++		return 1;
++
++	return 0;
+ }
+ 
+ static int dasd_ese_oos_cond(u8 *sense)
+@@ -1688,7 +1694,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	struct dasd_device *device;
+ 	unsigned long now;
+ 	int nrf_suppressed = 0;
+-	int fp_suppressed = 0;
++	int it_suppressed = 0;
+ 	struct request *req;
+ 	u8 *sense = NULL;
+ 	int expires;
+@@ -1743,8 +1749,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 		 */
+ 		sense = dasd_get_sense(irb);
+ 		if (sense) {
+-			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
+-				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
++			it_suppressed =	(sense[1] & SNS1_INV_TRACK_FORMAT) &&
++				!(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++				test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+ 			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
+ 				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ 
+@@ -1759,7 +1766,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 				return;
+ 			}
+ 		}
+-		if (!(fp_suppressed || nrf_suppressed))
++		if (!(it_suppressed || nrf_suppressed))
+ 			device->discipline->dump_sense_dbf(device, irb, "int");
+ 
+ 		if (device->features & DASD_FEATURE_ERPLOG)
+@@ -2513,14 +2520,17 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
+ 	rc = 0;
+ 	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
+ 		/*
+-		 * In some cases the 'File Protected' or 'Incorrect Length'
+-		 * error might be expected and error recovery would be
+-		 * unnecessary in these cases.	Check if the according suppress
+-		 * bit is set.
++		 * In some cases certain errors might be expected and
++		 * error recovery would be unnecessary in these cases.
++		 * Check if the according suppress bit is set.
+ 		 */
+ 		sense = dasd_get_sense(&cqr->irb);
+-		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
+-		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
++		if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
++		    !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++		    test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags))
++			continue;
++		if (sense && (sense[1] & SNS1_NO_REC_FOUND) &&
++		    test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags))
+ 			continue;
+ 		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
+ 		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
+diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
+index 8598c792ded30..e2163176bd9df 100644
+--- a/drivers/s390/block/dasd_3990_erp.c
++++ b/drivers/s390/block/dasd_3990_erp.c
+@@ -1401,14 +1401,8 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
+ 
+ 	struct dasd_device *device = erp->startdev;
+ 
+-	/*
+-	 * In some cases the 'File Protected' error might be expected and
+-	 * log messages shouldn't be written then.
+-	 * Check if the according suppress bit is set.
+-	 */
+-	if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
+-		dev_err(&device->cdev->dev,
+-			"Accessing the DASD failed because of a hardware error\n");
++	dev_err(&device->cdev->dev,
++		"Accessing the DASD failed because of a hardware error\n");
+ 
+ 	return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
+ 
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index c6930c159d2a6..fddcb910157cc 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -2201,6 +2201,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
+ 	cqr->status = DASD_CQR_FILLED;
+ 	/* Set flags to suppress output for expected errors */
+ 	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
++	set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+ 
+ 	return cqr;
+ }
+@@ -2482,7 +2483,6 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
+ 	cqr->buildclk = get_tod_clock();
+ 	cqr->status = DASD_CQR_FILLED;
+ 	/* Set flags to suppress output for expected errors */
+-	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ 	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ 
+ 	return cqr;
+@@ -4031,8 +4031,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
+ 
+ 	/* Set flags to suppress output for expected errors */
+ 	if (dasd_eckd_is_ese(basedev)) {
+-		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+-		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ 		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ 	}
+ 
+@@ -4534,9 +4532,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
+ 
+ 	/* Set flags to suppress output for expected errors */
+ 	if (dasd_eckd_is_ese(basedev)) {
+-		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+-		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+ 		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
++		set_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
+ 	}
+ 
+ 	return cqr;
+@@ -5706,36 +5703,32 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
+ {
+ 	u8 *sense = dasd_get_sense(irb);
+ 
+-	if (scsw_is_tm(&irb->scsw)) {
+-		/*
+-		 * In some cases the 'File Protected' or 'Incorrect Length'
+-		 * error might be expected and log messages shouldn't be written
+-		 * then. Check if the according suppress bit is set.
+-		 */
+-		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
+-		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
+-			return;
+-		if (scsw_cstat(&irb->scsw) == 0x40 &&
+-		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
+-			return;
++	/*
++	 * In some cases certain errors might be expected and
++	 * log messages shouldn't be written then.
++	 * Check if the according suppress bit is set.
++	 */
++	if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
++	    !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
++	    test_bit(DASD_CQR_SUPPRESS_IT, &req->flags))
++		return;
+ 
+-		dasd_eckd_dump_sense_tcw(device, req, irb);
+-	} else {
+-		/*
+-		 * In some cases the 'Command Reject' or 'No Record Found'
+-		 * error might be expected and log messages shouldn't be
+-		 * written then. Check if the according suppress bit is set.
+-		 */
+-		if (sense && sense[0] & SNS0_CMD_REJECT &&
+-		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
+-			return;
++	if (sense && sense[0] & SNS0_CMD_REJECT &&
++	    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
++		return;
+ 
+-		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
+-		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
+-			return;
++	if (sense && sense[1] & SNS1_NO_REC_FOUND &&
++	    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
++		return;
+ 
++	if (scsw_cstat(&irb->scsw) == 0x40 &&
++	    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
++		return;
++
++	if (scsw_is_tm(&irb->scsw))
++		dasd_eckd_dump_sense_tcw(device, req, irb);
++	else
+ 		dasd_eckd_dump_sense_ccw(device, req, irb);
+-	}
+ }
+ 
+ static int dasd_eckd_pm_freeze(struct dasd_device *device)
+diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
+index 5d7d35ca5eb48..052b5d1ba9c12 100644
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -226,7 +226,7 @@ struct dasd_ccw_req {
+  * The following flags are used to suppress output of certain errors.
+  */
+ #define DASD_CQR_SUPPRESS_NRF	4	/* Suppress 'No Record Found' error */
+-#define DASD_CQR_SUPPRESS_FP	5	/* Suppress 'File Protected' error*/
++#define DASD_CQR_SUPPRESS_IT	5	/* Suppress 'Invalid Track' error*/
+ #define DASD_CQR_SUPPRESS_IL	6	/* Suppress 'Incorrect Length' error */
+ #define DASD_CQR_SUPPRESS_CR	7	/* Suppress 'Command Reject' error */
+ 
+diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
+index 835de44dbbccd..b98526d3ddfdc 100644
+--- a/drivers/s390/cio/idset.c
++++ b/drivers/s390/cio/idset.c
+@@ -16,20 +16,21 @@ struct idset {
+ 	unsigned long bitmap[0];
+ };
+ 
+-static inline unsigned long bitmap_size(int num_ssid, int num_id)
++static inline unsigned long idset_bitmap_size(int num_ssid, int num_id)
+ {
+-	return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
++	return bitmap_size(size_mul(num_ssid, num_id));
+ }
+ 
+ static struct idset *idset_new(int num_ssid, int num_id)
+ {
+ 	struct idset *set;
+ 
+-	set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
++	set = vmalloc(sizeof(struct idset) +
++		      idset_bitmap_size(num_ssid, num_id));
+ 	if (set) {
+ 		set->num_ssid = num_ssid;
+ 		set->num_id = num_id;
+-		memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
++		memset(set->bitmap, 0, idset_bitmap_size(num_ssid, num_id));
+ 	}
+ 	return set;
+ }
+@@ -41,7 +42,8 @@ void idset_free(struct idset *set)
+ 
+ void idset_fill(struct idset *set)
+ {
+-	memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
++	memset(set->bitmap, 0xff,
++	       idset_bitmap_size(set->num_ssid, set->num_id));
+ }
+ 
+ static inline void idset_add(struct idset *set, int ssid, int id)
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index d4fcfa1e54e02..8849eca08a494 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -638,6 +638,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+ 
+ 	if (aac_comm_init(dev)<0){
+ 		kfree(dev->queues);
++		dev->queues = NULL;
+ 		return NULL;
+ 	}
+ 	/*
+@@ -645,6 +646,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+ 	 */
+ 	if (aac_fib_setup(dev) < 0) {
+ 		kfree(dev->queues);
++		dev->queues = NULL;
+ 		return NULL;
+ 	}
+ 		
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index e489c68cfb631..04b9a94f2f5e5 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -6902,7 +6902,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
+ 	struct lpfc_sglq *sglq_entry = NULL;
+ 	struct lpfc_sglq *sglq_entry_next = NULL;
+ 	struct lpfc_sglq *sglq_entry_first = NULL;
+-	int status, total_cnt;
++	int status = 0, total_cnt;
+ 	int post_cnt = 0, num_posted = 0, block_cnt = 0;
+ 	int last_xritag = NO_XRI;
+ 	LIST_HEAD(prep_sgl_list);
+diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
+index c37dd15d16d24..83f2576ed2aa0 100644
+--- a/drivers/scsi/scsi_transport_spi.c
++++ b/drivers/scsi/scsi_transport_spi.c
+@@ -677,10 +677,10 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
+ 	for (r = 0; r < retries; r++) {
+ 		result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
+ 				     buffer, len, &sshdr);
+-		if(result || !scsi_device_online(sdev)) {
++		if (result || !scsi_device_online(sdev)) {
+ 
+ 			scsi_device_set_state(sdev, SDEV_QUIESCE);
+-			if (scsi_sense_valid(&sshdr)
++			if (result > 0 && scsi_sense_valid(&sshdr)
+ 			    && sshdr.sense_key == ILLEGAL_REQUEST
+ 			    /* INVALID FIELD IN CDB */
+ 			    && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
+diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
+index f6c3d17b05c74..c447f3ede514a 100644
+--- a/drivers/soc/qcom/cmd-db.c
++++ b/drivers/soc/qcom/cmd-db.c
+@@ -247,7 +247,7 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
++	cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WC);
+ 	if (!cmd_db_header) {
+ 		ret = -ENOMEM;
+ 		cmd_db_header = NULL;
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index 3b3f909407c39..b7d81bde35ca8 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1407,18 +1407,18 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
+ 					    unsigned int port_num)
+ {
+ 	struct sdw_dpn_prop *dpn_prop;
+-	u8 num_ports;
++	unsigned long mask;
+ 	int i;
+ 
+ 	if (direction == SDW_DATA_DIR_TX) {
+-		num_ports = hweight32(slave->prop.source_ports);
++		mask = slave->prop.source_ports;
+ 		dpn_prop = slave->prop.src_dpn_prop;
+ 	} else {
+-		num_ports = hweight32(slave->prop.sink_ports);
++		mask = slave->prop.sink_ports;
+ 		dpn_prop = slave->prop.sink_dpn_prop;
+ 	}
+ 
+-	for (i = 0; i < num_ports; i++) {
++	for_each_set_bit(i, &mask, 32) {
+ 		if (dpn_prop[i].num == port_num)
+ 			return &dpn_prop[i];
+ 	}
+diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
+index 0a26984acb2ca..9e54bc7eec663 100644
+--- a/drivers/ssb/main.c
++++ b/drivers/ssb/main.c
+@@ -835,7 +835,7 @@ static u32 clkfactor_f6_resolve(u32 v)
+ 	case SSB_CHIPCO_CLK_F6_7:
+ 		return 7;
+ 	}
+-	return 0;
++	return 1;
+ }
+ 
+ /* Calculate the speed the backplane would run at a given set of clockcontrol values */
+diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
+index 3fbe223d59b8e..28723db44a8ff 100644
+--- a/drivers/staging/ks7010/ks7010_sdio.c
++++ b/drivers/staging/ks7010/ks7010_sdio.c
+@@ -395,9 +395,9 @@ int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
+ 	priv->hostt.buff[priv->hostt.qtail] = le16_to_cpu(hdr->event);
+ 	priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
+ 
+-	spin_lock(&priv->tx_dev.tx_dev_lock);
++	spin_lock_bh(&priv->tx_dev.tx_dev_lock);
+ 	result = enqueue_txdev(priv, p, size, complete_handler, skb);
+-	spin_unlock(&priv->tx_dev.tx_dev_lock);
++	spin_unlock_bh(&priv->tx_dev.tx_dev_lock);
+ 
+ 	if (txq_has_space(priv))
+ 		queue_delayed_work(priv->wq, &priv->rw_dwork, 0);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index edb62b49f572e..e8768d33efd6c 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1767,6 +1767,9 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
+ 	.driver_info = SINGLE_RX_URB,
+ 	},
++	{ USB_DEVICE(0x1901, 0x0006), /* GE Healthcare Patient Monitor UI Controller */
++	.driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
++	},
+ 	{ USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 4a4f39c458a95..859388d96117e 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -690,6 +690,7 @@ static int add_power_attributes(struct device *dev)
+ 
+ static void remove_power_attributes(struct device *dev)
+ {
++	sysfs_unmerge_group(&dev->kobj, &usb3_hardware_lpm_attr_group);
+ 	sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group);
+ 	sysfs_unmerge_group(&dev->kobj, &power_attr_group);
+ }
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 12e72e804278e..d77092d154df2 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -391,6 +391,13 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
+ static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
+ {
+ 	struct dwc3_event_buffer *evt;
++	unsigned int hw_mode;
++
++	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
++	if (hw_mode == DWC3_GHWPARAMS0_MODE_HOST) {
++		dwc->ev_buf = NULL;
++		return 0;
++	}
+ 
+ 	evt = dwc3_alloc_one_event_buffer(dwc, length);
+ 	if (IS_ERR(evt)) {
+@@ -412,6 +419,9 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc)
+ {
+ 	struct dwc3_event_buffer	*evt;
+ 
++	if (!dwc->ev_buf)
++		return 0;
++
+ 	evt = dwc->ev_buf;
+ 	evt->lpos = 0;
+ 	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
+@@ -428,6 +438,17 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc)
+ void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
+ {
+ 	struct dwc3_event_buffer	*evt;
++	u32				reg;
++
++	if (!dwc->ev_buf)
++		return;
++	/*
++	 * Exynos platforms may not be able to access event buffer if the
++	 * controller failed to halt on dwc3_core_exit().
++	 */
++	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
++	if (!(reg & DWC3_DSTS_DEVCTRLHLT))
++		return;
+ 
+ 	evt = dwc->ev_buf;
+ 
+diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
+index 81ee1cd794f7d..5465b355f00b2 100644
+--- a/drivers/usb/dwc3/dwc3-omap.c
++++ b/drivers/usb/dwc3/dwc3-omap.c
+@@ -527,11 +527,13 @@ static int dwc3_omap_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(dev, "failed to request IRQ #%d --> %d\n",
+ 			omap->irq, ret);
+-		goto err1;
++		goto err2;
+ 	}
+ 	dwc3_omap_enable_irqs(omap);
+ 	return 0;
+ 
++err2:
++	of_platform_depopulate(dev);
+ err1:
+ 	pm_runtime_put_sync(dev);
+ 	pm_runtime_disable(dev);
+diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
+index c682420f25ca9..c48fd6bb97b7f 100644
+--- a/drivers/usb/dwc3/dwc3-st.c
++++ b/drivers/usb/dwc3/dwc3-st.c
+@@ -219,10 +219,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 	dwc3_data->regmap = regmap;
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "syscfg-reg");
+-	if (!res) {
+-		ret = -ENXIO;
+-		goto undo_platform_dev_alloc;
+-	}
++	if (!res)
++		return -ENXIO;
+ 
+ 	dwc3_data->syscfg_reg_off = res->start;
+ 
+@@ -233,8 +231,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 		devm_reset_control_get_exclusive(dev, "powerdown");
+ 	if (IS_ERR(dwc3_data->rstc_pwrdn)) {
+ 		dev_err(&pdev->dev, "could not get power controller\n");
+-		ret = PTR_ERR(dwc3_data->rstc_pwrdn);
+-		goto undo_platform_dev_alloc;
++		return PTR_ERR(dwc3_data->rstc_pwrdn);
+ 	}
+ 
+ 	/* Manage PowerDown */
+@@ -269,7 +266,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 	if (!child_pdev) {
+ 		dev_err(dev, "failed to find dwc3 core device\n");
+ 		ret = -ENODEV;
+-		goto err_node_put;
++		goto depopulate;
+ 	}
+ 
+ 	dwc3_data->dr_mode = usb_get_dr_mode(&child_pdev->dev);
+@@ -285,6 +282,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 	ret = st_dwc3_drd_init(dwc3_data);
+ 	if (ret) {
+ 		dev_err(dev, "drd initialisation failed\n");
++		of_platform_depopulate(dev);
+ 		goto undo_softreset;
+ 	}
+ 
+@@ -294,14 +292,14 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, dwc3_data);
+ 	return 0;
+ 
++depopulate:
++	of_platform_depopulate(dev);
+ err_node_put:
+ 	of_node_put(child);
+ undo_softreset:
+ 	reset_control_assert(dwc3_data->rstc_rst);
+ undo_powerdown:
+ 	reset_control_assert(dwc3_data->rstc_pwrdn);
+-undo_platform_dev_alloc:
+-	platform_device_put(pdev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
+index 9a05863b28768..6a4e206b1e2df 100644
+--- a/drivers/usb/gadget/udc/fsl_udc_core.c
++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
+@@ -2503,7 +2503,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
+ 	/* setup the udc->eps[] for non-control endpoints and link
+ 	 * to gadget.ep_list */
+ 	for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
+-		char name[14];
++		char name[16];
+ 
+ 		sprintf(name, "ep%dout", i);
+ 		struct_ep_setup(udc_controller, i * 2, name, 1);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index eb78091d1b35e..dfc406be08563 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -2819,7 +2819,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
+ 				xhci->num_active_eps);
+ 		return -ENOMEM;
+ 	}
+-	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
++	if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change &&
+ 	    xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
+ 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+ 			xhci_free_host_resources(xhci, ctrl_ctx);
+@@ -4231,8 +4231,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 		mutex_unlock(&xhci->mutex);
+ 		ret = xhci_disable_slot(xhci, udev->slot_id);
+ 		xhci_free_virt_device(xhci, udev->slot_id);
+-		if (!ret)
+-			xhci_alloc_dev(hcd, udev);
++		if (!ret) {
++			if (xhci_alloc_dev(hcd, udev) == 1)
++				xhci_setup_addressable_virt_dev(xhci, udev);
++		}
+ 		kfree(command->completion);
+ 		kfree(command);
+ 		return -EPROTO;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 6545b295be99f..35bd9c10518f3 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -619,6 +619,8 @@ static void option_instat_callback(struct urb *urb);
+ 
+ /* MeiG Smart Technology products */
+ #define MEIGSMART_VENDOR_ID			0x2dee
++/* MeiG Smart SRM825L based on Qualcomm 315 */
++#define MEIGSMART_PRODUCT_SRM825L		0x4d22
+ /* MeiG Smart SLM320 based on UNISOC UIS8910 */
+ #define MEIGSMART_PRODUCT_SLM320		0x4d41
+ 
+@@ -2366,6 +2368,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
+index 28aef31a6e6f2..aeeba59fa7342 100644
+--- a/fs/binfmt_elf_fdpic.c
++++ b/fs/binfmt_elf_fdpic.c
+@@ -320,7 +320,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
+ 	else
+ 		executable_stack = EXSTACK_DEFAULT;
+ 
+-	if (stack_size == 0) {
++	if (stack_size == 0 && interp_params.flags & ELF_FDPIC_FLAG_PRESENT) {
+ 		stack_size = interp_params.stack_size;
+ 		if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK)
+ 			executable_stack = EXSTACK_ENABLE_X;
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index 23b563ff0dd7a..7557fb429df5c 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -60,12 +60,11 @@ typedef struct {
+ 	char *name;
+ 	struct dentry *dentry;
+ 	struct file *interp_file;
++	refcount_t users;		/* sync removal with load_misc_binary() */
+ } Node;
+ 
+ static DEFINE_RWLOCK(entries_lock);
+ static struct file_system_type bm_fs_type;
+-static struct vfsmount *bm_mnt;
+-static int entry_count;
+ 
+ /*
+  * Max length of the register string.  Determined by:
+@@ -82,19 +81,23 @@ static int entry_count;
+  */
+ #define MAX_REGISTER_LENGTH 1920
+ 
+-/*
+- * Check if we support the binfmt
+- * if we do, return the node, else NULL
+- * locking is done in load_misc_binary
++/**
++ * search_binfmt_handler - search for a binary handler for @bprm
++ * @misc: handle to binfmt_misc instance
++ * @bprm: binary for which we are looking for a handler
++ *
++ * Search for a binary type handler for @bprm in the list of registered binary
++ * type handlers.
++ *
++ * Return: binary type list entry on success, NULL on failure
+  */
+-static Node *check_file(struct linux_binprm *bprm)
++static Node *search_binfmt_handler(struct linux_binprm *bprm)
+ {
+ 	char *p = strrchr(bprm->interp, '.');
+-	struct list_head *l;
++	Node *e;
+ 
+ 	/* Walk all the registered handlers. */
+-	list_for_each(l, &entries) {
+-		Node *e = list_entry(l, Node, list);
++	list_for_each_entry(e, &entries, list) {
+ 		char *s;
+ 		int j;
+ 
+@@ -123,9 +126,49 @@ static Node *check_file(struct linux_binprm *bprm)
+ 		if (j == e->size)
+ 			return e;
+ 	}
++
+ 	return NULL;
+ }
+ 
++/**
++ * get_binfmt_handler - try to find a binary type handler
++ * @misc: handle to binfmt_misc instance
++ * @bprm: binary for which we are looking for a handler
++ *
++ * Try to find a binfmt handler for the binary type. If one is found take a
++ * reference to protect against removal via bm_{entry,status}_write().
++ *
++ * Return: binary type list entry on success, NULL on failure
++ */
++static Node *get_binfmt_handler(struct linux_binprm *bprm)
++{
++	Node *e;
++
++	read_lock(&entries_lock);
++	e = search_binfmt_handler(bprm);
++	if (e)
++		refcount_inc(&e->users);
++	read_unlock(&entries_lock);
++	return e;
++}
++
++/**
++ * put_binfmt_handler - put binary handler node
++ * @e: node to put
++ *
++ * Free node syncing with load_misc_binary() and defer final free to
++ * load_misc_binary() in case it is using the binary type handler we were
++ * requested to remove.
++ */
++static void put_binfmt_handler(Node *e)
++{
++	if (refcount_dec_and_test(&e->users)) {
++		if (e->flags & MISC_FMT_OPEN_FILE)
++			filp_close(e->interp_file, NULL);
++		kfree(e);
++	}
++}
++
+ /*
+  * the loader itself
+  */
+@@ -140,12 +183,7 @@ static int load_misc_binary(struct linux_binprm *bprm)
+ 	if (!enabled)
+ 		return retval;
+ 
+-	/* to keep locking time low, we copy the interpreter string */
+-	read_lock(&entries_lock);
+-	fmt = check_file(bprm);
+-	if (fmt)
+-		dget(fmt->dentry);
+-	read_unlock(&entries_lock);
++	fmt = get_binfmt_handler(bprm);
+ 	if (!fmt)
+ 		return retval;
+ 
+@@ -239,7 +277,16 @@ static int load_misc_binary(struct linux_binprm *bprm)
+ 		goto error;
+ 
+ ret:
+-	dput(fmt->dentry);
++
++	/*
++	 * If we actually put the node here all concurrent calls to
++	 * load_misc_binary() will have finished. We also know
++	 * that for the refcount to be zero ->evict_inode() must have removed
++	 * the node to be deleted from the list. All that is left for us is to
++	 * close and free.
++	 */
++	put_binfmt_handler(fmt);
++
+ 	return retval;
+ error:
+ 	if (fd_binary > 0)
+@@ -600,30 +647,90 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
+ 	return inode;
+ }
+ 
++/**
++ * bm_evict_inode - cleanup data associated with @inode
++ * @inode: inode to which the data is attached
++ *
++ * Cleanup the binary type handler data associated with @inode if a binary type
++ * entry is removed or the filesystem is unmounted and the super block is
++ * shutdown.
++ *
++ * If the ->evict call was not caused by a super block shutdown but by a write
++ * to remove the entry or all entries via bm_{entry,status}_write() the entry
++ * will have already been removed from the list. We keep the list_empty() check
++ * to make that explicit.
++*/
+ static void bm_evict_inode(struct inode *inode)
+ {
+ 	Node *e = inode->i_private;
+ 
+-	if (e && e->flags & MISC_FMT_OPEN_FILE)
+-		filp_close(e->interp_file, NULL);
+-
+ 	clear_inode(inode);
+-	kfree(e);
++
++	if (e) {
++		write_lock(&entries_lock);
++		if (!list_empty(&e->list))
++			list_del_init(&e->list);
++		write_unlock(&entries_lock);
++		put_binfmt_handler(e);
++	}
+ }
+ 
+-static void kill_node(Node *e)
++/**
++ * unlink_binfmt_dentry - remove the dentry for the binary type handler
++ * @dentry: dentry associated with the binary type handler
++ *
++ * Do the actual filesystem work to remove a dentry for a registered binary
++ * type handler. Since binfmt_misc only allows simple files to be created
++ * directly under the root dentry of the filesystem we ensure that we are
++ * indeed passed a dentry directly beneath the root dentry, that the inode
++ * associated with the root dentry is locked, and that it is a regular file we
++ * are asked to remove.
++ */
++static void unlink_binfmt_dentry(struct dentry *dentry)
+ {
+-	struct dentry *dentry;
++	struct dentry *parent = dentry->d_parent;
++	struct inode *inode, *parent_inode;
++
++	/* All entries are immediate descendants of the root dentry. */
++	if (WARN_ON_ONCE(dentry->d_sb->s_root != parent))
++		return;
+ 
++	/* We only expect to be called on regular files. */
++	inode = d_inode(dentry);
++	if (WARN_ON_ONCE(!S_ISREG(inode->i_mode)))
++		return;
++
++	/* The parent inode must be locked. */
++	parent_inode = d_inode(parent);
++	if (WARN_ON_ONCE(!inode_is_locked(parent_inode)))
++		return;
++
++	if (simple_positive(dentry)) {
++		dget(dentry);
++		simple_unlink(parent_inode, dentry);
++		d_delete(dentry);
++		dput(dentry);
++	}
++}
++
++/**
++ * remove_binfmt_handler - remove a binary type handler
++ * @misc: handle to binfmt_misc instance
++ * @e: binary type handler to remove
++ *
++ * Remove a binary type handler from the list of binary type handlers and
++ * remove its associated dentry. This is called from
++ * binfmt_{entry,status}_write(). In the future, we might want to think about
++ * adding a proper ->unlink() method to binfmt_misc instead of forcing callers
++ * to use writes to files in order to delete binary type handlers. But it has
++ * worked for so long that it's not a pressing issue.
++ */
++static void remove_binfmt_handler(Node *e)
++{
+ 	write_lock(&entries_lock);
+ 	list_del_init(&e->list);
+ 	write_unlock(&entries_lock);
+-
+-	dentry = e->dentry;
+-	drop_nlink(d_inode(dentry));
+-	d_drop(dentry);
+-	dput(dentry);
+-	simple_release_fs(&bm_mnt, &entry_count);
++	unlink_binfmt_dentry(e->dentry);
+ }
+ 
+ /* /<entry> */
+@@ -650,8 +757,8 @@ bm_entry_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
+ 				size_t count, loff_t *ppos)
+ {
+-	struct dentry *root;
+-	Node *e = file_inode(file)->i_private;
++	struct inode *inode = file_inode(file);
++	Node *e = inode->i_private;
+ 	int res = parse_command(buffer, count);
+ 
+ 	switch (res) {
+@@ -665,13 +772,22 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
+ 		break;
+ 	case 3:
+ 		/* Delete this handler. */
+-		root = file_inode(file)->i_sb->s_root;
+-		inode_lock(d_inode(root));
++		inode = d_inode(inode->i_sb->s_root);
++		inode_lock(inode);
+ 
++		/*
++		 * In order to add new element or remove elements from the list
++		 * via bm_{entry,register,status}_write() inode_lock() on the
++		 * root inode must be held.
++		 * The lock is exclusive ensuring that the list can't be
++		 * modified. Only load_misc_binary() can access but does so
++		 * read-only. So we only need to take the write lock when we
++		 * actually remove the entry from the list.
++		 */
+ 		if (!list_empty(&e->list))
+-			kill_node(e);
++			remove_binfmt_handler(e);
+ 
+-		inode_unlock(d_inode(root));
++		inode_unlock(inode);
+ 		break;
+ 	default:
+ 		return res;
+@@ -730,13 +846,7 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
+ 	if (!inode)
+ 		goto out2;
+ 
+-	err = simple_pin_fs(&bm_fs_type, &bm_mnt, &entry_count);
+-	if (err) {
+-		iput(inode);
+-		inode = NULL;
+-		goto out2;
+-	}
+-
++	refcount_set(&e->users, 1);
+ 	e->dentry = dget(dentry);
+ 	inode->i_private = e;
+ 	inode->i_fop = &bm_entry_operations;
+@@ -780,7 +890,8 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
+ 		size_t count, loff_t *ppos)
+ {
+ 	int res = parse_command(buffer, count);
+-	struct dentry *root;
++	Node *e, *next;
++	struct inode *inode;
+ 
+ 	switch (res) {
+ 	case 1:
+@@ -793,13 +904,22 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
+ 		break;
+ 	case 3:
+ 		/* Delete all handlers. */
+-		root = file_inode(file)->i_sb->s_root;
+-		inode_lock(d_inode(root));
++		inode = d_inode(file_inode(file)->i_sb->s_root);
++		inode_lock(inode);
+ 
+-		while (!list_empty(&entries))
+-			kill_node(list_first_entry(&entries, Node, list));
++		/*
++		 * In order to add new element or remove elements from the list
++		 * via bm_{entry,register,status}_write() inode_lock() on the
++		 * root inode must be held.
++		 * The lock is exclusive ensuring that the list can't be
++		 * modified. Only load_misc_binary() can access but does so
++		 * read-only. So we only need to take the write lock when we
++		 * actually remove the entry from the list.
++		 */
++		list_for_each_entry_safe(e, next, &entries, list)
++			remove_binfmt_handler(e);
+ 
+-		inode_unlock(d_inode(root));
++		inode_unlock(inode);
+ 		break;
+ 	default:
+ 		return res;
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 95afe5ef7500c..e3a43bf63f06c 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -988,7 +988,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
+ 
+ 	if (delayed_node &&
+ 	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
+-		BUG_ON(!delayed_node->root);
++		ASSERT(delayed_node->root);
+ 		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
+ 		delayed_node->count--;
+ 
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index af89fce57ff75..7819bae9be29d 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -1729,9 +1729,9 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+ 	ctl->free_space -= bytes;
+ }
+ 
+-static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
+-			    struct btrfs_free_space *info, u64 offset,
+-			    u64 bytes)
++static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
++				  struct btrfs_free_space *info, u64 offset,
++				  u64 bytes)
+ {
+ 	unsigned long start, count;
+ 
+@@ -1988,7 +1988,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+ 
+ 	bytes_to_set = min(end - offset, bytes);
+ 
+-	bitmap_set_bits(ctl, info, offset, bytes_to_set);
++	btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set);
+ 
+ 	/*
+ 	 * We set some bytes, we have no idea what the max extent size is
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 2c86be3fc25cd..d2a988bf9c895 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4520,7 +4520,14 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
+ 	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	BUG_ON(ret == 0);
++	if (ret == 0) {
++		/*
++		 * Key with offset -1 found, there would have to exist a root
++		 * with such id, but this is out of valid range.
++		 */
++		ret = -EUCLEAN;
++		goto out;
++	}
+ 
+ 	ret = 0;
+ 	if (path->slots[0] > 0) {
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index d6184b6206ff2..7d0b7e58d66af 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2603,8 +2603,6 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
+ 	if (nr_old_roots == 0 && nr_new_roots == 0)
+ 		goto out_free;
+ 
+-	BUG_ON(!fs_info->quota_root);
+-
+ 	trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
+ 					num_bytes, nr_old_roots, nr_new_roots);
+ 
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 576c027909f8a..e1063ef3dece5 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -692,7 +692,12 @@ static int begin_cmd(struct send_ctx *sctx, int cmd)
+ 	if (WARN_ON(!sctx->send_buf))
+ 		return -EINVAL;
+ 
+-	BUG_ON(sctx->send_size);
++	if (unlikely(sctx->send_size != 0)) {
++		btrfs_err(sctx->send_root->fs_info,
++			  "send: command header buffer not empty cmd %d offset %llu",
++			  cmd, sctx->send_off);
++		return -EINVAL;
++	}
+ 
+ 	sctx->send_size += sizeof(*hdr);
+ 	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 90b12c7c0f20a..fed3dfed2c249 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3591,9 +3591,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
+ 	struct ext4_extent *ex, *abut_ex;
+ 	ext4_lblk_t ee_block, eof_block;
+ 	unsigned int ee_len, depth, map_len = map->m_len;
+-	int allocated = 0, max_zeroout = 0;
+ 	int err = 0;
+ 	int split_flag = EXT4_EXT_DATA_VALID2;
++	int allocated = 0;
++	unsigned int max_zeroout = 0;
+ 
+ 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
+ 		"block %llu, max_blocks %u\n", inode->i_ino,
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 9bc590564ea1f..b268dc0e1df44 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -5227,6 +5227,9 @@ static int ext4_try_to_trim_range(struct super_block *sb,
+ 	bool set_trimmed = false;
+ 	void *bitmap;
+ 
++	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
++		return 0;
++
+ 	last = ext4_last_grp_cluster(sb, e4b->bd_group);
+ 	bitmap = e4b->bd_bitmap;
+ 	if (start == 0 && max >= last)
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index e43b57755a7fe..da37e2b8a0ec7 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2131,6 +2131,8 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
+ #endif
+ 
+ 	segno = GET_SEGNO(sbi, blkaddr);
++	if (segno == NULL_SEGNO)
++		return;
+ 
+ 	se = get_seg_entry(sbi, segno);
+ 	new_vblocks = se->valid_blocks + del;
+@@ -3113,8 +3115,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+ 	 * since SSR needs latest valid block information.
+ 	 */
+ 	update_sit_entry(sbi, *new_blkaddr, 1);
+-	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+-		update_sit_entry(sbi, old_blkaddr, -1);
++	update_sit_entry(sbi, old_blkaddr, -1);
+ 
+ 	if (!__has_curseg_space(sbi, type))
+ 		sit_i->s_ops->allocate_segment(sbi, type, false);
+diff --git a/fs/file.c b/fs/file.c
+index 9e62810462742..cf7c04f355647 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -41,27 +41,23 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
+ #define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
+ #define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))
+ 
++#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
+ /*
+  * Copy 'count' fd bits from the old table to the new table and clear the extra
+  * space if any.  This does not copy the file pointers.  Called with the files
+  * spinlock held for write.
+  */
+-static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
+-			    unsigned int count)
++static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
++			    unsigned int copy_words)
+ {
+-	unsigned int cpy, set;
+-
+-	cpy = count / BITS_PER_BYTE;
+-	set = (nfdt->max_fds - count) / BITS_PER_BYTE;
+-	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
+-	memset((char *)nfdt->open_fds + cpy, 0, set);
+-	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
+-	memset((char *)nfdt->close_on_exec + cpy, 0, set);
+-
+-	cpy = BITBIT_SIZE(count);
+-	set = BITBIT_SIZE(nfdt->max_fds) - cpy;
+-	memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
+-	memset((char *)nfdt->full_fds_bits + cpy, 0, set);
++	unsigned int nwords = fdt_words(nfdt);
++
++	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
++			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
++	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
++			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
++	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
++			copy_words, nwords);
+ }
+ 
+ /*
+@@ -79,7 +75,7 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
+ 	memcpy(nfdt->fd, ofdt->fd, cpy);
+ 	memset((char *)nfdt->fd + cpy, 0, set);
+ 
+-	copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
++	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
+ }
+ 
+ static struct fdtable * alloc_fdtable(unsigned int nr)
+@@ -330,7 +326,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
+ 		open_files = count_open_files(old_fdt);
+ 	}
+ 
+-	copy_fd_bitmaps(new_fdt, old_fdt, open_files);
++	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
+ 
+ 	old_fds = old_fdt->fd;
+ 	new_fds = new_fdt->fd;
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index ac6a8da340139..2d02008ecb5b5 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1626,9 +1626,11 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
+ 
+ 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
+ 		err = fuse_copy_page(cs, &page, offset, this_num, 0);
+-		if (!err && offset == 0 &&
+-		    (this_num == PAGE_SIZE || file_size == end))
++		if (!PageUptodate(page) && !err && offset == 0 &&
++		    (this_num == PAGE_SIZE || file_size == end)) {
++			zero_user_segment(page, this_num, PAGE_SIZE);
+ 			SetPageUptodate(page);
++		}
+ 		unlock_page(page);
+ 		put_page(page);
+ 
+diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
+index fadf6fb90fe22..48f144d083813 100644
+--- a/fs/fuse/virtio_fs.c
++++ b/fs/fuse/virtio_fs.c
+@@ -237,6 +237,16 @@ static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
+ 		return -ENOMEM;
+ 	memcpy(fs->tag, tag_buf, len);
+ 	fs->tag[len] = '\0';
++
++	/* While the VIRTIO specification allows any character, newlines are
++	 * awkward on mount(8) command-lines and cause problems in the sysfs
++	 * "tag" attr and uevent TAG= properties. Forbid them.
++	 */
++	if (strchr(fs->tag, '\n')) {
++		dev_dbg(&vdev->dev, "refusing virtiofs tag with newline character\n");
++		return -EINVAL;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 988bb7b17ed8f..4e0c933e08002 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1869,7 +1869,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
+ 	kuid_t ouid, nuid;
+ 	kgid_t ogid, ngid;
+ 	int error;
+-	struct gfs2_alloc_parms ap;
++	struct gfs2_alloc_parms ap = {};
+ 
+ 	ouid = inode->i_uid;
+ 	ogid = inode->i_gid;
+diff --git a/fs/inode.c b/fs/inode.c
+index a6c4c443d45a6..8c850a062300e 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -450,6 +450,39 @@ static void inode_lru_list_del(struct inode *inode)
+ 		this_cpu_dec(nr_unused);
+ }
+ 
++static void inode_pin_lru_isolating(struct inode *inode)
++{
++	lockdep_assert_held(&inode->i_lock);
++	WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
++	inode->i_state |= I_LRU_ISOLATING;
++}
++
++static void inode_unpin_lru_isolating(struct inode *inode)
++{
++	spin_lock(&inode->i_lock);
++	WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
++	inode->i_state &= ~I_LRU_ISOLATING;
++	smp_mb();
++	wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
++	spin_unlock(&inode->i_lock);
++}
++
++static void inode_wait_for_lru_isolating(struct inode *inode)
++{
++	spin_lock(&inode->i_lock);
++	if (inode->i_state & I_LRU_ISOLATING) {
++		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
++		wait_queue_head_t *wqh;
++
++		wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
++		spin_unlock(&inode->i_lock);
++		__wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
++		spin_lock(&inode->i_lock);
++		WARN_ON(inode->i_state & I_LRU_ISOLATING);
++	}
++	spin_unlock(&inode->i_lock);
++}
++
+ /**
+  * inode_sb_list_add - add inode to the superblock list of inodes
+  * @inode: inode to add
+@@ -562,6 +595,8 @@ static void evict(struct inode *inode)
+ 
+ 	inode_sb_list_del(inode);
+ 
++	inode_wait_for_lru_isolating(inode);
++
+ 	/*
+ 	 * Wait for flusher thread to be done with the inode so that filesystem
+ 	 * does not start destroying it while writeback is still running. Since
+@@ -761,7 +796,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
+ 	}
+ 
+ 	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
+-		__iget(inode);
++		inode_pin_lru_isolating(inode);
+ 		spin_unlock(&inode->i_lock);
+ 		spin_unlock(lru_lock);
+ 		if (remove_inode_buffers(inode)) {
+@@ -774,7 +809,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
+ 			if (current->reclaim_state)
+ 				current->reclaim_state->reclaimed_slab += reap;
+ 		}
+-		iput(inode);
++		inode_unpin_lru_isolating(inode);
+ 		spin_lock(lru_lock);
+ 		return LRU_RETRY;
+ 	}
+diff --git a/fs/locks.c b/fs/locks.c
+index 85c8af53d4eb1..cf6ed857664b2 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2542,7 +2542,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		f = fcheck(fd);
+ 		spin_unlock(&current->files->file_lock);
+ 		if (f != filp) {
+-			locks_remove_posix(filp, &current->files);
++			locks_remove_posix(filp, current->files);
+ 			error = -EBADF;
+ 		}
+ 	}
+@@ -2672,7 +2672,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		f = fcheck(fd);
+ 		spin_unlock(&current->files->file_lock);
+ 		if (f != filp) {
+-			locks_remove_posix(filp, &current->files);
++			locks_remove_posix(filp, current->files);
+ 			error = -EBADF;
+ 		}
+ 	}
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 3b19fa74b0620..3f7d905d7528d 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1953,6 +1953,14 @@ pnfs_update_layout(struct inode *ino,
+ 	}
+ 
+ lookup_again:
++	if (!nfs4_valid_open_stateid(ctx->state)) {
++		trace_pnfs_update_layout(ino, pos, count,
++					 iomode, lo, lseg,
++					 PNFS_UPDATE_LAYOUT_INVALID_OPEN);
++		lseg = ERR_PTR(-EIO);
++		goto out;
++	}
++
+ 	lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
+ 	if (IS_ERR(lseg))
+ 		goto out;
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index a7ddb874912d4..14c0dd5b65a43 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -986,9 +986,8 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid)
+ 	 * smp_mb__before_atomic() in dquot_acquire().
+ 	 */
+ 	smp_rmb();
+-#ifdef CONFIG_QUOTA_DEBUG
+-	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
+-#endif
++	/* Has somebody invalidated entry under us? */
++	WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash));
+ out:
+ 	if (empty)
+ 		do_destroy_dquot(empty);
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 29fc933df3bf0..a92ecbc391ef9 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -216,22 +216,24 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
+ #define small_const_nbits(nbits) \
+ 	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
+ 
++#define bitmap_size(nbits)	(ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
++
+ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+ {
+-	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++	unsigned int len = bitmap_size(nbits);
+ 	memset(dst, 0, len);
+ }
+ 
+ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+ {
+-	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++	unsigned int len = bitmap_size(nbits);
+ 	memset(dst, 0xff, len);
+ }
+ 
+ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ 			unsigned int nbits)
+ {
+-	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
++	unsigned int len = bitmap_size(nbits);
+ 	memcpy(dst, src, len);
+ }
+ 
+@@ -246,6 +248,18 @@ static inline void bitmap_copy_clear_tail(unsigned long *dst,
+ 		dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
+ }
+ 
++static inline void bitmap_copy_and_extend(unsigned long *to,
++					  const unsigned long *from,
++					  unsigned int count, unsigned int size)
++{
++	unsigned int copy = BITS_TO_LONGS(count);
++
++	memcpy(to, from, copy * sizeof(long));
++	if (count % BITS_PER_LONG)
++		to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
++	memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
++}
++
+ /*
+  * On 32-bit systems bitmaps are represented as u32 arrays internally, and
+  * therefore conversion is not needed when copying data from/to arrays of u32.
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 308c2d8cdca19..5194467d7d753 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -59,7 +59,7 @@ struct blk_stat_callback;
+  */
+ #define BLKCG_MAX_POLS		5
+ 
+-static inline int blk_validate_block_size(unsigned int bsize)
++static inline int blk_validate_block_size(unsigned long bsize)
+ {
+ 	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
+ 		return -EINVAL;
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index 78a73eba64dd6..30c0c3c610ad2 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -685,7 +685,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+  */
+ static inline unsigned int cpumask_size(void)
+ {
+-	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
++	return bitmap_size(nr_cpumask_bits);
+ }
+ 
+ /*
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 60631f3abddbd..b077f32043653 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -332,6 +332,8 @@ void *dm_per_bio_data(struct bio *bio, size_t data_size);
+ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+ unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
+ 
++u64 dm_start_time_ns_from_clone(struct bio *bio);
++
+ int dm_register_target(struct target_type *t);
+ void dm_unregister_target(struct target_type *t);
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 5e122cb506d6e..d4f5fcc60744d 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2153,6 +2153,9 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+  *			Used to detect that mark_inode_dirty() should not move
+  * 			inode between dirty lists.
+  *
++ * I_LRU_ISOLATING	Inode is pinned being isolated from LRU without holding
++ *			i_count.
++ *
+  * Q: What is the difference between I_WILL_FREE and I_FREEING?
+  */
+ #define I_DIRTY_SYNC		(1 << 0)
+@@ -2174,6 +2177,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+ #define I_OVL_INUSE		(1 << 14)
+ #define I_CREATING		(1 << 15)
+ #define I_SYNC_QUEUED		(1 << 17)
++#define __I_LRU_ISOLATING	19
++#define I_LRU_ISOLATING		(1 << __I_LRU_ISOLATING)
+ 
+ #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+ #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+diff --git a/include/linux/overflow.h b/include/linux/overflow.h
+index 50c93ca0c3d6f..63e7c77ba942f 100644
+--- a/include/linux/overflow.h
++++ b/include/linux/overflow.h
+@@ -241,80 +241,125 @@
+ })
+ 
+ /**
+- * array_size() - Calculate size of 2-dimensional array.
++ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
+  *
+- * @a: dimension one
+- * @b: dimension two
++ * @factor1: first factor
++ * @factor2: second factor
+  *
+- * Calculates size of 2-dimensional array: @a * @b.
+- *
+- * Returns: number of bytes needed to represent the array or SIZE_MAX on
+- * overflow.
++ * Returns: calculate @factor1 * @factor2, both promoted to size_t,
++ * with any overflow causing the return value to be SIZE_MAX. The
++ * lvalue must be size_t to avoid implicit type conversion.
+  */
+-static inline __must_check size_t array_size(size_t a, size_t b)
++static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
+ {
+ 	size_t bytes;
+ 
+-	if (check_mul_overflow(a, b, &bytes))
++	if (check_mul_overflow(factor1, factor2, &bytes))
+ 		return SIZE_MAX;
+ 
+ 	return bytes;
+ }
+ 
+ /**
+- * array3_size() - Calculate size of 3-dimensional array.
++ * size_add() - Calculate size_t addition with saturation at SIZE_MAX
+  *
+- * @a: dimension one
+- * @b: dimension two
+- * @c: dimension three
++ * @addend1: first addend
++ * @addend2: second addend
+  *
+- * Calculates size of 3-dimensional array: @a * @b * @c.
+- *
+- * Returns: number of bytes needed to represent the array or SIZE_MAX on
+- * overflow.
++ * Returns: calculate @addend1 + @addend2, both promoted to size_t,
++ * with any overflow causing the return value to be SIZE_MAX. The
++ * lvalue must be size_t to avoid implicit type conversion.
+  */
+-static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
++static inline size_t __must_check size_add(size_t addend1, size_t addend2)
+ {
+ 	size_t bytes;
+ 
+-	if (check_mul_overflow(a, b, &bytes))
+-		return SIZE_MAX;
+-	if (check_mul_overflow(bytes, c, &bytes))
++	if (check_add_overflow(addend1, addend2, &bytes))
+ 		return SIZE_MAX;
+ 
+ 	return bytes;
+ }
+ 
+-/*
+- * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for
+- * struct_size() below.
++/**
++ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
++ *
++ * @minuend: value to subtract from
++ * @subtrahend: value to subtract from @minuend
++ *
++ * Returns: calculate @minuend - @subtrahend, both promoted to size_t,
++ * with any overflow causing the return value to be SIZE_MAX. For
++ * composition with the size_add() and size_mul() helpers, neither
++ * argument may be SIZE_MAX (or the result will be forced to SIZE_MAX).
++ * The lvalue must be size_t to avoid implicit type conversion.
+  */
+-static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
++static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
+ {
+ 	size_t bytes;
+ 
+-	if (check_mul_overflow(a, b, &bytes))
+-		return SIZE_MAX;
+-	if (check_add_overflow(bytes, c, &bytes))
++	if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
++	    check_sub_overflow(minuend, subtrahend, &bytes))
+ 		return SIZE_MAX;
+ 
+ 	return bytes;
+ }
+ 
+ /**
+- * struct_size() - Calculate size of structure with trailing array.
++ * array_size() - Calculate size of 2-dimensional array.
++ *
++ * @a: dimension one
++ * @b: dimension two
++ *
++ * Calculates size of 2-dimensional array: @a * @b.
++ *
++ * Returns: number of bytes needed to represent the array or SIZE_MAX on
++ * overflow.
++ */
++#define array_size(a, b)	size_mul(a, b)
++
++/**
++ * array3_size() - Calculate size of 3-dimensional array.
++ *
++ * @a: dimension one
++ * @b: dimension two
++ * @c: dimension three
++ *
++ * Calculates size of 3-dimensional array: @a * @b * @c.
++ *
++ * Returns: number of bytes needed to represent the array or SIZE_MAX on
++ * overflow.
++ */
++#define array3_size(a, b, c)	size_mul(size_mul(a, b), c)
++
++/**
++ * flex_array_size() - Calculate size of a flexible array member
++ *                     within an enclosing structure.
++ *
++ * @p: Pointer to the structure.
++ * @member: Name of the flexible array member.
++ * @count: Number of elements in the array.
++ *
++ * Calculates size of a flexible array of @count number of @member
++ * elements, at the end of structure @p.
++ *
++ * Return: number of bytes needed or SIZE_MAX on overflow.
++ */
++#define flex_array_size(p, member, count)				\
++	size_mul(count,							\
++		 sizeof(*(p)->member) + __must_be_array((p)->member))
++
++/**
++ * struct_size() - Calculate size of structure with trailing flexible array.
++ *
+  * @p: Pointer to the structure.
+  * @member: Name of the array member.
+- * @n: Number of elements in the array.
++ * @count: Number of elements in the array.
+  *
+  * Calculates size of memory needed for structure @p followed by an
+- * array of @n @member elements.
++ * array of @count number of @member elements.
+  *
+  * Return: number of bytes needed or SIZE_MAX on overflow.
+  */
+-#define struct_size(p, member, n)					\
+-	__ab_c_size(n,							\
+-		    sizeof(*(p)->member) + __must_be_array((p)->member),\
+-		    sizeof(*(p)))
++#define struct_size(p, member, count)					\
++	size_add(sizeof(*(p)), flex_array_size(p, member, count))
+ 
+ #endif /* __LINUX_OVERFLOW_H */
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index 16258c0c7319e..36259516ec0d2 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -61,7 +61,7 @@ static inline bool sk_can_busy_loop(struct sock *sk)
+ static inline unsigned long busy_loop_current_time(void)
+ {
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+-	return (unsigned long)(local_clock() >> 10);
++	return (unsigned long)(ktime_get_ns() >> 10);
+ #else
+ 	return 0;
+ #endif
+diff --git a/include/net/kcm.h b/include/net/kcm.h
+index 2d704f8f49059..8e8252e08a9ce 100644
+--- a/include/net/kcm.h
++++ b/include/net/kcm.h
+@@ -70,6 +70,7 @@ struct kcm_sock {
+ 	struct work_struct tx_work;
+ 	struct list_head wait_psock_list;
+ 	struct sk_buff *seq_skb;
++	struct mutex tx_mutex;
+ 	u32 tx_stopped : 1;
+ 
+ 	/* Don't use bit fields here, these are set under different locks */
+diff --git a/ipc/util.c b/ipc/util.c
+index 09c3bd9f8e768..aaf28b14a030e 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -754,21 +754,13 @@ struct pid_namespace *ipc_seq_pid_ns(struct seq_file *s)
+ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
+ 					      loff_t *new_pos)
+ {
+-	struct kern_ipc_perm *ipc;
+-	int total, id;
+-
+-	total = 0;
+-	for (id = 0; id < pos && total < ids->in_use; id++) {
+-		ipc = idr_find(&ids->ipcs_idr, id);
+-		if (ipc != NULL)
+-			total++;
+-	}
++	struct kern_ipc_perm *ipc = NULL;
++	int max_idx = ipc_get_maxidx(ids);
+ 
+-	ipc = NULL;
+-	if (total >= ids->in_use)
++	if (max_idx == -1 || pos > max_idx)
+ 		goto out;
+ 
+-	for (; pos < ipc_mni; pos++) {
++	for (; pos <= max_idx; pos++) {
+ 		ipc = idr_find(&ids->ipcs_idr, pos);
+ 		if (ipc != NULL) {
+ 			rcu_read_lock();
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 6ec74b9120b75..807fc3fbd5ab2 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -22,6 +22,7 @@
+  *  distribution for more details.
+  */
+ 
++#include "cgroup-internal.h"
+ #include <linux/cpu.h>
+ #include <linux/cpumask.h>
+ #include <linux/cpuset.h>
+@@ -3644,10 +3645,14 @@ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
+ 	if (!buf)
+ 		goto out;
+ 
+-	css = task_get_css(tsk, cpuset_cgrp_id);
+-	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
+-				current->nsproxy->cgroup_ns);
+-	css_put(css);
++	rcu_read_lock();
++	spin_lock_irq(&css_set_lock);
++	css = task_css(tsk, cpuset_cgrp_id);
++	retval = cgroup_path_ns_locked(css->cgroup, buf, PATH_MAX,
++				       current->nsproxy->cgroup_ns);
++	spin_unlock_irq(&css_set_lock);
++	rcu_read_unlock();
++
+ 	if (retval >= PATH_MAX)
+ 		retval = -ENAMETOOLONG;
+ 	if (retval < 0)
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 1b301dd1692b8..2e4f136bdf6ab 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1183,6 +1183,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ 	struct hrtimer_clock_base *base;
+ 	unsigned long flags;
+ 
++	if (WARN_ON_ONCE(!timer->function))
++		return;
+ 	/*
+ 	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
+ 	 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
+diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
+index 052f5b727be7d..d6b3964197d33 100644
+--- a/lib/math/prime_numbers.c
++++ b/lib/math/prime_numbers.c
+@@ -6,8 +6,6 @@
+ #include <linux/prime_numbers.h>
+ #include <linux/slab.h>
+ 
+-#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
+-
+ struct primes {
+ 	struct rcu_head rcu;
+ 	unsigned long last, sz;
+diff --git a/lib/test_overflow.c b/lib/test_overflow.c
+index 7a4b6f6c5473c..7a5a5738d2d21 100644
+--- a/lib/test_overflow.c
++++ b/lib/test_overflow.c
+@@ -588,12 +588,110 @@ static int __init test_overflow_allocation(void)
+ 	return err;
+ }
+ 
++struct __test_flex_array {
++	unsigned long flags;
++	size_t count;
++	unsigned long data[];
++};
++
++static int __init test_overflow_size_helpers(void)
++{
++	struct __test_flex_array *obj;
++	int count = 0;
++	int err = 0;
++	int var;
++
++#define check_one_size_helper(expected, func, args...)	({	\
++	bool __failure = false;					\
++	size_t _r;						\
++								\
++	_r = func(args);					\
++	if (_r != (expected)) {					\
++		pr_warn("expected " #func "(" #args ") "	\
++			"to return %zu but got %zu instead\n",	\
++			(size_t)(expected), _r);		\
++		__failure = true;				\
++	}							\
++	count++;						\
++	__failure;						\
++})
++
++	var = 4;
++	err |= check_one_size_helper(20,       size_mul, var++, 5);
++	err |= check_one_size_helper(20,       size_mul, 4, var++);
++	err |= check_one_size_helper(0,	       size_mul, 0, 3);
++	err |= check_one_size_helper(0,	       size_mul, 3, 0);
++	err |= check_one_size_helper(6,	       size_mul, 2, 3);
++	err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX,  1);
++	err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX,  3);
++	err |= check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3);
++
++	var = 4;
++	err |= check_one_size_helper(9,        size_add, var++, 5);
++	err |= check_one_size_helper(9,        size_add, 4, var++);
++	err |= check_one_size_helper(9,	       size_add, 9, 0);
++	err |= check_one_size_helper(9,	       size_add, 0, 9);
++	err |= check_one_size_helper(5,	       size_add, 2, 3);
++	err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX,  1);
++	err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX,  3);
++	err |= check_one_size_helper(SIZE_MAX, size_add, SIZE_MAX, -3);
++
++	var = 4;
++	err |= check_one_size_helper(1,        size_sub, var--, 3);
++	err |= check_one_size_helper(1,        size_sub, 4, var--);
++	err |= check_one_size_helper(1,        size_sub, 3, 2);
++	err |= check_one_size_helper(9,	       size_sub, 9, 0);
++	err |= check_one_size_helper(SIZE_MAX, size_sub, 9, -3);
++	err |= check_one_size_helper(SIZE_MAX, size_sub, 0, 9);
++	err |= check_one_size_helper(SIZE_MAX, size_sub, 2, 3);
++	err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX,  0);
++	err |= check_one_size_helper(SIZE_MAX, size_sub, SIZE_MAX, 10);
++	err |= check_one_size_helper(SIZE_MAX, size_sub, 0,  SIZE_MAX);
++	err |= check_one_size_helper(SIZE_MAX, size_sub, 14, SIZE_MAX);
++	err |= check_one_size_helper(SIZE_MAX - 2, size_sub, SIZE_MAX - 1,  1);
++	err |= check_one_size_helper(SIZE_MAX - 4, size_sub, SIZE_MAX - 1,  3);
++	err |= check_one_size_helper(1,		size_sub, SIZE_MAX - 1, -3);
++
++	var = 4;
++	err |= check_one_size_helper(4 * sizeof(*obj->data),
++				     flex_array_size, obj, data, var++);
++	err |= check_one_size_helper(5 * sizeof(*obj->data),
++				     flex_array_size, obj, data, var++);
++	err |= check_one_size_helper(0, flex_array_size, obj, data, 0);
++	err |= check_one_size_helper(sizeof(*obj->data),
++				     flex_array_size, obj, data, 1);
++	err |= check_one_size_helper(7 * sizeof(*obj->data),
++				     flex_array_size, obj, data, 7);
++	err |= check_one_size_helper(SIZE_MAX,
++				     flex_array_size, obj, data, -1);
++	err |= check_one_size_helper(SIZE_MAX,
++				     flex_array_size, obj, data, SIZE_MAX - 4);
++
++	var = 4;
++	err |= check_one_size_helper(sizeof(*obj) + (4 * sizeof(*obj->data)),
++				     struct_size, obj, data, var++);
++	err |= check_one_size_helper(sizeof(*obj) + (5 * sizeof(*obj->data)),
++				     struct_size, obj, data, var++);
++	err |= check_one_size_helper(sizeof(*obj), struct_size, obj, data, 0);
++	err |= check_one_size_helper(sizeof(*obj) + sizeof(*obj->data),
++				     struct_size, obj, data, 1);
++	err |= check_one_size_helper(SIZE_MAX,
++				     struct_size, obj, data, -3);
++	err |= check_one_size_helper(SIZE_MAX,
++				     struct_size, obj, data, SIZE_MAX - 3);
++
++	pr_info("%d overflow size helper tests finished\n", count);
++
++	return err;
++}
++
+ static int __init test_module_init(void)
+ {
+ 	int err = 0;
+ 
+ 	err |= test_overflow_calculation();
+ 	err |= test_overflow_shift();
++	err |= test_overflow_size_helpers();
+ 	err |= test_overflow_allocation();
+ 
+ 	if (err) {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 0cc7ee52d6bda..5ac119509335d 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4732,9 +4732,12 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
+ 	buf = endp + 1;
+ 
+ 	cfd = simple_strtoul(buf, &endp, 10);
+-	if ((*endp != ' ') && (*endp != '\0'))
++	if (*endp == '\0')
++		buf = endp;
++	else if (*endp == ' ')
++		buf = endp + 1;
++	else
+ 		return -EINVAL;
+-	buf = endp + 1;
+ 
+ 	event = kzalloc(sizeof(*event), GFP_KERNEL);
+ 	if (!event)
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index 43c284158f63e..089160361dded 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -385,7 +385,8 @@ static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+ 
+ 	case BNEP_COMPRESSED_DST_ONLY:
+ 		__skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN);
+-		__skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2);
++		__skb_put_data(nskb, s->eh.h_source, ETH_ALEN);
++		put_unaligned(s->eh.h_proto, (__be16 *)__skb_put(nskb, 2));
+ 		break;
+ 
+ 	case BNEP_GENERAL:
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 71a7e42097cc0..f1e8bda97c106 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -4055,15 +4055,27 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
+ 	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
+ }
+ 
+-static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
++static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
+ {
+-	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+-		/* ACL tx timeout must be longer than maximum
+-		 * link supervision timeout (40.9 seconds) */
+-		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
+-				       HCI_ACL_TX_TIMEOUT))
+-			hci_link_tx_to(hdev, ACL_LINK);
++	unsigned long last_tx;
++
++	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
++		return;
++
++	switch (type) {
++	case LE_LINK:
++		last_tx = hdev->le_last_tx;
++		break;
++	default:
++		last_tx = hdev->acl_last_tx;
++		break;
+ 	}
++
++	/* tx timeout must be longer than maximum link supervision timeout
++	 * (40.9 seconds)
++	 */
++	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
++		hci_link_tx_to(hdev, type);
+ }
+ 
+ static void hci_sched_acl_pkt(struct hci_dev *hdev)
+@@ -4073,7 +4085,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
+ 	struct sk_buff *skb;
+ 	int quote;
+ 
+-	__check_timeout(hdev, cnt);
++	__check_timeout(hdev, cnt, ACL_LINK);
+ 
+ 	while (hdev->acl_cnt &&
+ 	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+@@ -4112,8 +4124,6 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
+ 	int quote;
+ 	u8 type;
+ 
+-	__check_timeout(hdev, cnt);
+-
+ 	BT_DBG("%s", hdev->name);
+ 
+ 	if (hdev->dev_type == HCI_AMP)
+@@ -4121,6 +4131,8 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
+ 	else
+ 		type = ACL_LINK;
+ 
++	__check_timeout(hdev, cnt, type);
++
+ 	while (hdev->block_cnt > 0 &&
+ 	       (chan = hci_chan_sent(hdev, type, &quote))) {
+ 		u32 priority = (skb_peek(&chan->data_q))->priority;
+@@ -4233,24 +4245,19 @@ static void hci_sched_le(struct hci_dev *hdev)
+ {
+ 	struct hci_chan *chan;
+ 	struct sk_buff *skb;
+-	int quote, cnt, tmp;
++	int quote, *cnt, tmp;
+ 
+ 	BT_DBG("%s", hdev->name);
+ 
+ 	if (!hci_conn_num(hdev, LE_LINK))
+ 		return;
+ 
+-	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+-		/* LE tx timeout must be longer than maximum
+-		 * link supervision timeout (40.9 seconds) */
+-		if (!hdev->le_cnt && hdev->le_pkts &&
+-		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
+-			hci_link_tx_to(hdev, LE_LINK);
+-	}
++	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
+ 
+-	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+-	tmp = cnt;
+-	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
++	__check_timeout(hdev, *cnt, LE_LINK);
++
++	tmp = *cnt;
++	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ 		u32 priority = (skb_peek(&chan->data_q))->priority;
+ 		while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ 			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+@@ -4265,18 +4272,13 @@ static void hci_sched_le(struct hci_dev *hdev)
+ 			hci_send_frame(hdev, skb);
+ 			hdev->le_last_tx = jiffies;
+ 
+-			cnt--;
++			(*cnt)--;
+ 			chan->sent++;
+ 			chan->conn->sent++;
+ 		}
+ 	}
+ 
+-	if (hdev->le_pkts)
+-		hdev->le_cnt = cnt;
+-	else
+-		hdev->acl_cnt = cnt;
+-
+-	if (cnt != tmp)
++	if (*cnt != tmp)
+ 		hci_prio_recalculate(hdev, LE_LINK);
+ }
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 0ae5d3cab4dc2..2706e238ca449 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2908,6 +2908,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 		 * will be kept and this function does nothing.
+ 		 */
+ 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
++		if (!p) {
++			err = -EIO;
++			goto unlock;
++		}
+ 
+ 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+ 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 9ae38c3e2bf0a..f0346cf4462e0 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -549,6 +549,9 @@ int __ethtool_get_link_ksettings(struct net_device *dev,
+ 	if (!dev->ethtool_ops->get_link_ksettings)
+ 		return -EOPNOTSUPP;
+ 
++	if (!netif_device_present(dev))
++		return -ENODEV;
++
+ 	memset(link_ksettings, 0, sizeof(*link_ksettings));
+ 	return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
+ }
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index ad45f13a0370b..bcad7028bbf45 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -212,7 +212,7 @@ static ssize_t speed_show(struct device *dev,
+ 	if (!rtnl_trylock())
+ 		return restart_syscall();
+ 
+-	if (netif_running(netdev) && netif_device_present(netdev)) {
++	if (netif_running(netdev)) {
+ 		struct ethtool_link_ksettings cmd;
+ 
+ 		if (!__ethtool_get_link_ksettings(netdev, &cmd))
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index c67d634dccd47..a34a562b0954d 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1882,6 +1882,7 @@ int ip6_send_skb(struct sk_buff *skb)
+ 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ 	int err;
+ 
++	rcu_read_lock();
+ 	err = ip6_local_out(net, skb->sk, skb);
+ 	if (err) {
+ 		if (err > 0)
+@@ -1891,6 +1892,7 @@ int ip6_send_skb(struct sk_buff *skb)
+ 				      IPSTATS_MIB_OUTDISCARDS);
+ 	}
+ 
++	rcu_read_unlock();
+ 	return err;
+ }
+ 
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index f0364649186b9..dc85973479284 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -1136,8 +1136,7 @@ static int iucv_message_receive_iprmdata(struct iucv_path *path,
+ 		size = (size < 8) ? size : 8;
+ 		for (array = buffer; size > 0; array++) {
+ 			copy = min_t(size_t, size, array->length);
+-			memcpy((u8 *)(addr_t) array->address,
+-				rmmsg, copy);
++			memcpy(phys_to_virt(array->address), rmmsg, copy);
+ 			rmmsg += copy;
+ 			size -= copy;
+ 		}
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 920b0ebf1cb80..50dceed77ba66 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -913,6 +913,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
+ 	int err = -EPIPE;
+ 
++	mutex_lock(&kcm->tx_mutex);
+ 	lock_sock(sk);
+ 
+ 	/* Per tcp_sendmsg this should be in poll */
+@@ -1061,6 +1062,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);
+ 
+ 	release_sock(sk);
++	mutex_unlock(&kcm->tx_mutex);
+ 	return copied;
+ 
+ out_error:
+@@ -1086,6 +1088,7 @@ static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		sk->sk_write_space(sk);
+ 
+ 	release_sock(sk);
++	mutex_unlock(&kcm->tx_mutex);
+ 	return err;
+ }
+ 
+@@ -1328,6 +1331,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
+ 	spin_unlock_bh(&mux->lock);
+ 
+ 	INIT_WORK(&kcm->tx_work, kcm_tx_work);
++	mutex_init(&kcm->tx_mutex);
+ 
+ 	spin_lock_bh(&mux->rx_lock);
+ 	kcm_rcv_ready(kcm);
+diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
+index f6d4d0fa23a62..bf829fabf2785 100644
+--- a/net/netfilter/nft_counter.c
++++ b/net/netfilter/nft_counter.c
+@@ -105,11 +105,16 @@ static void nft_counter_reset(struct nft_counter_percpu_priv *priv,
+ 			      struct nft_counter *total)
+ {
+ 	struct nft_counter *this_cpu;
++	seqcount_t *myseq;
+ 
+ 	local_bh_disable();
+ 	this_cpu = this_cpu_ptr(priv->counter);
++	myseq = this_cpu_ptr(&nft_counter_seq);
++
++	write_seqcount_begin(myseq);
+ 	this_cpu->packets -= total->packets;
+ 	this_cpu->bytes -= total->bytes;
++	write_seqcount_end(myseq);
+ 	local_bh_enable();
+ }
+ 
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index 967d115f97efd..f570d64367a41 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -424,6 +424,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
+ 	struct sock *sk = rds_rs_to_sk(rs);
+ 	int ret = 0;
+ 	unsigned long flags;
++	struct rds_incoming *to_drop = NULL;
+ 
+ 	write_lock_irqsave(&rs->rs_recv_lock, flags);
+ 	if (!list_empty(&inc->i_item)) {
+@@ -434,11 +435,14 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
+ 					      -be32_to_cpu(inc->i_hdr.h_len),
+ 					      inc->i_hdr.h_dport);
+ 			list_del_init(&inc->i_item);
+-			rds_inc_put(inc);
++			to_drop = inc;
+ 		}
+ 	}
+ 	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+ 
++	if (to_drop)
++		rds_inc_put(to_drop);
++
+ 	rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop);
+ 	return ret;
+ }
+@@ -761,16 +765,21 @@ void rds_clear_recv_queue(struct rds_sock *rs)
+ 	struct sock *sk = rds_rs_to_sk(rs);
+ 	struct rds_incoming *inc, *tmp;
+ 	unsigned long flags;
++	LIST_HEAD(to_drop);
+ 
+ 	write_lock_irqsave(&rs->rs_recv_lock, flags);
+ 	list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) {
+ 		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
+ 				      -be32_to_cpu(inc->i_hdr.h_len),
+ 				      inc->i_hdr.h_dport);
++		list_move(&inc->i_item, &to_drop);
++	}
++	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
++
++	list_for_each_entry_safe(inc, tmp, &to_drop, i_item) {
+ 		list_del_init(&inc->i_item);
+ 		rds_inc_put(inc);
+ 	}
+-	write_unlock_irqrestore(&rs->rs_recv_lock, flags);
+ }
+ 
+ /*
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 265a02b6ad099..591ca93e2a01d 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -437,12 +437,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	struct netem_sched_data *q = qdisc_priv(sch);
+ 	/* We don't fill cb now as skb_unshare() may invalidate it */
+ 	struct netem_skb_cb *cb;
+-	struct sk_buff *skb2;
++	struct sk_buff *skb2 = NULL;
+ 	struct sk_buff *segs = NULL;
+ 	unsigned int prev_len = qdisc_pkt_len(skb);
+ 	int count = 1;
+-	int rc = NET_XMIT_SUCCESS;
+-	int rc_drop = NET_XMIT_DROP;
+ 
+ 	/* Do not fool qdisc_drop_all() */
+ 	skb->prev = NULL;
+@@ -471,19 +469,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		skb_orphan_partial(skb);
+ 
+ 	/*
+-	 * If we need to duplicate packet, then re-insert at top of the
+-	 * qdisc tree, since parent queuer expects that only one
+-	 * skb will be queued.
++	 * If we need to duplicate packet, then clone it before
++	 * original is modified.
+ 	 */
+-	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
+-		struct Qdisc *rootq = qdisc_root_bh(sch);
+-		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
+-
+-		q->duplicate = 0;
+-		rootq->enqueue(skb2, rootq, to_free);
+-		q->duplicate = dupsave;
+-		rc_drop = NET_XMIT_SUCCESS;
+-	}
++	if (count > 1)
++		skb2 = skb_clone(skb, GFP_ATOMIC);
+ 
+ 	/*
+ 	 * Randomized packet corruption.
+@@ -495,7 +485,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		if (skb_is_gso(skb)) {
+ 			skb = netem_segment(skb, sch, to_free);
+ 			if (!skb)
+-				return rc_drop;
++				goto finish_segs;
++
+ 			segs = skb->next;
+ 			skb_mark_not_on_list(skb);
+ 			qdisc_skb_cb(skb)->pkt_len = skb->len;
+@@ -521,7 +512,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 		/* re-link segs, so that qdisc_drop_all() frees them all */
+ 		skb->next = segs;
+ 		qdisc_drop_all(skb, sch, to_free);
+-		return rc_drop;
++		if (skb2)
++			__qdisc_drop(skb2, to_free);
++		return NET_XMIT_DROP;
++	}
++
++	/*
++	 * If doing duplication then re-insert at top of the
++	 * qdisc tree, since parent queuer expects that only one
++	 * skb will be queued.
++	 */
++	if (skb2) {
++		struct Qdisc *rootq = qdisc_root_bh(sch);
++		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
++
++		q->duplicate = 0;
++		rootq->enqueue(skb2, rootq, to_free);
++		q->duplicate = dupsave;
++		skb2 = NULL;
+ 	}
+ 
+ 	qdisc_qstats_backlog_inc(sch, skb);
+@@ -592,9 +600,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ 	}
+ 
+ finish_segs:
++	if (skb2)
++		__qdisc_drop(skb2, to_free);
++
+ 	if (segs) {
+ 		unsigned int len, last_len;
+-		int nb;
++		int rc, nb;
+ 
+ 		len = skb ? skb->len : 0;
+ 		nb = skb ? 1 : 0;
+diff --git a/security/selinux/avc.c b/security/selinux/avc.c
+index 4a744b1cebc8d..ebe3d08cccac2 100644
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -329,12 +329,12 @@ static int avc_add_xperms_decision(struct avc_node *node,
+ {
+ 	struct avc_xperms_decision_node *dest_xpd;
+ 
+-	node->ae.xp_node->xp.len++;
+ 	dest_xpd = avc_xperms_decision_alloc(src->used);
+ 	if (!dest_xpd)
+ 		return -ENOMEM;
+ 	avc_copy_xperms_decision(&dest_xpd->xpd, src);
+ 	list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head);
++	node->ae.xp_node->xp.len++;
+ 	return 0;
+ }
+ 
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 246fabc0f667e..98ee2a898b392 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -527,7 +527,7 @@ static int snd_timer_start1(struct snd_timer_instance *timeri,
+ 	/* check the actual time for the start tick;
+ 	 * bail out as error if it's way too low (< 100us)
+ 	 */
+-	if (start) {
++	if (start && !(timer->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
+ 		if ((u64)snd_timer_hw_resolution(timer) * ticks < 100000) {
+ 			result = -EINVAL;
+ 			goto unlock;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 77034c31fa120..dddf3f55cb13b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -520,7 +520,6 @@ static void alc_shutup_pins(struct hda_codec *codec)
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+-	case 0x10ec0257:
+ 	case 0x19e58326:
+ 	case 0x10ec0283:
+ 	case 0x10ec0285:
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 96d32766e93c6..a5b0a1a0fc576 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -390,6 +390,7 @@ YAMAHA_DEVICE(0x105a, NULL),
+ YAMAHA_DEVICE(0x105b, NULL),
+ YAMAHA_DEVICE(0x105c, NULL),
+ YAMAHA_DEVICE(0x105d, NULL),
++YAMAHA_DEVICE(0x1718, "P-125"),
+ {
+ 	USB_DEVICE(0x0499, 0x1503),
+ 	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+diff --git a/tools/include/linux/align.h b/tools/include/linux/align.h
+new file mode 100644
+index 0000000000000..14e34ace80dda
+--- /dev/null
++++ b/tools/include/linux/align.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef _TOOLS_LINUX_ALIGN_H
++#define _TOOLS_LINUX_ALIGN_H
++
++#include <uapi/linux/const.h>
++
++#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
++#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))
++#define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)
++
++#endif /* _TOOLS_LINUX_ALIGN_H */
+diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
+index 05dca5c203f39..61cf3b9a23a6f 100644
+--- a/tools/include/linux/bitmap.h
++++ b/tools/include/linux/bitmap.h
+@@ -3,6 +3,7 @@
+ #define _PERF_BITOPS_H
+ 
+ #include <string.h>
++#include <linux/align.h>
+ #include <linux/bitops.h>
+ #include <stdlib.h>
+ #include <linux/kernel.h>
+@@ -28,13 +29,14 @@ void bitmap_clear(unsigned long *map, unsigned int start, int len);
+ #define small_const_nbits(nbits) \
+ 	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+ 
++#define bitmap_size(nbits)	(ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
++
+ static inline void bitmap_zero(unsigned long *dst, int nbits)
+ {
+ 	if (small_const_nbits(nbits))
+ 		*dst = 0UL;
+ 	else {
+-		int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+-		memset(dst, 0, len);
++		memset(dst, 0, bitmap_size(nbits));
+ 	}
+ }
+ 
+@@ -120,7 +122,7 @@ static inline int test_and_clear_bit(int nr, unsigned long *addr)
+  */
+ static inline unsigned long *bitmap_alloc(int nbits)
+ {
+-	return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
++	return calloc(1, bitmap_size(nbits));
+ }
+ 
+ /*
+diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
+index e566c70e64a19..81f76a5d12ac6 100755
+--- a/tools/testing/selftests/tc-testing/tdc.py
++++ b/tools/testing/selftests/tc-testing/tdc.py
+@@ -129,7 +129,6 @@ class PluginMgr:
+             except Exception as ee:
+                 print('exception {} in call to pre_case for {} plugin'.
+                       format(ee, pgn_inst.__class__))
+-                print('test_ordinal is {}'.format(test_ordinal))
+                 print('testid is {}'.format(caseinfo['id']))
+                 raise
+ 

